diff --git a/.backportrc.json b/.backportrc.json index ab0b1ad41c6..67f7982f6ca 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,5 +1,7 @@ { "upstream": "elastic/apm-server", - "branches": [{ "name": "7.x", "checked": true }, "7.9", "7.8", "7.7", "7.6", "7.5", "7.4", "7.3", "7.2", "7.1", "7.0", "6.8"], - "labels": ["backport"] + "branches": [{ "name": "7.x", "checked": true }, "7.15", "7.14", "6.8"], + "labels": ["backport"], + "autoMerge": true, + "autoMergeMethod": "squash" } diff --git a/.ci/.jenkins-schema.yml b/.ci/.jenkins-schema.yml new file mode 100644 index 00000000000..e4bb6d3da4c --- /dev/null +++ b/.ci/.jenkins-schema.yml @@ -0,0 +1,16 @@ +--- +agents: + - REPO: "apm-agent-dotnet" + SPEC_FILEPATH: "src/Elastic.Apm.Specification/specs" + - REPO: "apm-agent-go" + SPEC_FILEPATH: "internal/apmschema/jsonschema" + - REPO: "apm-agent-java" + SPEC_FILEPATH: "apm-agent-core/src/test/resources/apm-server-schema/current" + - REPO: "apm-agent-nodejs" + SPEC_FILEPATH: "test/integration/api-schema/apm-server-schema" + - REPO: "apm-agent-php" + SPEC_FILEPATH: "tests/APM_Server_intake_API_schema/latest_used" + - REPO: "apm-agent-python" + SPEC_FILEPATH: "tests/upstream/json-specs" + - REPO: "apm-agent-ruby" + SPEC_FILEPATH: "spec/fixtures" diff --git a/.ci/bump-stack-version.sh b/.ci/bump-stack-version.sh new file mode 100755 index 00000000000..1733dd9a462 --- /dev/null +++ b/.ci/bump-stack-version.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# +# Bump the Elastic Stack version used in docker-compose.yml to the given version. +# +# This script is executed by our automation tooling +# and it requires the git add/commit commands. +# +# Parameters: +# $1 -> the version to bump to. Mandatory. +# $2 -> whether to create a branch to commit the changes to. +# Set it to false when reusing an existing Pull Request. +# Optional. Defaults to true. +# +set -euo pipefail +MSG="parameter missing." +VERSION=${1:?$MSG} +CREATE_BRANCH=${2:-true} + +OS=$(uname -s| tr '[:upper:]' '[:lower:]') + +if [ "${OS}" == "darwin" ] ; then + SED="sed -i .bck" +else + SED="sed -i" +fi + +echo "Update stack with version ${VERSION}" +${SED} -E -e "s#(image: docker\.elastic\.co/.*):[0-9]+\.[0-9]+\.[0-9]+(-[a-f0-9]{8})?#\1:${VERSION}#g" docker-compose.yml + +echo "Commit changes" +if [ "$CREATE_BRANCH" = "true" ]; then + base=$(git rev-parse --abbrev-ref HEAD | sed 's#/#-#g') + git checkout -b "update-stack-version-$(date "+%Y%m%d%H%M%S")-${base}" +else + echo "Branch creation disabled."
+fi +git add docker-compose.yml +git diff --staged --quiet || git commit -m "[Automation] Update elastic stack version to ${VERSION} for testing" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/.ci/check-changelogs.groovy b/.ci/check-changelogs.groovy index a1523c83fbb..6185160784a 100644 --- a/.ci/check-changelogs.groovy +++ b/.ci/check-changelogs.groovy @@ -49,7 +49,7 @@ pipeline { deleteDir() unstash 'source' dir("${BASE_DIR}"){ - sh(label: 'Run check changelogs', script: './script/jenkins/check-changelogs.sh') + sh(label: 'Run check changelogs', script: './.ci/scripts/check-changelogs.sh') } } } diff --git a/.ci/check-packages.groovy b/.ci/check-packages.groovy index 7c5524a0c88..61b8d00c78c 100644 --- a/.ci/check-packages.groovy +++ b/.ci/check-packages.groovy @@ -7,15 +7,12 @@ pipeline { environment { BASE_DIR = 'src' PIPELINE_LOG_LEVEL = 'INFO' - URL_BASE = "${params.URL_BASE}" - VERSION = "${params.VERSION}" HOME = "${WORKSPACE}" // This limits ourselves to just the APM tests ANSIBLE_EXTRA_FLAGS = "--tags apm-server" - // The build parameters - BEATS_URL_BASE = 'https://storage.googleapis.com/beats-ci-artifacts/snapshots' - APM_URL_BASE = 'https://storage.googleapis.com/apm-ci-artifacts/jobs/snapshots' - // BRANCH_NAME = 'master' + LANG = "C.UTF-8" + LC_ALL = "C.UTF-8" + PYTHONUTF8 = "1" } options { timeout(time: 4, unit: 'HOURS') @@ -28,7 +25,11 @@ pipeline { rateLimitBuilds(throttle: [count: 60, durationName: 'hour', userBoost: true]) } triggers { - cron '@weekly' + upstream("apm-server/apm-server-mbp/${env.JOB_BASE_NAME}") + } + parameters { + string(name: 'APM_URL_BASE', defaultValue: 'https://storage.googleapis.com/apm-ci-artifacts/jobs/snapshots', description: 'The location where the APM packages should be downloaded from') + string(name: 'VERSION', defaultValue: '8.0.0-SNAPSHOT', description: 'The package version to test (modify the job configuration to add a new version)') } stages { stage('Checkout') { @@ -36,6 +37,28 @@ steps { pipelineManager([ cancelPreviousRunningBuilds: [ when: 'PR' ] ]) deleteDir() + script { + if(isUpstreamTrigger()) { + try { + log(level: 'INFO', text: "Started by upstream pipeline. Read 'beats-tester.properties'.") + copyArtifacts(filter: 'beats-tester.properties', + flatten: true, + projectName: "apm-server/apm-server-mbp/${env.JOB_BASE_NAME}", + selector: upstream(fallbackToLastSuccessful: true)) + def props = readProperties(file: 'beats-tester.properties') + setEnvVar('APM_URL_BASE', props.get('APM_URL_BASE', '')) + setEnvVar('VERSION', props.get('VERSION', '8.0.0-SNAPSHOT')) + } catch(err) { + log(level: 'WARN', text: "copyArtifacts failed. Falling back to the build parameters.") + setEnvVar('APM_URL_BASE', params.get('APM_URL_BASE', 'https://storage.googleapis.com/apm-ci-artifacts/jobs/snapshots')) + setEnvVar('VERSION', params.get('VERSION', '8.0.0-SNAPSHOT')) + } + } else { + log(level: 'INFO', text: "Not started by upstream pipeline. 
Falling back to the build parameters.") + setEnvVar('APM_URL_BASE', params.get('APM_URL_BASE')) + setEnvVar('VERSION', params.get('VERSION')) + } + } gitCheckout(basedir: "${BASE_DIR}", repo: 'git@github.com:elastic/beats-tester.git', branch: 'master', credentialsId: 'f6c7695a-671e-4f4f-a331-acdce44ff9ba') stash allowEmpty: true, name: 'source', useDefaultExcludes: false } } @@ -44,7 +67,7 @@ matrix { // TODO: when the infra is ready with the 'nested-virtualization' then we can use that label // agent { label 'nested-virtualization' } - agent { label 'darwin' } + agent { label 'metal' } axes { axis { name 'GROUPS' @@ -57,16 +80,13 @@ stage('Test'){ options { skipDefaultCheckout() } steps { - // See https://stackoverflow.com/questions/59269208/errorrootcode-for-hash-md5-was-not-found-when-using-any-hg-mercurial-command - sh(label: "Switching OpenSSL versions to fix Py2", script: "brew switch openssl 1.0.2s") deleteDir() unstash 'source' dir("${BASE_DIR}"){ - withGoEnv(){ + withGoEnv(os: 'linux'){ sh(label: 'make batch', script: """#!/bin/bash - echo "beats_url_base: ${BEATS_URL_BASE}" > run-settings-jenkins.yml - echo "apm_url_base: ${APM_URL_BASE}" >> run-settings-jenkins.yml + echo "apm_url_base: ${APM_URL_BASE}" > run-settings-jenkins.yml echo "version: ${VERSION}" >> run-settings-jenkins.yml RUN_SETTINGS=jenkins make batch""") } @@ -77,7 +97,7 @@ dir("${BASE_DIR}"){ junit(allowEmptyResults: true, keepLongStdio: true, testResults: "logs/*.xml") archiveArtifacts(allowEmptyArchive: true, artifacts: 'logs/**') - withGoEnv(){ + withGoEnv(os: 'linux'){ sh(label: 'make clean', script: 'make clean') } } @@ -93,7 +113,7 @@ } post { cleanup { - notifyBuildResult(prComment: true) + notifyBuildResult(prComment: false) } } } diff --git a/.ci/docker/Makefile b/.ci/docker/Makefile deleted file mode 100644 index d9ed9f05bed..00000000000 --- a/.ci/docker/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -.PHONY: help -.DEFAULT_GOAL := help -BATS_VERSION = c706d1470dd1376687776bbe985ac22d09780327 #v1.1.0 -BATS_ASSERT = 9f88b4207da750093baabc4e3f41bf68f0dd3630 #v0.3.0 -BATS_SUPPORT = 004e707638eedd62e0481e8cdc9223ad471f12ee #v0.3.0 -LTS_ALPINE ?= 12-alpine - -help: ## Display this help text - @grep -E '^[a-zA-Z_-]+[%]?:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' - -bats: ## Install bats in the project itself - @echo "Cloning bats-core" - @[ -d $(CURDIR)/bats-core ] \ || git clone -n https://github.com/bats-core/bats-core.git $(CURDIR)/bats-core - @cd $(CURDIR)/bats-core \ && git fetch --all \ && git checkout -B v1.1.0 ${BATS_VERSION} - -bats-assert: ##Install bats-assert in the project itself - @echo "Clonning bats-assert" - @[ -d $(CURDIR)/tests/test_helper/bats-assert ] \ || git clone -n https://github.com/ztombol/bats-assert $(CURDIR)/tests/test_helper/bats-assert - @cd $(CURDIR)/tests/test_helper/bats-assert \ && git fetch --all \ && git checkout -B v0.3.0 ${BATS_ASSERT} - -bats-support: ##Install bats-support in the project itself - @echo "Clonning bats-support" - @[ -d $(CURDIR)/tests/test_helper/bats-support ] \ || git clone -n https://github.com/ztombol/bats-support $(CURDIR)/tests/test_helper/bats-support - @cd $(CURDIR)/tests/test_helper/bats-support \ && git fetch --all \ && git checkout -B v0.3.0 ${BATS_SUPPORT} - -prepare-test: bats bats-assert bats-support## Prepare the bats dependencies - @echo "Pulling Alpine image" - @docker 
pull node:${LTS_ALPINE} - @mkdir -p target - -convert-tests-results: ## convert TAP test results to JUnit - @APP=$*; docker run --rm -e APP=$${APP} -v "$(CURDIR)":/usr/src/app -w /usr/src/app node:${LTS_ALPINE} \ - sh -c 'npm install tap-xunit -g && cat target/results.tap | tap-xunit --package="co.elastic.pipeline.$${APP}" > target/junit-$${APP}-results.xml' - -test-golang-mage: prepare-test ## Run the tests for the specific app - cp -f ../../go.* golang-mage - mkdir -p golang-mage/approvaltest && cp -f ../../approvaltest/go.* golang-mage/approvaltest - mkdir -p golang-mage/systemtest && cp -f ../../systemtest/go.* golang-mage/systemtest - @DOCKERFILE=golang-mage bats-core/bin/bats --tap tests | tee target/results.tap - @$(MAKE) -s convert-tests-results - -test-%: prepare-test ## Run the tests for the specific app - @DOCKERFILE=$* bats-core/bin/bats --tap tests | tee target/results.tap - @$(MAKE) -s convert-tests-results - -push-%: prepare-test ## Push the Docker image to the docker.elastic.co repository - docker push "docker.elastic.co/observability-ci/$*" - -all-push: push-golang-mage ## Push all Docker images to the docker.elastic.co repository - -all-tests: test-golang-mage ## Run the tests for all the apps - -clean: ## Clean autogenerated files/folders - @rm -rf bats-core - @rm -rf target diff --git a/.ci/docker/golang-mage/Dockerfile b/.ci/docker/golang-mage/Dockerfile deleted file mode 100644 index 55b09fd5722..00000000000 --- a/.ci/docker/golang-mage/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -ARG GO_VERSION=1.13.10 -FROM golang:${GO_VERSION} - -ENV TOOLS=/tools -WORKDIR $TOOLS - -COPY go.mod go.sum ./ -COPY approvaltest/go.mod approvaltest/go.sum ./approvaltest/ -COPY systemtest/go.mod systemtest/go.sum ./systemtest/ - -RUN go mod download -RUN cd approvaltest && go mod download -RUN cd systemtest && go mod download - -RUN apt-get update -y -qq \ - && apt-get install -y -qq python3 python3-pip python3-venv \ - && rm -rf /var/lib/apt/lists/* diff --git a/.ci/docker/tests/test_helpers.bash b/.ci/docker/tests/test_helpers.bash deleted file mode 100644 index bb76ce39a84..00000000000 --- a/.ci/docker/tests/test_helpers.bash +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bats -# shellcheck shell=bash - -# check dependencies -( - type docker &>/dev/null || ( echo "docker is not available"; exit 1 ) - type curl &>/dev/null || ( echo "curl is not available"; exit 1 ) -)>&2 - -function cleanup { - docker kill "$1" &>/dev/null ||: - docker rm -fv "$1" &>/dev/null ||: -} diff --git a/.ci/docker/tests/tests.bats b/.ci/docker/tests/tests.bats deleted file mode 100644 index 5bb13e3910e..00000000000 --- a/.ci/docker/tests/tests.bats +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bats - -load 'test_helper/bats-support/load' -load 'test_helper/bats-assert/load' -load test_helpers - -IMAGE="docker.elastic.co/observability-ci/${DOCKERFILE//\//-}" -CONTAINER="${DOCKERFILE//\//-}" - -@test "${DOCKERFILE} - build image" { - cd $BATS_TEST_DIRNAME/.. - # Simplify the makefile as it does fail with '/bin/sh: 1: Bad substitution' in the CI - if [ ! 
-e ${DOCKERFILE} ] ; then - DOCKERFILE="${DOCKERFILE//-//}" - fi - run docker build --rm -t ${IMAGE} ${DOCKERFILE} - assert_success -} - -@test "${DOCKERFILE} - clean test containers" { - cleanup $CONTAINER -} - -@test "${DOCKERFILE} - create test container" { - run docker run -d --name $CONTAINER -P ${IMAGE} - assert_success -} - -@test "${DOCKERFILE} - test container with 0 as exitcode" { - sleep 1 - run docker inspect -f {{.State.ExitCode}} $CONTAINER ${CMD} - assert_output '0' -} - -@test "${DOCKERFILE} - clean test containers afterwards" { - cleanup $CONTAINER -} diff --git a/.ci/jobs/apm-server-check-changelogs-mbp.yml b/.ci/jobs/apm-server-check-changelogs-mbp.yml index cdd552c5c23..1b6d3d746b7 100644 --- a/.ci/jobs/apm-server-check-changelogs-mbp.yml +++ b/.ci/jobs/apm-server-check-changelogs-mbp.yml @@ -12,6 +12,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: false + head-filter-regex: '^(?!update-stack-version).*$' notification-context: 'apm-ci' property-strategies: all-branches: @@ -37,4 +38,3 @@ timeout: '15' use-author: true wipe-workspace: 'True' - periodic-folder-trigger: 1w diff --git a/.ci/jobs/apm-server-check-packages-mbp.yml b/.ci/jobs/apm-server-check-packages-mbp.yml new file mode 100644 index 00000000000..6d91f3c00bb --- /dev/null +++ b/.ci/jobs/apm-server-check-packages-mbp.yml @@ -0,0 +1,51 @@ +--- +- job: + name: apm-server/apm-server-check-packages-mbp + display-name: APM Server Package Smoke Test MBP + description: Test package installation across supported platforms + project-type: multibranch + script-path: .ci/check-packages.groovy + scm: + - github: + branch-discovery: no-pr + discover-pr-forks-strategy: merge-current + discover-pr-forks-trust: permission + discover-pr-origin: merge-current + discover-tags: false + head-filter-regex: '(master|7\.1\d|8\.\d+)' + notification-context: 'beats-tester' + build-strategies: + - skip-initial-build: true + - named-branches: + - exact-name: + name: 'master' + case-sensitive: true + - regex-name: + regex: '7\.1\d' + case-sensitive: true + - regex-name: + regex: '8\.\d+' + case-sensitive: true + - change-request: + ignore-target-only-changes: true + repo: apm-server + repo-owner: elastic + credentials-id: 2a9602aa-ab9f-4e52-baf3-b71ca88469c7-UserAndToken + ssh-checkout: + credentials: f6c7695a-671e-4f4f-a331-acdce44ff9ba + clean: + after: true + before: true + prune: true + shallow-clone: true + depth: 3 + do-not-fetch-tags: true + submodule: + disable: false + recursive: true + parent-credentials: true + timeout: 100 + reference-repo: /var/lib/jenkins/.git-references/apm-server.git + timeout: '15' + use-author: true + wipe-workspace: 'True' diff --git a/.ci/jobs/apm-server-check-packages.yml b/.ci/jobs/apm-server-check-packages.yml deleted file mode 100644 index 4cf2f02c199..00000000000 --- a/.ci/jobs/apm-server-check-packages.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- job: - name: apm-server/apm-server-check-packages - display-name: "APM Server Package Smoke Test" - description: "Test package installation across supported platforms\n" - triggers: [] - parameters: [] - project-type: pipeline - pipeline-scm: - lightweight-checkout: false - scm: - - git: - url: "https://github.com/elastic/apm-server.git" - basedir: "apm-server" - wipe-workspace: true - branches: - - "master" - script-path: apm-server/.ci/check-packages.groovy diff --git a/.ci/jobs/apm-server-mbp.yml b/.ci/jobs/apm-server-mbp.yml index 0db390fcf69..bfc8066d340 100644 --- a/.ci/jobs/apm-server-mbp.yml +++ 
b/.ci/jobs/apm-server-mbp.yml @@ -13,6 +13,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true + head-filter-regex: '^(?!update-stack-version).*$' notification-context: 'apm-ci' repo: apm-server repo-owner: elastic diff --git a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml index 89eb878dc57..83ce01b29e1 100644 --- a/.ci/jobs/defaults.yml +++ b/.ci/jobs/defaults.yml @@ -12,7 +12,6 @@ logrotate: numToKeep: 100 node: linux - periodic-folder-trigger: 1d prune-dead-branches: true publishers: - email: diff --git a/.ci/jobs/update-beats-mbp.yml b/.ci/jobs/update-beats-mbp.yml new file mode 100644 index 00000000000..2bef42a1e73 --- /dev/null +++ b/.ci/jobs/update-beats-mbp.yml @@ -0,0 +1,40 @@ +--- +- job: + name: apm-server/update-beats-mbp + display-name: apm-server Update beats + description: To keep apm-server up to date with libbeat and beats packaging + view: APM-CI + project-type: multibranch + script-path: .ci/update-beats.groovy + scm: + - github: + branch-discovery: no-pr + discover-pr-forks-strategy: merge-current + discover-pr-forks-trust: permission + discover-pr-origin: merge-current + discover-tags: false + head-filter-regex: '^(master|PR-.*)$' + notification-context: 'update-beats' + repo: apm-server + repo-owner: elastic + credentials-id: 2a9602aa-ab9f-4e52-baf3-b71ca88469c7-UserAndToken + ssh-checkout: + credentials: f6c7695a-671e-4f4f-a331-acdce44ff9ba + property-strategies: + all-branches: + - suppress-scm-triggering: true + clean: + after: true + before: true + prune: true + shallow-clone: true + depth: 4 + do-not-fetch-tags: true + submodule: + disable: false + recursive: true + parent-credentials: true + timeout: 100 + timeout: '15' + use-author: true + wipe-workspace: 'True' diff --git a/.ci/jobs/update-json-schema.yml b/.ci/jobs/update-json-schema.yml new file mode 100644 index 00000000000..cb16ae0de58 --- /dev/null +++ b/.ci/jobs/update-json-schema.yml @@ -0,0 +1,43 @@ +--- +- job: + name: apm-server/update-json-schema-mbp + display-name: apm-server Update json schema + description: Send PRs to the subscribed APM Agents if the json schema files are modified, triggered for the master branch of the elastic/apm-server project + view: APM-CI + project-type: multibranch + script-path: .ci/update-json-schema.groovy + scm: + - github: + branch-discovery: no-pr + discover-pr-forks-strategy: merge-current + discover-pr-forks-trust: permission + discover-pr-origin: merge-current + discover-tags: false + head-filter-regex: '^(?!update-stack-version).*$' + notification-context: 'update-json-schema' + repo: apm-server + repo-owner: elastic + credentials-id: 2a9602aa-ab9f-4e52-baf3-b71ca88469c7-UserAndToken + ssh-checkout: + credentials: f6c7695a-671e-4f4f-a331-acdce44ff9ba + build-strategies: + - regular-branches: true + - change-request: + ignore-target-only-changes: true + clean: + after: true + before: true + prune: true + shallow-clone: true + depth: 4 + do-not-fetch-tags: true + submodule: + disable: false + recursive: true + parent-credentials: true + timeout: 100 + timeout: '15' + use-author: true + wipe-workspace: 'True' + triggers: + - timed: 'H H(4-5) * * 1,5' diff --git a/script/jenkins/bench.sh b/.ci/scripts/bench.sh similarity index 100% rename from script/jenkins/bench.sh rename to .ci/scripts/bench.sh diff --git a/.ci/scripts/build-darwin.sh b/.ci/scripts/build-darwin.sh index b7625f8a684..4a2f561b5bd 100755 --- a/.ci/scripts/build-darwin.sh +++ b/.ci/scripts/build-darwin.sh @@ -6,4 +6,4 @@ source ./script/common.bash 
jenkins_setup -./script/jenkins/build.sh +./.ci/scripts/build.sh diff --git a/script/jenkins/build.sh b/.ci/scripts/build.sh similarity index 100% rename from script/jenkins/build.sh rename to .ci/scripts/build.sh diff --git a/script/jenkins/check-changelogs.sh b/.ci/scripts/check-changelogs.sh similarity index 100% rename from script/jenkins/check-changelogs.sh rename to .ci/scripts/check-changelogs.sh diff --git a/.ci/scripts/docker-get-logs.sh b/.ci/scripts/docker-get-logs.sh index 4db3945c6a2..6e21d57415c 100755 --- a/.ci/scripts/docker-get-logs.sh +++ b/.ci/scripts/docker-get-logs.sh @@ -3,7 +3,7 @@ set -euo pipefail STEP=${1:-""} -DOCKER_INFO_DIR="docker-info/${STEP}" +DOCKER_INFO_DIR="build/docker-info/${STEP}" mkdir -p ${DOCKER_INFO_DIR} cp docker-compose*.yml ${DOCKER_INFO_DIR} cd ${DOCKER_INFO_DIR} diff --git a/script/jenkins/intake.sh b/.ci/scripts/intake.sh similarity index 100% rename from script/jenkins/intake.sh rename to .ci/scripts/intake.sh diff --git a/script/jenkins/linux-test.sh b/.ci/scripts/linux-test.sh similarity index 100% rename from script/jenkins/linux-test.sh rename to .ci/scripts/linux-test.sh diff --git a/script/jenkins/package-docker-snapshot.sh b/.ci/scripts/package-docker-snapshot.sh similarity index 100% rename from script/jenkins/package-docker-snapshot.sh rename to .ci/scripts/package-docker-snapshot.sh diff --git a/script/jenkins/package.sh b/.ci/scripts/package.sh similarity index 100% rename from script/jenkins/package.sh rename to .ci/scripts/package.sh diff --git a/.ci/scripts/prepare-spec-changes.sh b/.ci/scripts/prepare-spec-changes.sh new file mode 100755 index 00000000000..70ac6ca90ff --- /dev/null +++ b/.ci/scripts/prepare-spec-changes.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -uexo pipefail + +readonly REPO_NAME=${1} +readonly SPECS_PATH=${2} +readonly REPO_DIR=".ci/${REPO_NAME}" + +git clone "https://github.com/elastic/${REPO_NAME}" "${REPO_DIR}" + +mkdir -p "${REPO_DIR}/${SPECS_PATH}" + +echo "Copying spec files to the ${REPO_NAME} repo" +cp docs/spec/v2/*.* "${REPO_DIR}/${SPECS_PATH}" + +cd "${REPO_DIR}" +git config user.email +git checkout -b "update-spec-files-$(date "+%Y%m%d%H%M%S")" +git add "${SPECS_PATH}" +git commit -m "synchronize json schema specs" +git --no-pager log -1 diff --git a/.ci/scripts/test-darwin.sh b/.ci/scripts/test-darwin.sh index aa671118644..6df42b0ba72 100755 --- a/.ci/scripts/test-darwin.sh +++ b/.ci/scripts/test-darwin.sh @@ -17,4 +17,4 @@ source ./script/common.bash jenkins_setup -script/jenkins/unit-test.sh +./.ci/scripts/unit-test.sh diff --git a/script/jenkins/unit-test.sh b/.ci/scripts/unit-test.sh similarity index 80% rename from script/jenkins/unit-test.sh rename to .ci/scripts/unit-test.sh index 045f2e9e1b0..c8e3142e6d9 100755 --- a/script/jenkins/unit-test.sh +++ b/.ci/scripts/unit-test.sh @@ -11,8 +11,8 @@ export COV_DIR="build/coverage" mkdir -p ${COV_DIR} make update -go install github.com/jstemmer/go-junit-report -go install github.com/t-yuki/gocover-cobertura +go install -modfile=tools/go.mod github.com/jstemmer/go-junit-report +go install -modfile=tools/go.mod github.com/t-yuki/gocover-cobertura (go test -race -covermode=atomic -coverprofile=${COV_DIR}/unit.cov -v ./... 
2>&1 | tee ${OUT_FILE}) || echo -e "\033[31;49mTests FAILED\033[0m" diff --git a/.ci/scripts/update-beats.sh b/.ci/scripts/update-beats.sh new file mode 100755 index 00000000000..326781570e3 --- /dev/null +++ b/.ci/scripts/update-beats.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# This script is executed by our automation tooling +# and it requires the git add/commit commands. +# +set -euo pipefail + +make update-beats +COMMIT_MESSAGE="Update to elastic/beats@$(go list -m -f {{.Version}} github.com/elastic/beats/... | cut -d- -f3)" + +git checkout -b "update-beats-$(date "+%Y%m%d%H%M%S")" +git add go.mod go.sum NOTICE.txt \ .go-version docs/version.asciidoc \ docs/fields.asciidoc include/fields.go x-pack/apm-server/include/fields.go +find . -maxdepth 2 -name Dockerfile -exec git add {} \; + +git diff --staged --quiet || git commit -m "$COMMIT_MESSAGE" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/script/jenkins/windows-build.ps1 b/.ci/scripts/windows-build.ps1 similarity index 100% rename from script/jenkins/windows-build.ps1 rename to .ci/scripts/windows-build.ps1 diff --git a/script/jenkins/windows-test.ps1 b/.ci/scripts/windows-test.ps1 similarity index 76% rename from script/jenkins/windows-test.ps1 rename to .ci/scripts/windows-test.ps1 index dc7590baeeb..984d101c38b 100644 --- a/script/jenkins/windows-test.ps1 +++ b/.ci/scripts/windows-test.ps1 @@ -50,20 +50,8 @@ New-Item -ItemType directory -Path build\coverage | Out-Null New-Item -ItemType directory -Path build\system-tests | Out-Null New-Item -ItemType directory -Path build\system-tests\run | Out-Null -echo "Building fields.yml" -exec { mage fields } - echo "Building $env:beat" exec { mage build } "Build FAILURE" echo "Unit testing $env:beat" exec { mage goTestUnit } - -echo "System testing $env:beat" -# Get a CSV list of package names. -$packages = $(go list ./... | select-string -Pattern "/vendor/" -NotMatch | select-string -Pattern "/scripts/cmd/" -NotMatch) -$packages = ($packages|group|Select -ExpandProperty Name) -join "," -exec { go test -race -c -cover -covermode=atomic -coverpkg $packages } "go test FAILURE" - -echo "Running python tests" -exec { mage pythonUnitTest } "System test FAILURE" diff --git a/.ci/update-beats.groovy b/.ci/update-beats.groovy new file mode 100644 index 00000000000..164657a9bb3 --- /dev/null +++ b/.ci/update-beats.groovy @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// https://apm-ci.elastic.co/job/apm-server/job/update-beats-mbp/ + +@Library('apm@current') _ + +pipeline { + agent { label 'linux && immutable' } + environment { + REPO = 'apm-server' + BASE_DIR = "src/github.com/elastic/${env.REPO}" + HOME = "${env.WORKSPACE}" + NOTIFY_TO = credentials('notify-to') + JOB_GCS_BUCKET = credentials('gcs-bucket') + JOB_GIT_CREDENTIALS = "f6c7695a-671e-4f4f-a331-acdce44ff9ba" + PIPELINE_LOG_LEVEL = 'INFO' + } + triggers { + // Only the master branch will run on a timer basis + cron(env.BRANCH_NAME == 'master' ? 'H H(4-5) * * 1,5' : '') + } + options { + timeout(time: 1, unit: 'HOURS') + buildDiscarder(logRotator(numToKeepStr: '5', artifactNumToKeepStr: '5')) + timestamps() + ansiColor('xterm') + disableResume() + durabilityHint('PERFORMANCE_OPTIMIZED') + } + parameters { + booleanParam(name: 'DRY_RUN_MODE', defaultValue: false, description: 'If true, allows executing this pipeline in dry run mode, without sending a PR.') + } + stages { + stage('Checkout'){ + steps { + deleteDir() + gitCheckout(basedir: "${BASE_DIR}", repo: "git@github.com:elastic/${REPO}.git", credentialsId: "${JOB_GIT_CREDENTIALS}") + } + } + stage('Update beats') { + options { skipDefaultCheckout() } + steps { + dir("${BASE_DIR}"){ + withGoEnv(){ + setupAPMGitEmail(global: true) + sh(label: 'make update-beats', script: '.ci/scripts/update-beats.sh') + } + } + } + } + stage('Send Pull Request'){ + options { skipDefaultCheckout() } + steps { + dir("${BASE_DIR}"){ + createPullRequest() + } + } + } + } + post { + cleanup { + notifyBuildResult(prComment: false) + } + } +} + +def createPullRequest(Map args = [:]) { + def title = '[automation] update libbeat and beats packaging' + def message = createPRDescription() + def labels = "automation" + if (params.DRY_RUN_MODE) { + log(level: 'INFO', text: "DRY-RUN: createPullRequest(labels: ${labels}, message: '${message}')") + return + } + def branchName = (isPR()) ? env.CHANGE_TARGET : env.BRANCH_NAME + if (anyChangesToBeSubmitted("${branchName}")) { + githubCreatePullRequest(title: "${title}", labels: "${labels}", description: "${message}", base: "${branchName}") + } else { + log(level: 'INFO', text: "There are no changes to be submitted.") + } +} + +def anyChangesToBeSubmitted(String branch) { + return sh(returnStatus: true, script: "git diff --quiet HEAD..${branch}") != 0 +} + +def createPRDescription() { + return """### What \n Update libbeat and beats packaging.""" +} diff --git a/.ci/update-json-schema.groovy b/.ci/update-json-schema.groovy new file mode 100644 index 00000000000..772c31d48fe --- /dev/null +++ b/.ci/update-json-schema.groovy @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// https://apm-ci.elastic.co/job/apm-server/job/update-json-schema-mbp/ + +@Library('apm@current') _ + +pipeline { + agent { label 'linux && immutable' } + environment { + REPO = 'apm-server' + BASE_DIR = "src/github.com/elastic/${env.REPO}" + HOME = "${env.WORKSPACE}" + NOTIFY_TO = credentials('notify-to') + JOB_GCS_BUCKET = credentials('gcs-bucket') + JOB_GIT_CREDENTIALS = "f6c7695a-671e-4f4f-a331-acdce44ff9ba" + PIPELINE_LOG_LEVEL = 'INFO' + } + triggers { + cron('H H(4-5) * * 1,5') + } + options { + timeout(time: 1, unit: 'HOURS') + buildDiscarder(logRotator(numToKeepStr: '5', artifactNumToKeepStr: '5')) + timestamps() + ansiColor('xterm') + disableResume() + durabilityHint('PERFORMANCE_OPTIMIZED') + } + parameters { + booleanParam(name: 'DRY_RUN_MODE', defaultValue: false, description: 'If true, allows executing this pipeline in dry run mode, without sending a PR.') + booleanParam(name: 'FORCE_SEND_PR', defaultValue: false, description: 'If true, will force sending a PR, although it could be affected by the value of the DRY_RUN_MODE parameter: if the latter is true, a message will be printed in the console.') + choice(name: 'APM_AGENTS', choices: [ + 'All', + '.NET', + 'Go', + 'Java', + 'Node.js', + 'PHP', + 'Python', + 'Ruby', + 'RUM'], description: 'Name of the APM Agent whose specs you want to update.') + } + stages { + stage('Checkout'){ + steps { + deleteDir() + gitCheckout(basedir: "${BASE_DIR}", + repo: "git@github.com:elastic/${REPO}.git", + credentialsId: "${JOB_GIT_CREDENTIALS}" + ) + stash allowEmpty: true, name: 'source', useDefaultExcludes: false + } + } + // This stage will populate the environment, and will only be executed under any of the + // following conditions: + // 1. we run the pipeline NOT in DRY_RUN_MODE, because we want to send real PRs + // 2. we run the pipeline forcing real PRs to be sent + // Because the rest of the following stages will need these variables to check for changes, + // skipping this stage would not take effect in them, as they are covered by the + // FORCE_SEND_PR check. + stage('Check for schema changes'){ + when { + beforeAgent true + anyOf { + expression { return env.DRY_RUN_MODE == "false" } + expression { return params.FORCE_SEND_PR } + } + } + environment { + // GIT_PREVIOUS_SUCCESSFUL_COMMIT might point to a local merge commit instead of a commit in the + // origin, so let's use the target branch for PRs and the GIT_PREVIOUS_SUCCESSFUL_COMMIT for + // branches. + COMMIT_FROM = """${isPR() ? "origin/${env.CHANGE_TARGET}" : "${env.GIT_PREVIOUS_SUCCESSFUL_COMMIT}"}""" + } + steps { + deleteDir() + unstash 'source' + script { + dir("${BASE_DIR}"){ + regexps = [ "^docs/spec/v2/.*" ] + env.SPECS_UPDATED = isGitRegionMatch( + from: "${env.COMMIT_FROM}", + patterns: regexps) + env.PR_DESCRIPTION = createPRDescription(env.COMMIT_FROM) + } + } + } + } + stage('Send Pull Request for JSON specs'){ + options { + warnError('Pull Requests to APM agents failed') + } + when { + beforeAgent true + anyOf { + expression { return env.SPECS_UPDATED == "true" } + expression { return params.FORCE_SEND_PR } + } + } + environment { + // agentMapping is defined in the shared library as a map + APM_AGENTS = "${params?.APM_AGENTS}" + SELECTED_AGENT = agentMapping.id(env.APM_AGENTS) + } + steps { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + generateSteps() + } + } + } + } + post { + cleanup { + // PR comments should only be created for the main pipeline. 
+ notifyBuildResult(prComment: false) + } + } +} + +def generateSteps() { + def agents = readYaml(file: '.ci/.jenkins-schema.yml') + def parallelTasks = [:] + agents['agents'].each { agent -> + if (agent.SPEC_FILEPATH?.trim()) { + if (env.SELECTED_AGENT == 'all' || "apm-agent-${env.SELECTED_AGENT}" == agent.REPO) { + parallelTasks["${agent.REPO}"] = generateStepForAgent(repo: "${agent.REPO}", filePath: "${agent.SPEC_FILEPATH}") + } + } + } + parallel(parallelTasks) +} + +def generateStepForAgent(Map args = [:]){ + def repo = args.containsKey('repo') ? args.get('repo') : error('generateStepForAgent: repo argument is required') + def filePath = args.containsKey('filePath') ? args.get('filePath') : error('generateStepForAgent: filePath argument is required') + return { + node('linux && immutable') { + catchError(buildResult: 'SUCCESS', stageResult: 'UNSTABLE') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + setupAPMGitEmail(global: true) + sh script: """.ci/scripts/prepare-spec-changes.sh "${repo}" "${filePath}" """, label: "Prepare changes for ${repo}" + dir(".ci/${repo}") { + if (params.DRY_RUN_MODE || isPR()) { + echo "DRY-RUN: ${repo} with description: '${env.PR_DESCRIPTION}'" + } else { + githubCreatePullRequest(title: "synchronize schema spec", labels: 'automation', description: "${env.PR_DESCRIPTION}") + } + } + } + } + } + } +} + +def createPRDescription(commit) { + def message = """ + ### What + APM agent json schema automatic sync + + ### Why + """ + if (params.FORCE_SEND_PR) { + message += "*Manually forced with the CI automation job.*" + } + if (env?.SPECS_UPDATED?.equals('true')){ + def gitLog = sh(script: """ + git log --pretty=format:'* https://github.com/${env.ORG_NAME}/${env.REPO_NAME}/commit/%h %s' \ + ${commit}...HEAD \ + --follow -- docs/spec \ + | sed 's/#\\([0-9]\\+\\)/https:\\/\\/github.com\\/${env.ORG_NAME}\\/${env.REPO_NAME}\\/pull\\/\\1/g' || true""", returnStdout: true) + message += "*Changeset*\n${gitLog}" + } + return message +} diff --git a/.gitattributes b/.gitattributes index a298e8004fb..b2a1b172e6c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,2 @@ CHANGELOG.asciidoc merge=union +internal/otel_collector linguist-generated=true diff --git a/.github/actions/check_labels/Dockerfile b/.github/actions/check_labels/Dockerfile new file mode 100644 index 00000000000..0b97507a629 --- /dev/null +++ b/.github/actions/check_labels/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:3.10 +COPY entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] diff --git a/.github/actions/check_labels/action.yml b/.github/actions/check_labels/action.yml new file mode 100644 index 00000000000..3135792ed2c --- /dev/null +++ b/.github/actions/check_labels/action.yml @@ -0,0 +1,4 @@ +name: Check labels +runs: + using: docker + image: Dockerfile diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 9b35ac61f7e..9a453d7f799 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -3,56 +3,48 @@ If this is your first contribution, please review and sign our contributor agreement - https://www.elastic.co/contributor-agreement. -Before creating the PR, ensure that: - -1. Your branch is rebased on top of the latest master. - Squash your initial commits into meaningful commits. - After creating a PR, do not rebase of force push any longer. -2. Nothing is broken, by running the test suite (at least unit tests). - See https://github.com/elastic/apm-server/blob/master/TESTING.md for details. -3. 
Your code follows the style guidelines of this project: - run `make check-full` for static code checks and linting. - -A few suggestions about filling out this PR: - -1. Use a descriptive title for the PR. -2. If this pull request is work in progress, create a draft PR. -3. Please label this PR with at least one of the following labels: - - bug fix - - breaking change - - enhancement -4. Reference the related issue, and make use of magic keywords where it makes sense - https://help.github.com/articles/closing-issues-using-keywords/. -5. Do not remove any checklist items, strike through the ones that don't apply - (by using tildes, e.g. ~scratch this ~). -6. Explain how this PR can be tested by the reviewer: commands, dependencies, steps, etc. -7. Submit the pull request: - Push your changes to your forked copy of the repository and submit a pull request - (https://help.github.com/articles/using-pull-requests). -8. Please be patient. We might not be able to immediately review your code, - but we'll do our best to dedicate to it the attention it deserves. - Your effort is much appreciated! +Guidelines: + - Prefer small PRs, and split changes into multiple logical commits where they must + be delivered in a single PR. + - If the PR is incomplete and not yet ready for review, open it as a Draft. + - Once the PR is marked ready for review it is expected to pass all tests and linting, + and you should not force-push any changes. See also https://github.com/elastic/apm-server/blob/master/CONTRIBUTING.md for more tips on contributing. --> ## Motivation/summary + + ## Checklist -- [ ] I have signed the [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). -- [ ] I have updated [CHANGELOG.asciidoc](https://github.com/elastic/apm-server/blob/master/CHANGELOG.asciidoc) + + +- [ ] Update [CHANGELOG.asciidoc](https://github.com/elastic/apm-server/blob/master/CHANGELOG.asciidoc) +- [ ] Documentation has been updated -I have considered changes for: -- [ ] documentation -- [ ] logging (add log lines, choose appropriate log selector, etc.) -- [ ] metrics and monitoring (create issue for Kibana team to add metrics to visualizations, e.g. [Kibana#44001](https://github.com/elastic/kibana/issues/44001)) -- [ ] automated tests (add tests for the code changes, all [**unit** tests](https://github.com/elastic/apm-server/blob/master/TESTING.md) pass locally) -- [ ] telemetry -- [ ] Elasticsearch Service (https://cloud.elastic.co) -- [ ] Elastic Cloud Enterprise (https://www.elastic.co/products/ece) -- [ ] Elastic Cloud on Kubernetes (https://www.elastic.co/elastic-cloud-kubernetes) +For functional changes, consider: +- Is it observable through the addition of either **logging** or **metrics**? +- Is its use being published in **telemetry** to enable product improvement? +- Have system tests been added to avoid regression? ## How to test these changes + + ## Related issues + + diff --git a/.github/workflows/check_labels.yml b/.github/workflows/check_labels.yml new file mode 100644 index 00000000000..e85a3a90ea4 --- /dev/null +++ b/.github/workflows/check_labels.yml @@ -0,0 +1,19 @@ +--- +name: Label checks +on: + pull_request: + types: [opened, labeled, unlabeled] + branches: [main, master] + +jobs: + check_labels: + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v2 + with: + go-version: 1.17.x + - name: Checkout + uses: actions/checkout@v2 + - name: Check labels + run: cd ./.github/workflows/check_labels && go run . 
diff --git a/.github/workflows/check_labels/go.mod b/.github/workflows/check_labels/go.mod new file mode 100644 index 00000000000..89dc9084c82 --- /dev/null +++ b/.github/workflows/check_labels/go.mod @@ -0,0 +1,3 @@ +module check_labels + +go 1.16 diff --git a/.github/workflows/check_labels/main.go b/.github/workflows/check_labels/main.go new file mode 100644 index 00000000000..bec2b94a11b --- /dev/null +++ b/.github/workflows/check_labels/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "os" + "strings" +) + +func main() { + f, err := os.Open(os.Getenv("GITHUB_EVENT_PATH")) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + type label struct { + Name string `json:"name"` + } + var event struct { + PullRequest struct { + Labels []label + } `json:"pull_request"` + } + if err := json.NewDecoder(f).Decode(&event); err != nil { + log.Fatal(err) + } + + // Check for the "conflict" and "backport-*" labels. + var haveBackport bool + for _, label := range event.PullRequest.Labels { + if label.Name == "conflict" { + fmt.Printf("::error::%s\n", "Cannot merge with 'conflict' label") + os.Exit(1) + } + if !haveBackport && strings.HasPrefix(label.Name, "backport-") { + haveBackport = true + } + } + if !haveBackport { + fmt.Printf("::error::%s\n", "Missing 'backport-*' label") + os.Exit(1) + } +} diff --git a/.gitignore b/.gitignore index 13d56edf4cf..dc0efee8e25 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ /.ci/docker/golang-mage/go.* /.ci/docker/golang-mage/approvaltest/ /.ci/docker/golang-mage/systemtest/ +/.ci/docker/golang-mage/internal/otel_collector/ +/.ci/docker/golang-mage/internal/glog **/*.idea /build /data @@ -34,3 +36,6 @@ html_docs /docker-compose.override.yml /config.mk /systemtest/logs +docker-compose.yml.bck +Dockerfile.bck +version.asciidoc.bck diff --git a/.go-version b/.go-version index 52e779f28fa..de646d2fc11 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.14.7 +1.16.6 diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 00000000000..d2bc4b19bc2 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,103 @@ +pull_request_rules: + - name: ask to resolve conflict + conditions: + - conflict + actions: + comment: + message: | + This pull request is now in conflict. Could you fix it @{{author}}? 🙏 + To fix up this pull request, you can check it out locally. 
See documentation: https://help.github.com/articles/checking-out-pull-requests-locally/ + ``` + git fetch upstream + git checkout -b {{head}} upstream/{{head}} + git merge upstream/{{base}} + git push upstream {{head}} + ``` + - name: backport patches to 7.x branch + conditions: + - merged + - base=master + - label=backport-7.x + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.x" + title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" + - name: backport patches to 7.15 branch + conditions: + - merged + - base=master + - label=backport-7.15 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.15" + title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" + - name: backport patches to 7.14 branch + conditions: + - merged + - base=master + - label=backport-7.14 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.14" + title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" + - name: backport patches to 7.13 branch + conditions: + - merged + - base=master + - label=backport-7.13 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.13" + title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" + - name: Automatic squash and merge when the checks succeed and the file docker-compose.yml is modified. + conditions: + - check-success=apm-ci/pr-merge + - label=automation + - files=docker-compose.yml + actions: + merge: + method: squash + strict: smart+fasttrack + - name: delete upstream branch with changes on docker-compose.yml after merging/closing it + conditions: + - or: + - merged + - closed + - and: + - label=automation + - head~=^update-.*-version + - files=docker-compose.yml + actions: + delete_head_branch: + - name: automatic merge when CI passes for the make update-beats + conditions: + - check-success=apm-ci/pr-merge + - label=automation + - base~=^update-beats + - files~=^(go.mod|go.sum|NOTICE.txt) + actions: + merge: + method: squash + strict: smart+fasttrack + - name: delete upstream branch after merging changes for the make update-beats + conditions: + - or: + - merged + - closed + - and: + - label=automation + - head~=^update-beats + actions: + delete_head_branch: \ No newline at end of file diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 00000000000..5e5c6bd172f --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,9 @@ +# Architecture of the APM Server + +This document gives a high-level overview of the architecture of the main APM Server components. +The main purpose of the APM Server is to ingest data. It validates, enriches and transforms data that it receives from a variety of APM agents into a dedicated format and passes it on to an output, such as Elasticsearch. + +## Ingest Flow +A high-level overview of incoming data and its flow through the APM Server until it is passed on to the output publisher pipeline. 
+ +![](./docs/images/ingest-flow.png) \ No newline at end of file diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 19af61fb98b..0f41a4388ec 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -1,4 +1,10 @@ include::./changelogs/head.asciidoc[] +include::./changelogs/8.0.asciidoc[] +include::./changelogs/7.14.asciidoc[] +include::./changelogs/7.13.asciidoc[] +include::./changelogs/7.12.asciidoc[] +include::./changelogs/7.11.asciidoc[] +include::./changelogs/7.10.asciidoc[] include::./changelogs/7.9.asciidoc[] include::./changelogs/7.8.asciidoc[] include::./changelogs/7.7.asciidoc[] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e871806d357..6bd335c941d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,25 +1,32 @@ # Contributing to the APM Server -The APM Server is open source and we love to receive contributions from our community — you! +APM Server is open source, and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, -submitting bug reports and feature requests or writing code. +submitting bug reports and feature requests, or writing code. -You can get in touch with us through [Discuss](https://discuss.elastic.co/c/apm), +If you want to be rewarded for your contributions, sign up for the +[Elastic Contributor Program](https://www.elastic.co/community/contributor). +Each time you make a valid contribution, you’ll earn points that increase your chances of winning prizes and being recognized as a top contributor. + +## Questions + +GitHub is reserved for bug reports and feature requests; it is not the place +for general questions. If you have a question or an unconfirmed bug, please +visit our [discussion forum](https://discuss.elastic.co/c/apm); feedback and ideas are always welcome. ## Code contributions -If you have a bugfix or new feature that you would like to contribute, -please find or open an issue about it first. -Talk about what you would like to do. -It may be that somebody is already working on it, -or that there are particular issues that you should know about before implementing the change. +If you have a bug fix or new feature that you would like to contribute, +please find or open an issue first. +It's important to talk about what you would like to do, +as there may already be someone working on it, +or there may be context to be aware of before implementing the change. -You will have to fork the `apm-server` repo, -please follow the instructions in the [readme](README.md). +Development instructions are available in the project [readme](README.md#apm-server-development). ### Submitting your changes @@ -41,9 +48,9 @@ a committer will tag the pull request with the target version(s). Once a version is released, new features are frozen for that minor version and will not be backported. For example, -if 6.2 was just released, -the soonest a new feature will be released is 6.3, -not 6.2.1. +if 7.10 was just released, +the soonest a new feature will be released is 7.11, +not 7.10.1. Breaking changes may need to wait until the next major version. See [semver](https://semver.org/) for general information about major/minor versions. Bug fixes may be backported on a case by case basis. @@ -56,3 +63,9 @@ and merged with "Create a merge commit". Straightforward backports may be merged without review. [Backport](https://github.com/sqren/backport) is recommended for automating the backport process. 
+ +### Examples + +This is a collection of example PRs for additions occurring somewhat frequently. + +* [Adding a new field to the Intake API and index it in Elasticsearch](https://github.com/elastic/apm-server/pull/4626#issue-555484976) diff --git a/Dockerfile b/Dockerfile index b763bdb2e58..7f149bd08de 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14.7 +FROM golang:1.16.6 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/Jenkinsfile b/Jenkinsfile index 71e5c7773b1..1fe8c775c7f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,6 +16,7 @@ pipeline { DOCKER_SECRET = 'secret/apm-team/ci/docker-registry/prod' DOCKER_REGISTRY = 'docker.elastic.co' DOCKER_IMAGE = "${env.DOCKER_REGISTRY}/observability-ci/apm-server" + ONLY_DOCS = "false" } options { timeout(time: 2, unit: 'HOURS') @@ -28,10 +29,11 @@ pipeline { quietPeriod(10) } triggers { - issueCommentTrigger('(?i).*(?:jenkins\\W+)?run\\W+(?:the\\W+)?(?:hey-apm|package\\W+)?tests(?:\\W+please)?.*') + issueCommentTrigger('(?i)(.*(?:jenkins\\W+)?run\\W+(?:the\\W+)?(?:(hey-apm|package|arm)\\W+)?tests(?:\\W+please)?.*|^\\/test|^\\/hey-apm|^\\/package)') } parameters { booleanParam(name: 'Run_As_Master_Branch', defaultValue: false, description: 'Allow to run any steps on a PR, some steps normally only run on master branch.') + booleanParam(name: 'arm_ci', defaultValue: true, description: 'Enable ARM build') booleanParam(name: 'linux_ci', defaultValue: true, description: 'Enable Linux build') booleanParam(name: 'osx_ci', defaultValue: true, description: 'Enable OSX CI') booleanParam(name: 'windows_ci', defaultValue: true, description: 'Enable Windows CI') @@ -40,7 +42,6 @@ pipeline { booleanParam(name: 'test_sys_env_ci', defaultValue: true, description: 'Enable system and environment test') booleanParam(name: 'bench_ci', defaultValue: true, description: 'Enable benchmarks') booleanParam(name: 'release_ci', defaultValue: true, description: 'Enable build the release packages') - booleanParam(name: 'kibana_update_ci', defaultValue: true, description: 'Enable build the Check kibana Obj. 
Updated') booleanParam(name: 'its_ci', defaultValue: true, description: 'Enable async ITs') string(name: 'DIAGNOSTIC_INTERVAL', defaultValue: "0", description: 'Elasticsearch detailed logging every X seconds') string(name: 'ES_LOG_LEVEL', defaultValue: "error", description: 'Elasticsearch error level') @@ -53,7 +54,6 @@ pipeline { environment { PATH = "${env.PATH}:${env.WORKSPACE}/bin" HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" } options { skipDefaultCheckout() } steps { @@ -64,7 +64,6 @@ pipeline { stash allowEmpty: true, name: 'source', useDefaultExcludes: false script { dir("${BASE_DIR}"){ - env.GO_VERSION = readFile(".go-version").trim() def regexps =[ "^_beats.*", "^apm-server.yml", @@ -72,13 +71,16 @@ pipeline { "^magefile.go", "^ingest.*", "^packaging.*", - "^tests/packaging.*", "^vendor/github.com/elastic/beats.*" ] + withGoEnv(){ + setEnvVar('APM_SERVER_VERSION', sh(label: 'Get beat version', script: 'make get-version', returnStdout: true)?.trim()) + } env.BEATS_UPDATED = isGitRegionMatch(patterns: regexps) - // Skip all the stages except docs for PR's with asciidoc changes only - env.ONLY_DOCS = isGitRegionMatch(patterns: [ '.*\\.asciidoc' ], comparator: 'regexp', shouldMatchAll: true) + whenTrue(isPR()) { + setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.asciidoc' ], comparator: 'regexp', shouldMatchAll: true)) + } } } } @@ -95,7 +97,6 @@ pipeline { environment { PATH = "${env.PATH}:${env.WORKSPACE}/bin" HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" } when { beforeAgent true @@ -109,7 +110,9 @@ deleteDir() unstash 'source' dir("${BASE_DIR}"){ - sh(label: 'Run intake', script: './script/jenkins/intake.sh') + withGoEnv(){ + sh(label: 'Run intake', script: './.ci/scripts/intake.sh') + } } } } @@ -133,11 +136,11 @@ withGithubNotify(context: 'Build - Linux') { deleteDir() unstash 'source' - golang(){ - dir(BASE_DIR){ + dir(BASE_DIR){ + withMageEnv(){ retry(2) { // Retry in case there are any errors to avoid temporary glitches sleep randomNumber(min: 5, max: 10) - sh(label: 'Linux build', script: './script/jenkins/build.sh') + sh(label: 'Linux build', script: './.ci/scripts/build.sh') } } } @@ -165,10 +168,12 @@ deleteDir() unstash 'source' dir(BASE_DIR){ - retry(2) { // Retry in case there are any errors to avoid temporary glitches - sleep randomNumber(min: 5, max: 10) - powershell(label: 'Windows build', script: '.\\script\\jenkins\\windows-build.ps1') - powershell(label: 'Run Window tests', script: '.\\script\\jenkins\\windows-test.ps1') + withMageEnv(){ + retry(2) { // Retry in case there are any errors to avoid temporary glitches + sleep randomNumber(min: 5, max: 10) + powershell(label: 'Windows build', script: '.\\.ci\\scripts\\windows-build.ps1') + powershell(label: 'Run Windows tests', script: '.\\.ci\\scripts\\windows-test.ps1') + } } } } @@ -185,7 +190,7 @@ Build on a mac environment. 
*/ stage('OSX build-test') { - agent { label 'macosx' } + agent { label 'macosx && x86_64' } options { skipDefaultCheckout() warnError('OSX execution failed') @@ -205,10 +210,12 @@ pipeline { deleteDir() unstash 'source' dir(BASE_DIR){ - retry(2) { // Retry in case there are any errors to avoid temporary glitches - sleep randomNumber(min: 5, max: 10) - sh(label: 'OSX build', script: '.ci/scripts/build-darwin.sh') - sh(label: 'Run Unit tests', script: '.ci/scripts/test-darwin.sh') + withMageEnv(){ + retry(2) { // Retry in case there are any errors to avoid temporary glitches + sleep randomNumber(min: 5, max: 10) + sh(label: 'OSX build', script: '.ci/scripts/build-darwin.sh') + sh(label: 'Run Unit tests', script: '.ci/scripts/test-darwin.sh') + } } } } @@ -219,6 +226,42 @@ pipeline { } } } + stage('ARM build-test') { + agent { label 'arm' } + options { + skipDefaultCheckout() + warnError('ARM execution failed') + } + when { + beforeAgent true + allOf { + expression { return params.arm_ci } + expression { return env.ONLY_DOCS == "false" } + } + } + environment { + HOME = "${env.WORKSPACE}" + } + steps { + withGithubNotify(context: 'Build-Test - ARM') { + deleteDir() + unstash 'source' + dir("${BASE_DIR}"){ + withMageEnv(){ + sh(label: 'ARM build', script: '.ci/scripts/build.sh') + sh(label: 'ARM Unit tests', script: './.ci/scripts/unit-test.sh') + } + } + } + } + post { + always { + dir("${BASE_DIR}/build"){ + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "junit-*.xml") + } + } + } + } /** Run unit tests and report junit results. */ @@ -228,7 +271,6 @@ pipeline { environment { PATH = "${env.PATH}:${env.WORKSPACE}/bin" HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" } when { beforeAgent true @@ -242,19 +284,23 @@ pipeline { deleteDir() unstash 'source' dir("${BASE_DIR}"){ - sh(label: 'Run Unit tests', script: './script/jenkins/unit-test.sh') + withMageEnv(){ + sh(label: 'Run Unit tests', script: './.ci/scripts/unit-test.sh') + } } } } post { always { - coverageReport("${BASE_DIR}/build/coverage") - junit(allowEmptyResults: true, - keepLongStdio: true, - testResults: "${BASE_DIR}/build/junit-*.xml" - ) - catchError(buildResult: 'SUCCESS', message: 'Failed to grab test results tar files', stageResult: 'SUCCESS') { - tar(file: "coverage-files.tgz", archive: true, dir: "coverage", pathPrefix: "${BASE_DIR}/build") + dir("${BASE_DIR}/build"){ + coverageReport("coverage") + junit(allowEmptyResults: true, + keepLongStdio: true, + testResults: "junit-*.xml" + ) + catchError(buildResult: 'SUCCESS', message: 'Failed to grab test results tar files', stageResult: 'SUCCESS') { + tar(file: "coverage-files.tgz", archive: true, dir: "coverage") + } } codecov(repo: env.REPO, basedir: "${BASE_DIR}", secret: "${CODECOV_SECRET}") } @@ -270,7 +316,6 @@ pipeline { environment { PATH = "${env.PATH}:${env.WORKSPACE}/bin" HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" } when { beforeAgent true @@ -284,23 +329,26 @@ pipeline { deleteDir() unstash 'source' dir("${BASE_DIR}"){ - sh(label: 'Run Linux tests', script: './script/jenkins/linux-test.sh') + withMageEnv(){ + sh(label: 'Run Linux tests', script: './.ci/scripts/linux-test.sh') + } } } } post { always { - dir("${BASE_DIR}"){ + dir("${BASE_DIR}/build"){ archiveArtifacts(allowEmptyArchive: true, artifacts: "docker-info/**", - defaultExcludes: false) - junit(allowEmptyResults: true, - keepLongStdio: true, - testResults: "**/build/TEST-*.xml" - ) - } - catchError(buildResult: 'SUCCESS', message: 'Failed to grab test results tar 
files', stageResult: 'SUCCESS') { - tar(file: "system-tests-linux-files.tgz", archive: true, dir: "system-tests", pathPrefix: "${BASE_DIR}/build") + defaultExcludes: false + ) + junit(allowEmptyResults: true, + keepLongStdio: true, + testResults: "**/TEST-*.xml" + ) + catchError(buildResult: 'SUCCESS', message: 'Failed to grab test results tar files', stageResult: 'SUCCESS') { + tar(file: "system-tests-linux-files.tgz", archive: true, dir: "system-tests") + } } } } @@ -328,44 +376,14 @@ pipeline { } steps { withGithubNotify(context: 'Benchmarking') { - deleteDir() - unstash 'source' - golang(){ - dir("${BASE_DIR}"){ - sh(label: 'Run benchmarks', script: './script/jenkins/bench.sh') - sendBenchmarks(file: 'bench.out', index: "benchmark-server") - } - } - } - } - } - /** - Checks if kibana objects are updated. - */ - stage('Check kibana Obj. Updated') { - agent { label 'linux && immutable' } - options { skipDefaultCheckout() } - environment { - PATH = "${env.PATH}:${env.WORKSPACE}/bin" - HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" - } - when { - beforeAgent true - allOf { - expression { return params.kibana_update_ci } - expression { return env.ONLY_DOCS == "false" } - } - } - steps { - withGithubNotify(context: 'Sync Kibana') { deleteDir() unstash 'source' dir("${BASE_DIR}"){ - catchError(buildResult: 'SUCCESS', message: 'Sync Kibana is not updated', stageResult: 'UNSTABLE') { - sh(label: 'Test Sync', script: './script/jenkins/sync.sh') + withMageEnv(){ + sh(label: 'Run benchmarks', script: './.ci/scripts/bench.sh') } } + sendBenchmarks(file: "${BASE_DIR}/bench.out", index: "benchmark-server") } } } @@ -374,16 +392,16 @@ pipeline { options { skipDefaultCheckout() } when { beforeAgent true - expression { return env.GITHUB_COMMENT?.contains('hey-apm tests') } + expression { return env.GITHUB_COMMENT?.contains('hey-apm tests') || env.GITHUB_COMMENT?.contains('/hey-apm')} } steps { withGithubNotify(context: 'Hey-Apm') { deleteDir() unstash 'source' - golang(){ - dockerLogin(secret: env.DOCKER_SECRET, registry: env.DOCKER_REGISTRY) - dir("${BASE_DIR}"){ - sh(label: 'Package & Push', script: "./script/jenkins/package-docker-snapshot.sh ${env.GIT_BASE_COMMIT} ${env.DOCKER_IMAGE}") + dockerLogin(secret: env.DOCKER_SECRET, registry: env.DOCKER_REGISTRY) + dir("${BASE_DIR}"){ + withMageEnv(){ + sh(label: 'Package & Push', script: "./.ci/scripts/package-docker-snapshot.sh ${env.GIT_BASE_COMMIT} ${env.DOCKER_IMAGE}") } } build(job: 'apm-server/apm-hey-test-benchmark', propagate: true, wait: true, @@ -399,7 +417,6 @@ pipeline { environment { PATH = "${env.PATH}:${env.WORKSPACE}/bin" HOME = "${env.WORKSPACE}" - GOPATH = "${env.WORKSPACE}" SNAPSHOT = "true" } when { @@ -408,8 +425,12 @@ pipeline { expression { return params.release_ci } expression { return env.ONLY_DOCS == "false" } anyOf { - expression { return env.BEATS_UPDATED != "false" } - expression { return env.GITHUB_COMMENT?.contains('package tests') } + branch 'master' + branch pattern: '\\d+\\.\\d+', comparator: 'REGEXP' + tag pattern: 'v\\d+\\.\\d+\\.\\d+.*', comparator: 'REGEXP' + expression { return isPR() && env.BEATS_UPDATED != "false" } + expression { return env.GITHUB_COMMENT?.contains('package tests') || env.GITHUB_COMMENT?.contains('/package')} + expression { return params.Run_As_Master_Branch } } } } @@ -419,31 +440,31 @@ pipeline { withGithubNotify(context: 'Package') { deleteDir() unstash 'source' - golang(){ - dir("${BASE_DIR}"){ - sh(label: 'Build packages', script: './script/jenkins/package.sh') - sh(label: 'Test 
packages install', script: './script/jenkins/test-install-packages.sh') + dir("${BASE_DIR}"){ + withMageEnv(){ + sh(label: 'Build packages', script: './.ci/scripts/package.sh') dockerLogin(secret: env.DOCKER_SECRET, registry: env.DOCKER_REGISTRY) - sh(label: 'Package & Push', script: "./script/jenkins/package-docker-snapshot.sh ${env.GIT_BASE_COMMIT} ${env.DOCKER_IMAGE}") + sh(label: 'Package & Push', script: "./.ci/scripts/package-docker-snapshot.sh ${env.GIT_BASE_COMMIT} ${env.DOCKER_IMAGE}") } } } } } stage('Publish') { - when { - beforeAgent true - anyOf { - branch 'master' - branch pattern: '\\d+\\.\\d+', comparator: 'REGEXP' - branch pattern: 'v\\d?', comparator: 'REGEXP' - tag pattern: 'v\\d+\\.\\d+\\.\\d+.*', comparator: 'REGEXP' - expression { return params.Run_As_Master_Branch } - expression { return env.BEATS_UPDATED != "false" } - } + environment { + BUCKET_URI = """${isPR() ? "gs://${JOB_GCS_BUCKET}/pull-requests/pr-${env.CHANGE_ID}" : "gs://${JOB_GCS_BUCKET}/snapshots"}""" } steps { - googleStorageUpload(bucket: "gs://${JOB_GCS_BUCKET}/snapshots", + // Upload files to the default location + googleStorageUpload(bucket: "${BUCKET_URI}", + credentialsId: "${JOB_GCS_CREDENTIALS}", + pathPrefix: "${BASE_DIR}/build/distributions/", + pattern: "${BASE_DIR}/build/distributions/**/*", + sharedPublicly: true, + showInline: true) + + // Copy those files to another location with the sha commit to test them afterward. + googleStorageUpload(bucket: "gs://${JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT}", + credentialsId: "${JOB_GCS_CREDENTIALS}", + pathPrefix: "${BASE_DIR}/build/distributions/", + pattern: "${BASE_DIR}/build/distributions/**/*", @@ -487,21 +508,17 @@ } } post { + success { + writeFile(file: 'beats-tester.properties', + text: """\ + ## To be consumed by the beats-tester pipeline + COMMIT=${env.GIT_BASE_COMMIT} + APM_URL_BASE=https://storage.googleapis.com/${env.JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT} + VERSION=${env.APM_SERVER_VERSION}-SNAPSHOT""".stripIndent()) // stripIndent() requires '''/ + archiveArtifacts artifacts: 'beats-tester.properties' + } cleanup { notifyBuildResult() } } } } - -def golang(Closure body){ - def golangDocker - retry(3) { // Retry in case there are any errors when building the docker images (to avoid temporary glitches) - sleep randomNumber(min: 2, max: 5) - golangDocker = docker.build('golang-mage', "--build-arg GO_VERSION=${GO_VERSION} -f ${BASE_DIR}/.ci/docker/golang-mage/Dockerfile ${BASE_DIR}") - } - withEnv(["HOME=${WORKSPACE}", "GOPATH=${WORKSPACE}", "SHELL=/bin/bash"]) { - golangDocker.inside('-v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock'){ - body() - } - } -} diff --git a/Makefile b/Makefile index f025a9bc826..fcc788a5f28 100644 --- a/Makefile +++ b/Makefile @@ -5,17 +5,24 @@ # Enforce use of modules. export GO111MODULE=on -GOOSBUILD=./build/$(shell go env GOOS) +# Ensure the Go version in .go-version is installed and used. +GOROOT?=$(shell ./script/run_with_go_ver go env GOROOT) +GO:=$(GOROOT)/bin/go +export PATH:=$(GOROOT)/bin:$(PATH) + +GOOSBUILD:=./build/$(shell $(GO) env GOOS) APPROVALS=$(GOOSBUILD)/approvals +GENPACKAGE=$(GOOSBUILD)/genpackage GOIMPORTS=$(GOOSBUILD)/goimports GOLICENSER=$(GOOSBUILD)/go-licenser GOLINT=$(GOOSBUILD)/golint MAGE=$(GOOSBUILD)/mage REVIEWDOG=$(GOOSBUILD)/reviewdog STATICCHECK=$(GOOSBUILD)/staticcheck +ELASTICPACKAGE=$(GOOSBUILD)/elastic-package PYTHON_ENV?=. 
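Note: the pinned-toolchain preamble introduced just above is the core of this Makefile change: every recipe now invokes $(GO) rather than whatever go binary happens to be first on the caller's PATH. A minimal standalone sketch of the same bootstrap, assuming, as the Makefile does, that script/run_with_go_ver installs the version recorded in .go-version and then runs the command it is given:

    # Sketch of the pinned-Go bootstrap (shell equivalent of the Make preamble).
    # Assumes script/run_with_go_ver installs the .go-version toolchain and then
    # executes its arguments with that toolchain.
    GOROOT="$(./script/run_with_go_ver go env GOROOT)"   # resolve the pinned toolchain once
    GO="${GOROOT}/bin/go"                                # call this instead of bare "go"
    export PATH="${GOROOT}/bin:${PATH}"                  # child processes see the same toolchain
    "${GO}" version                                      # reports the pinned version, not the host's

Exporting PATH alongside $(GO) matters because tools spawned by the recipes (mage, test helpers) may shell out to go themselves; with the pinned GOROOT/bin first on PATH they resolve the same toolchain.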
-PYTHON_BIN=$(PYTHON_ENV)/build/ve/$(shell go env GOOS)/bin +PYTHON_BIN:=$(PYTHON_ENV)/build/ve/$(shell $(GO) env GOOS)/bin PYTHON=$(PYTHON_BIN)/python # Create a local config.mk file to override configuration, @@ -30,23 +37,23 @@ PYTHON=$(PYTHON_BIN)/python .PHONY: apm-server apm-server: - @go build -o $@ ./x-pack/apm-server + @$(GO) build -o $@ ./x-pack/apm-server .PHONY: apm-server-oss apm-server-oss: - @go build -o $@ + @$(GO) build -o $@ .PHONY: apm-server.test apm-server.test: - go test -c -coverpkg=github.com/elastic/apm-server/... ./x-pack/apm-server + $(GO) test -c -coverpkg=github.com/elastic/apm-server/... ./x-pack/apm-server .PHONY: apm-server-oss.test apm-server-oss.test: - go test -c -coverpkg=github.com/elastic/apm-server/... + $(GO) test -c -coverpkg=github.com/elastic/apm-server/... .PHONY: test test: - go test -v ./... + $(GO) test -v ./... .PHONY: clean: $(MAGE) @@ -65,19 +72,19 @@ SYSTEM_TEST_TARGET?=./tests/system PYTEST_OPTIONS?=--timeout=90 --durations=20 --junit-xml=build/TEST-system.xml .PHONY: check-full -check-full: update check golint staticcheck +check-full: update check golint staticcheck check-docker-compose .PHONY: check-approvals check-approvals: $(APPROVALS) @$(APPROVALS) .PHONY: check -check: $(MAGE) check-headers +check: $(MAGE) check-fmt check-headers check-package @$(MAGE) check .PHONY: bench bench: - @go test -benchmem -run=XXX -benchtime=100ms -bench='.*' ./... + @$(GO) test -benchmem -run=XXX -benchtime=100ms -bench='.*' ./... .PHONY: system-tests system-tests: $(PYTHON_BIN) apm-server.test @@ -105,14 +112,18 @@ docker-compose.override.yml: # Rules for updating config files, fields.yml, etc. ############################################################################## -update: fields go-generate add-headers copy-docs notice $(MAGE) +update: fields go-generate add-headers copy-docs gen-package notice $(MAGE) @$(MAGE) update fields_sources=\ - _meta/fields.common.yml \ $(shell find model -name fields.yml) \ $(shell find x-pack/apm-server/fields -name fields.yml) +.PHONY: gen-package gen-package-only +gen-package: gen-package-only format-package build-package +gen-package-only: $(GENPACKAGE) + @$(GENPACKAGE) + fields: include/fields.go x-pack/apm-server/include/fields.go include/fields.go x-pack/apm-server/include/fields.go: $(MAGE) magefile.go $(fields_sources) @$(MAGE) fields @@ -123,19 +134,24 @@ apm-server.yml apm-server.docker.yml: $(MAGE) magefile.go _meta/beat.yml .PHONY: go-generate go-generate: - @go generate + @$(GO) generate . ./ingest/pipeline notice: NOTICE.txt -NOTICE.txt: $(PYTHON) go.mod - @$(PYTHON) script/generate_notice.py -b "Elastic APM Server" -s "github.com/elastic/beats*" . ./x-pack/apm-server +NOTICE.txt: $(PYTHON) go.mod tools/go.mod + @$(PYTHON) script/generate_notice.py . ./x-pack/apm-server .PHONY: add-headers add-headers: $(GOLICENSER) ifndef CHECK_HEADERS_DISABLED - @$(GOLICENSER) -exclude x-pack + @$(GOLICENSER) -exclude x-pack -exclude internal/otel_collector @$(GOLICENSER) -license Elastic x-pack endif +## get-version : Get the apm server version +.PHONY: get-version +get-version: + @grep defaultBeatVersion cmd/version.go | cut -d'=' -f2 | tr -d '"' + ############################################################################## # Documentation. 
############################################################################## @@ -159,41 +175,30 @@ copy-docs: @cp tests/system/error.approved.json docs/data/elasticsearch/generated/errors.json @cp tests/system/transaction.approved.json docs/data/elasticsearch/generated/transactions.json @cp tests/system/spans.approved.json docs/data/elasticsearch/generated/spans.json - @cp tests/system/metricset.approved.json docs/data/elasticsearch/generated/metricsets.json ############################################################################## # Beats synchronisation. ############################################################################## BEATS_VERSION?=master -BEATS_MODULE=$(shell go list -m -f {{.Path}} all | grep github.com/elastic/beats) +BEATS_MODULE:=$(shell $(GO) list -m -f {{.Path}} all | grep github.com/elastic/beats) .PHONY: update-beats update-beats: update-beats-module update - @echo --- Use this commit message: Update to elastic/beats@$(shell go list -m -f {{.Version}} $(BEATS_MODULE) | cut -d- -f3) + @echo --- Use this commit message: Update to elastic/beats@$(shell $(GO) list -m -f {{.Version}} $(BEATS_MODULE) | cut -d- -f3) .PHONY: update-beats-module update-beats-module: - go get -d -u $(BEATS_MODULE)@$(BEATS_VERSION) && go mod tidy - diff -u .go-version $$(go list -m -f {{.Dir}} $(BEATS_MODULE))/.go-version \ - || { code=$$?; echo ".go-version out of sync with Beats"; exit $$code; } - rsync -crv --delete $$(go list -m -f {{.Dir}} $(BEATS_MODULE))/testing/environments testing/ - -############################################################################## -# Kibana synchronisation. -############################################################################## - -.PHONY: are-kibana-objects-updated -are-kibana-objects-updated: $(PYTHON) build/index-pattern.json - @$(PYTHON) ./script/are_kibana_saved_objects_updated.py --branch ${BEATS_VERSION} build/index-pattern.json -build/index-pattern.json: $(PYTHON) apm-server - @./apm-server --strict.perms=false export index-pattern > $@ + $(GO) get -d -u $(BEATS_MODULE)@$(BEATS_VERSION) && $(GO) mod tidy + cp -f $$($(GO) list -m -f {{.Dir}} $(BEATS_MODULE))/.go-version .go-version + find . -maxdepth 2 -name Dockerfile -exec sed -i'.bck' -E -e "s#(FROM golang):[0-9]+\.[0-9]+\.[0-9]+#\1:$$(cat .go-version)#g" {} \; + sed -i'.bck' -E -e "s#(:go-version): [0-9]+\.[0-9]+\.[0-9]+#\1: $$(cat .go-version)#g" docs/version.asciidoc ############################################################################## # Linting, style-checking, license header checks, etc. ############################################################################## -GOLINT_TARGETS?=$(shell go list ./...) +GOLINT_TARGETS?=$(shell $(GO) list ./...) GOLINT_UPSTREAM?=origin/master REVIEWDOG_FLAGS?=-conf=reviewdog.yml -f=golint -diff="git diff $(GOLINT_UPSTREAM)" GOLINT_COMMAND=$(GOLINT) ${GOLINT_TARGETS} | grep -v "should have comment" | $(REVIEWDOG) $(REVIEWDOG_FLAGS) @@ -213,18 +218,35 @@ check-changelogs: $(PYTHON) .PHONY: check-headers check-headers: $(GOLICENSER) ifndef CHECK_HEADERS_DISABLED - @$(GOLICENSER) -d -exclude build -exclude x-pack + @$(GOLICENSER) -d -exclude build -exclude x-pack -exclude internal/otel_collector @$(GOLICENSER) -d -exclude build -license Elastic x-pack endif -# TODO(axw) once we move to modules, start using "mage fmt" instead. 
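Note: the update-beats-module target above now keeps the Go toolchain pin synchronised in three places: .go-version itself (copied from the Beats module), any golang base image in a Dockerfile, and the :go-version: attribute in the docs. The same synchronisation as a standalone sketch (the regexes and the .bck backup suffix are taken verbatim from the target; it assumes .go-version holds a plain X.Y.Z string):

    # Sketch: propagate the version pinned in .go-version, mirroring the
    # sed invocations in update-beats-module above.
    GO_VERSION="$(cat .go-version)"
    # Re-pin any "FROM golang:X.Y.Z" base image up to two directories deep.
    find . -maxdepth 2 -name Dockerfile -exec \
      sed -i'.bck' -E -e "s#(FROM golang):[0-9]+\.[0-9]+\.[0-9]+#\1:${GO_VERSION}#g" {} \;
    # Re-pin the ":go-version:" attribute in the docs.
    sed -i'.bck' -E -e "s#(:go-version): [0-9]+\.[0-9]+\.[0-9]+#\1: ${GO_VERSION}#g" docs/version.asciidoc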
-.PHONY: gofmt autopep8 +.PHONY: check-docker-compose +check-docker-compose: $(PYTHON_BIN) + @PATH=$(PYTHON_BIN):$(PATH) ./script/check_docker_compose.sh $(BEATS_VERSION) + +.PHONY: check-package format-package build-package +check-package: $(ELASTICPACKAGE) + @(cd apmpackage/apm; $(CURDIR)/$(ELASTICPACKAGE) check) +format-package: $(ELASTICPACKAGE) + @(cd apmpackage/apm; $(CURDIR)/$(ELASTICPACKAGE) format) +build-package: $(ELASTICPACKAGE) + @(cd apmpackage/apm; $(CURDIR)/$(ELASTICPACKAGE) build) + +.PHONY: check-gofmt check-autopep8 gofmt autopep8 +check-fmt: check-gofmt check-autopep8 fmt: gofmt autopep8 +check-gofmt: $(GOIMPORTS) + @PATH=$(GOOSBUILD):$(PATH) sh script/check_goimports.sh gofmt: $(GOIMPORTS) add-headers @echo "fmt - goimports: Formatting Go code" - @$(GOIMPORTS) -local github.com/elastic -l -w $(shell find . -type f -name '*.go' 2>/dev/null) -autopep8: $(MAGE) - @$(MAGE) pythonAutopep8 + @PATH=$(GOOSBUILD):$(PATH) GOIMPORTSFLAGS=-w sh script/goimports.sh +check-autopep8: $(PYTHON_BIN) + @PATH=$(PYTHON_BIN):$(PATH) sh script/autopep8_all.sh --diff --exit-code +autopep8: $(PYTHON_BIN) + @echo "fmt - autopep8: Formatting Python code" + @PATH=$(PYTHON_BIN):$(PATH) sh script/autopep8_all.sh --in-place ############################################################################## # Rules for creating and installing build tools. @@ -234,26 +256,33 @@ BIN_MAGE=$(GOOSBUILD)/bin/mage # BIN_MAGE is the standard "mage" binary. $(BIN_MAGE): go.mod - go build -o $@ github.com/magefile/mage + $(GO) build -o $@ github.com/magefile/mage # MAGE is the compiled magefile. $(MAGE): magefile.go $(BIN_MAGE) $(BIN_MAGE) -compile=$@ -$(STATICCHECK): go.mod - go build -o $@ honnef.co/go/tools/cmd/staticcheck +.PHONY: $(GENPACKAGE) +$(GENPACKAGE): + @$(GO) build -o $@ github.com/elastic/apm-server/apmpackage/cmd/gen-package $(GOLINT): go.mod - go build -o $@ golang.org/x/lint/golint + $(GO) build -o $@ golang.org/x/lint/golint $(GOIMPORTS): go.mod - go build -o $@ golang.org/x/tools/cmd/goimports + $(GO) build -o $@ golang.org/x/tools/cmd/goimports + +$(STATICCHECK): tools/go.mod + $(GO) build -o $@ -modfile=$< honnef.co/go/tools/cmd/staticcheck + +$(GOLICENSER): tools/go.mod + $(GO) build -o $@ -modfile=$< github.com/elastic/go-licenser -$(GOLICENSER): go.mod - go build -o $@ github.com/elastic/go-licenser +$(REVIEWDOG): tools/go.mod + $(GO) build -o $@ -modfile=$< github.com/reviewdog/reviewdog/cmd/reviewdog -$(REVIEWDOG): go.mod - go build -o $@ github.com/reviewdog/reviewdog/cmd/reviewdog +$(ELASTICPACKAGE): tools/go.mod + $(GO) build -o $@ -modfile=$< github.com/elastic/elastic-package $(PYTHON): $(PYTHON_BIN) $(PYTHON_BIN): $(PYTHON_BIN)/activate @@ -263,22 +292,19 @@ $(PYTHON_BIN)/activate: $(MAGE) .PHONY: $(APPROVALS) $(APPROVALS): - @go build -o $@ github.com/elastic/apm-server/approvaltest/cmd/check-approvals + @$(GO) build -o $@ github.com/elastic/apm-server/approvaltest/cmd/check-approvals ############################################################################## # Release manager. ############################################################################## -# Builds a snapshot release. The Go version defined in .go-version will be -# installed and used for the build. +# Builds a snapshot release. release-manager-snapshot: export SNAPSHOT=true -release-manager-snapshot: release-manager-release +release-manager-snapshot: release -# Builds a snapshot release. 
The Go version defined in .go-version will be -# installed and used for the build. +# Builds a snapshot release. .PHONY: release-manager-release -release-manager-release: - script/run_with_go_ver $(MAKE) release +release-manager-release: release .PHONY: release release: export PATH:=$(dir $(BIN_MAGE)):$(PATH) diff --git a/NOTICE.txt b/NOTICE.txt index 4003a8a216f..a5bddc7de8d 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,4326 +1,22742 @@ Elastic APM Server -Copyright 2017-2020 Elasticsearch BV +Copyright 2014-2021 Elasticsearch BV This product includes software developed by The Apache Software Foundation (http://www.apache.org/). -========================================================================== +================================================================================ Third party libraries used by the Elastic APM Server project: -========================================================================== +================================================================================ + + +-------------------------------------------------------------------------------- +Dependency : github.com/apache/thrift +Version: v0.14.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.14.2/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. 
+ +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. 
+ +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- + + +-------------------------------------------------------------------------------- +Dependency : github.com/cespare/xxhash/v2 +Version: v2.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cespare/xxhash/v2@v2.1.1/LICENSE.txt: + +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/dgraph-io/badger/v2 +Version: v2.2007.3-0.20201012072640-f5a7e0a1c83b +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/badger/v2@v2.2007.3-0.20201012072640-f5a7e0a1c83b/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + +-------------------------------------------------------------------------------- +Dependency : github.com/dustin/go-humanize +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dustin/go-humanize@v1.0.0/LICENSE: + +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/beats/v7 +Version: v7.0.0-alpha2.0.20210911003435-13e34660f62a +Licence type (autodetected): Elastic +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/beats/v7@v7.0.0-alpha2.0.20210911003435-13e34660f62a/LICENSE.txt: + +Source code in this repository is variously licensed under the Apache License +Version 2.0, an Apache compatible license, or the Elastic License. 
Outside of +the "x-pack" folder, source code in a given file is licensed under the Apache +License Version 2.0, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. +Within the "x-pack" folder, source code in a given file is licensed under the +Elastic License, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. + +The build produces two sets of binaries - one set that falls under the Elastic +License and another set that falls under Apache License Version 2.0. The +binaries that contain `-oss` in the artifact name are licensed under the Apache +License Version 2.0. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/ecs +Version: v1.11.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/ecs@v1.11.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/gmux +Version: v0.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/gmux@v0.1.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Elastic and contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-elasticsearch/v7 +Version: v7.5.1-0.20210728153421-6462d8b84e7d +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v7@v7.5.1-0.20210728153421-6462d8b84e7d/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-elasticsearch/v8 +Version: v8.0.0-20210727161915-8cf93274b968 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.0.0-20210727161915-8cf93274b968/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-hdrhistogram +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-hdrhistogram@v0.1.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Coda Hale +Copyright (c) 2020 Elasticsearch BV + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-ucfg +Version: v0.8.4-0.20200415140258-1232bd4774a6 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-ucfg@v0.8.4-0.20200415140258-1232bd4774a6/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/go-sourcemap/sourcemap +Version: v2.1.3+incompatible +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-sourcemap/sourcemap@v2.1.3+incompatible/LICENSE: + +Copyright (c) 2016 The github.com/go-sourcemap/sourcemap Contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gofrs/uuid +Version: v4.0.0+incompatible +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gofrs/uuid@v4.0.0+incompatible/LICENSE: + +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gogo/protobuf +Version: v1.3.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gogo/protobuf@v1.3.2/LICENSE: + +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/pprof +Version: v0.0.0-20210609004039-a478d1d731e9 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-20210609004039-a478d1d731e9/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/hashicorp/go-multierror
+Version: v1.1.1
+Licence type (autodetected): MPL-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-multierror@v1.1.1/LICENSE:
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/hashicorp/golang-lru
+Version: v0.5.4
+Licence type (autodetected): MPL-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/hashicorp/golang-lru@v0.5.4/LICENSE:
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/jaegertracing/jaeger
+Version: v1.25.0
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/jaegertracing/jaeger@v1.25.0/LICENSE:
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/json-iterator/go
+Version: v1.1.11
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.11/LICENSE:
+
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/libp2p/go-reuseport
+Version: v0.0.2
+Licence type (autodetected): ISC
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/libp2p/go-reuseport@v0.0.2/LICENSE:
+
+Copyright (c) 2013 Conformal Systems LLC.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+--------------------------------------------------------------------------------
+Dependency : github.com/modern-go/reflect2
+Version: v1.0.1
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/modern-go/reflect2@v1.0.1/LICENSE:
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger
+Version: v0.34.0
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger@v0.34.0/LICENSE:
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/patrickmn/go-cache
+Version: v2.1.0+incompatible
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/patrickmn/go-cache@v2.1.0+incompatible/LICENSE:
+
+Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/pkg/errors
+Version: v0.9.1
+Licence type (autodetected): BSD-2-Clause
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE:
+
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/ryanuber/go-glob
+Version: v1.0.0
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/ryanuber/go-glob@v1.0.0/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Ryan Uber
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/spf13/cobra
+Version: v1.2.1
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/spf13/cobra@v1.2.1/LICENSE.txt:
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/spf13/pflag
+Version: v1.0.5
+Licence type (autodetected): BSD-3-Clause
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/spf13/pflag@v1.0.5/LICENSE:
+
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/apm +Version: v1.13.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm@v1.13.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/apm/module/apmelasticsearch +Version: v1.12.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasticsearch@v1.12.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/apm/module/apmgrpc +Version: v1.12.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgrpc@v1.12.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/apm/module/apmhttp +Version: v1.12.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp@v1.12.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/fastjson +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/fastjson@v1.1.0/LICENSE: + +Copyright 2018 Elasticsearch BV + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +--- + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector +Version: v0.34.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/model +Version: v0.34.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/model@v0.34.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.uber.org/atomic +Version: v1.9.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.9.0/LICENSE.txt: + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : go.uber.org/zap +Version: v1.19.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.19.1/LICENSE.txt: + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.0.0-20210908191846-a5e095526f91 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20210908191846-a5e095526f91/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.0.0-20210220032951-036812b2e83c +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.0.0-20210220032951-036812b2e83c/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/time +Version: v0.0.0-20210611083556-38a9dc6acbc6 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20210611083556-38a9dc6acbc6/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : google.golang.org/grpc +Version: v1.40.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.40.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v2 +Version: v2.4.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v2@v2.4.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/go-diodes +Version: v0.0.0-20190809170250-f77fb823c7ee +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/go-loggregator +Version: v7.4.0+incompatible +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-loggregator@v7.4.0+incompatible/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/gofileutils +Version: v0.0.0-20170111115228-4d0c80011a0f +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@v0.0.0-20170111115228-4d0c80011a0f/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+
+--------------------------------------------------------------------------------
+Dependency : code.cloudfoundry.org/rfc5424
+Version: v0.0.0-20180905210152-236a6d29298a
+Licence type (autodetected): BSD-2-Clause
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/rfc5424@v0.0.0-20180905210152-236a6d29298a/LICENSE:
+
+BSD 2-Clause License
+
+Copyright (c) 2016, Ross Kinder
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/DataDog/zstd
+Version: v1.4.4
+Licence type (autodetected): BSD-3-Clause
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/!data!dog/zstd@v1.4.4/LICENSE:
+
+Simplified BSD License
+
+Copyright (c) 2016, Datadog
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the names of its contributors
+   may be used to endorse or promote products derived from this software
+   without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/Masterminds/semver
+Version: v1.4.2
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/!masterminds/semver@v1.4.2/LICENSE.txt:
+
+The Masterminds
+Copyright (C) 2014-2015, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/bi-zone/go-winio
+Version: v0.4.15
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/bi-zone/go-winio@v0.4.15/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/elastic/sarama
+Version: v1.19.1-0.20210120173147-5c8cb347d877
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/elastic/sarama@v1.19.1-0.20210120173147-5c8cb347d877/LICENSE:
+
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/StackExchange/wmi
+Version: v1.2.1
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/!stack!exchange/wmi@v1.2.1/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2013 Stack Exchange
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/armon/go-radix
+Version: v1.0.0
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/aws/aws-sdk-go-v2
+Version: v0.9.0
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v0.9.0/LICENSE.txt:
+
+
+   Apache License
+   Version 2.0, January 2004
+   http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/cespare/xxhash +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cespare/xxhash@v1.1.0/LICENSE.txt: + +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/cloudfoundry-community/go-cfclient +Version: v0.0.0-20190808214049-35bcce23fc5f +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cloudfoundry-community/go-cfclient@v0.0.0-20190808214049-35bcce23fc5f/LICENSE: + +The MIT License + +Copyright (c) 2017 Long Nguyen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/cloudfoundry/noaa +Version: v2.1.0+incompatible +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cloudfoundry/noaa@v2.1.0+incompatible/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +-------------------------------------------------------------------------------- +Dependency : github.com/cloudfoundry/sonde-go +Version: v0.0.0-20171206171820-b33733203bb4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cloudfoundry/sonde-go@v0.0.0-20171206171820-b33733203bb4/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + +-------------------------------------------------------------------------------- +Dependency : github.com/containerd/containerd +Version: v1.3.3 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.3.3/LICENSE: + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/davecgh/go-spew +Version: v1.1.1 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/dgraph-io/badger/v3 +Version: v3.2103.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/badger/v3@v3.2103.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + +-------------------------------------------------------------------------------- +Dependency : github.com/dgraph-io/ristretto +Version: v0.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.1.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/dgryski/go-farm
+Version: v0.0.0-20200201041132-a6ae2369ad13
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE:
+
+Copyright (c) 2014-2017 Damian Gryski
+Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/dlclark/regexp2
+Version: v1.4.0
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/dlclark/regexp2@v1.4.0/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) Doug Clark
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/docker/distribution
+Version: v2.7.1+incompatible
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.7.1+incompatible/LICENSE:
+
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/docker/engine
+Version: v0.0.0-20191113042239-ea84732a7725
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/docker/engine@v0.0.0-20191113042239-ea84732a7725/LICENSE:
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2013-2018 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/docker/go-connections
+Version: v0.4.0
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v0.4.0/LICENSE:
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/docker/go-units
+Version: v0.4.0
+Licence type (autodetected): Apache-2.0
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/docker/go-units@v0.4.0/LICENSE:
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/andrewkroh/goja
+Version: v0.0.0-20190128172624-dd2ac4456e20
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/andrewkroh/goja@v0.0.0-20190128172624-dd2ac4456e20/LICENSE:
+
+Copyright (c) 2016 Dmitry Panov
+
+Copyright (c) 2012 Robert Krimen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/dop251/goja_nodejs
+Version: v0.0.0-20171011081505-adff31b136e6
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/dop251/goja_nodejs@v0.0.0-20171011081505-adff31b136e6/LICENSE:
+
+Copyright (c) 2016 Dmitry Panov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/eapache/go-resiliency
+Version: v1.2.0
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/eapache/go-resiliency@v1.2.0/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/eapache/go-xerial-snappy
+Version: v0.0.0-20180814174437-776d5712da21
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+--------------------------------------------------------------------------------
+Dependency : github.com/eapache/queue
+Version: v1.1.0
+Licence type (autodetected): MIT
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/eapache/queue@v1.1.0/LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+Dependency : github.com/elastic/elastic-agent-client/v7
+Version: v7.0.0-20210727140539-f0905d9377f6
+Licence type (autodetected): Elastic
+--------------------------------------------------------------------------------
+
+Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20210727140539-f0905d9377f6/LICENSE.txt:
+
+ELASTIC LICENSE AGREEMENT
+
+PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH
+CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF
+THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE")
+THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW,
+CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY
+INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU
+ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE
+WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE
+GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON
+BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL
+AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF
+SUCH ENTITY.
+
+Posted Date: April 20, 2018
+
+This Agreement is entered into by and between Elasticsearch BV ("Elastic") and
+You, or the legal entity on behalf of whom You are acting (as applicable,
+"You").
+
+1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE
+SOFTWARE
+
+  1.1 Object Code End User License. Subject to the terms and conditions of
+  Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and
+  for so long as you are not in breach of any provision of this Agreement, a
+  License to the Basic Features and Functions of the Elastic Software.
+
+  1.2 Reservation of Rights; Restrictions.
As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. 
IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. 
+ + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-concert +Version: v0.2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.2.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-lumber +Version: v0.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-lumber@v0.1.0/LICENSE: + +Copyright (c) 2012–2016 Elasticsearch + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-seccomp-bpf +Version: v1.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-seccomp-bpf@v1.1.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-structform +Version: v0.0.9 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.9/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2012–2018 Elastic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-sysinfo +Version: v1.7.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.7.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-txfile +Version: v0.0.7 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-txfile@v0.0.7/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-windows +Version: v1.0.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0.1/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/gosigar +Version: v0.14.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.14.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/fatih/color +Version: v1.12.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/fatih/color@v1.12.0/LICENSE.md: + +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/felixge/httpsnoop +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/felixge/httpsnoop@v1.0.2/LICENSE.txt: + +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/adriansr/fsnotify +Version: v0.0.0-20180417234312-c9bbe1f46f1d +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/adriansr/fsnotify@v0.0.0-20180417234312-c9bbe1f46f1d/LICENSE: + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/go-logr/logr +Version: v0.2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.2.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/go-ole/go-ole +Version: v1.2.5 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-ole/go-ole@v1.2.5/LICENSE: + +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gofrs/flock +Version: v0.7.2-0.20190320160742-5135e617513b +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gofrs/flock@v0.7.2-0.20190320160742-5135e617513b/LICENSE: + +Copyright (c) 2015, Tim Heckman +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/gogo/googleapis +Version: v1.4.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gogo/googleapis@v1.4.1/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015, Google Inc + Copyright 2018, GoGo Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/golang/glog +Version: v0.0.0-20160126235308-23def4e6c14b +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +No licence file provided. + +-------------------------------------------------------------------------------- +Dependency : github.com/golang/groupcache +Version: v0.0.0-20210331224755-41bb18bfe9da +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/groupcache@v0.0.0-20210331224755-41bb18bfe9da/LICENSE: + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/golang/protobuf +Version: v1.5.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.5.2/LICENSE: + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/golang/snappy +Version: v0.0.3 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.3/LICENSE: + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gomodule/redigo +Version: v1.8.3 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gomodule/redigo@v1.8.3/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/flatbuffers +Version: v1.12.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v1.12.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-cmp +Version: v0.5.6 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-cmp@v0.5.6/LICENSE: + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/gofuzz +Version: v1.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/googleapis/gnostic +Version: v0.4.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ --------------------------------------------------------------------- -Dependency: code.cloudfoundry.org/go-diodes -Revision: f77fb823c7ee -License type (autodetected): Apache-2.0 -Contents of "NOTICE": +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/mux +Version: v1.8.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/websocket +Version: v1.4.2 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- - Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. +Contents of probable licence file $GOMODCACHE/github.com/gorilla/websocket@v1.4.2/LICENSE: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - http://www.apache.org/licenses/LICENSE-2.0 +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. - This project may include a number of subcomponents with separate - copyright notices and license terms. 
Your use of these subcomponents - is subject to the terms and conditions of each subcomponent's license, - as noted in the LICENSE file. + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. --------------------------------------------------------------------- -Dependency: code.cloudfoundry.org/go-loggregator -Version: v7.4.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - go-loggregator - Copyright (c) 2017-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. +-------------------------------------------------------------------------------- +Dependency : github.com/h2non/filetype +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1/LICENSE: - http://www.apache.org/licenses/LICENSE-2.0 +The MIT License - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright (c) Tomas Aparicio --------------------------------------------------------------------- -Dependency: code.cloudfoundry.org/gofileutils -Revision: 4d0c80011a0f -License type (autodetected): Apache-2.0 -Contents of "NOTICE": +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: - Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. - This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. - This project is licensed to you under the Apache License, Version 2.0 (the "License"). - You may not use this project except in compliance with the License. +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/cronexpr +Version: v1.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/cronexpr@v1.1.0/APLv2: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - This project may include a number of subcomponents with separate copyright notices - and license terms. Your use of these subcomponents is subject to the terms and - conditions of the subcomponent's license, as noted in the LICENSE file. + http://www.apache.org/licenses/LICENSE-2.0 --------------------------------------------------------------------- -Dependency: code.cloudfoundry.org/rfc5424 -Revision: 236a6d29298a -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - BSD 2-Clause License - Copyright (c) 2016, Ross Kinder - All rights reserved. +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/errwrap +Version: v1.1.0 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/errwrap@v1.1.0/LICENSE: - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +Mozilla Public License, version 2.0 - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +1. Definitions - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: github.com/apache/thrift -Revision: b2a4d4ae21c7 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": - - Apache Thrift - Copyright 2006-2010 The Apache Software Foundation. - - This product includes software developed at - The Apache Software Foundation (http://www.apache.org/). - --------------------------------------------------------------------- -Dependency: github.com/armon/go-radix -Version: v1.0.0 -License type (autodetected): MIT -Contents of "LICENSE": +1.1. “Contributor” - The MIT License (MIT) + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. - Copyright (c) 2014 Armon Dadgar +1.2. “Contributor Version” - Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. +1.3. “Contribution” - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + means Covered Software of a particular Contributor. --------------------------------------------------------------------- -Dependency: github.com/aws/aws-sdk-go-v2 -Version: v0.9.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": +1.4. “Covered Software” - AWS SDK for Go - Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. - Copyright 2014-2015 Stripe, Inc. + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. --------------------------------------------------------------------- -Dependency: github.com/census-instrumentation/opencensus-proto -Version: v0.2.1 -License type (autodetected): Apache-2.0 +1.5. 
“Incompatible With Secondary Licenses” + means --------------------------------------------------------------------- -Dependency: github.com/cespare/xxhash/v2 -Version: v2.1.1 -License type (autodetected): MIT -Contents of "LICENSE.txt": - - Copyright (c) 2016 Caleb Spare - - MIT License - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/cloudfoundry-community/go-cfclient -Revision: 35bcce23fc5f -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License - - Copyright (c) 2017 Long Nguyen - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/cloudfoundry/noaa -Version: v2.1.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": - - noaa - - Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or - http://www.apache.org/licenses/LICENSE-2.0 + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +1.6. “Executable Form” --------------------------------------------------------------------- -Dependency: github.com/cloudfoundry/sonde-go -Revision: b33733203bb4 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": + means any form of the work other than Source Code Form. - sonde-go +1.7. “Larger Work” - Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +1.8. “License” - http://www.apache.org/licenses/LICENSE-2.0 + means this document. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - Limitations under the License. +1.9. “Licensable” --------------------------------------------------------------------- -Dependency: github.com/containerd/containerd -Version: v1.3.3 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. - Docker - Copyright 2012-2015 Docker, Inc. +1.10. “Modifications” - This product includes software developed at Docker, Inc. (https://www.docker.com). + means any of the following: - The following is courtesy of our legal counsel: + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + b. any new file in Source Code Form that contains any Covered Software. - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. +1.11. “Patent Claims” of a Contributor - For more information, please see https://www.bis.doc.gov + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. - See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. +1.12. “Secondary License” --------------------------------------------------------------------- -Dependency: github.com/davecgh/go-spew -Version: v1.1.1 -License type (autodetected): ISC -Contents of "LICENSE": + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - ISC License +1.13. 
“Source Code Form” - Copyright (c) 2012-2016 Dave Collins + means the form of the work preferred for making modifications. - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. +1.14. “You” (or “Your”) - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. --------------------------------------------------------------------- -Dependency: github.com/dlclark/regexp2 -Version: v1.2.1 -License type (autodetected): MIT -Contents of "LICENSE": - The MIT License (MIT) +2. License Grants and Conditions - Copyright (c) Doug Clark +2.1. Grants - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. --------------------------------------------------------------------- -Dependency: github.com/docker/distribution -Version: v2.7.1 -License type (autodetected): Apache-2.0 +2.2. 
Effective Date --------------------------------------------------------------------- -Dependency: github.com/docker/docker -Replacement: github.com/docker/engine -Revision: ea84732a7725 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. - Docker - Copyright 2012-2017 Docker, Inc. +2.3. Limitations on Grant Scope - This product includes software developed at Docker, Inc. (https://www.docker.com). + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: - This product contains software (https://github.com/creack/pty) developed - by Keith Rarick, licensed under the MIT License. + a. for any code that a Contributor has removed from Covered Software; or - The following is courtesy of our legal counsel: + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). - For more information, please see https://www.bis.doc.gov +2.4. Subsequent Licenses - See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). --------------------------------------------------------------------- -Dependency: github.com/docker/go-connections -Version: v0.4.0 -License type (autodetected): Apache-2.0 +2.5. Representation --------------------------------------------------------------------- -Dependency: github.com/docker/go-units -Version: v0.4.0 -License type (autodetected): Apache-2.0 + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. --------------------------------------------------------------------- -Dependency: github.com/dop251/goja -Replacement: github.com/andrewkroh/goja -Revision: dd2ac4456e20 -License type (autodetected): MIT -Contents of "LICENSE": +2.6. Fair Use - Copyright (c) 2016 Dmitry Panov + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. - Copyright (c) 2012 Robert Krimen +2.7. 
Conditions - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated - documentation files (the "Software"), to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to the following conditions: + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE - WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +3. Responsibilities --------------------------------------------------------------------- -Dependency: github.com/dop251/goja_nodejs -Replacement: github.com/dop251/goja_nodejs -Revision: adff31b136e6 -License type (autodetected): MIT -Contents of "LICENSE": +3.1. Distribution of Source Form - Copyright (c) 2016 Dmitry Panov + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated - documentation files (the "Software"), to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to the following conditions: +3.2. Distribution of Executable Form - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + If You distribute Covered Software in Executable Form then: - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE - WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous --------------------------------------------------------------------- -Dependency: github.com/dustin/go-humanize -Version: v1.0.0 -License type (autodetected): MIT -Contents of "LICENSE": + This License represents the complete agreement concerning the subject matter + hereof. 
If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. - Copyright (c) 2005-2008 Dustin Sallings - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: +10. Versions of the License - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. +10.1. New Versions - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. - +10.2. Effect of New Versions --------------------------------------------------------------------- -Dependency: github.com/eapache/go-resiliency -Version: v1.1.0 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Evan Huus - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. 
- --------------------------------------------------------------------- -Dependency: github.com/eapache/go-xerial-snappy -Revision: 776d5712da21 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2016 Evan Huus - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/eapache/queue -Version: v1.1.0 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Evan Huus - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/elastic/beats/v7 -Version: v7.0.0 -Revision: 49e8024953a4 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/elastic/ecs -Version: v1.5.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": - - Elastic Common Schema - Copyright 2018 Elasticsearch B.V. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------- -Dependency: github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0 -Revision: d43b7ad5833a -License type (autodetected): ELASTIC -Contents of "LICENSE.txt": - - ELASTIC LICENSE AGREEMENT - - PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH - CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF - THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") - THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, - CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY - INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU - ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE - WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE - GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON - BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL - AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF - SUCH ENTITY. - - Posted Date: April 20, 2018 - - This Agreement is entered into by and between Elasticsearch BV ("Elastic") and - You, or the legal entity on behalf of whom You are acting (as applicable, - "You"). - - 1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE - SOFTWARE - - 1.1 Object Code End User License. Subject to the terms and conditions of - Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and - for so long as you are not in breach of any provision of this Agreement, a - License to the Basic Features and Functions of the Elastic Software. - - 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic - and its licensors own all right, title and interest in and to the Elastic - Software, and except as expressly set forth in Sections 1.1, and 2.1 of this - Agreement, no other license to the Elastic Software is granted to You under - this Agreement, by implication, estoppel or otherwise. 
You agree not to: (i) - reverse engineer or decompile, decrypt, disassemble or otherwise reduce any - Elastic Software provided to You in Object Code, or any portion thereof, to - Source Code, except and only to the extent any such restriction is prohibited - by applicable law, (ii) except as expressly permitted in this Agreement, - prepare derivative works from, modify, copy or use the Elastic Software Object - Code or the Commercial Software Source Code in any manner; (iii) except as - expressly permitted in Section 1.1 above, transfer, sell, rent, lease, - distribute, sublicense, loan or otherwise transfer, Elastic Software Object - Code, in whole or in part, to any third party; (iv) use Elastic Software - Object Code for providing time-sharing services, any software-as-a-service, - service bureau services or as part of an application services provider or - other service offering (collectively, "SaaS Offering") where obtaining access - to the Elastic Software or the features and functions of the Elastic Software - is a primary reason or substantial motivation for users of the SaaS Offering - to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) - circumvent the limitations on use of Elastic Software provided to You in - Object Code format that are imposed or preserved by any License Key, or (vi) - alter or remove any Marks and Notices in the Elastic Software. If You have any - question as to whether a specific SaaS Offering constitutes a Prohibited SaaS - Offering, or are interested in obtaining Elastic's permission to engage in - commercial or non-commercial distribution of the Elastic Software, please - contact elastic_license@elastic.co. - - 1.3 Third Party Open Source Software. The Commercial Software may contain or - be provided with third party open source libraries, components, utilities and - other open source software (collectively, "Open Source Software"), which Open - Source Software may have applicable license terms as identified on a website - designated by Elastic. Notwithstanding anything to the contrary herein, use of - the Open Source Software shall be subject to the license terms and conditions - applicable to such Open Source Software, to the extent required by the - applicable licensor (which terms shall not restrict the license rights granted - to You hereunder, but may contain additional rights). To the extent any - condition of this Agreement conflicts with any license to the Open Source - Software, the Open Source Software license will govern with respect to such - Open Source Software only. Elastic may also separately provide you with - certain open source software that is licensed by Elastic. Your use of such - Elastic open source software will not be governed by this Agreement, but by - the applicable open source license terms. - - 2. COMMERCIAL SOFTWARE SOURCE CODE - - 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of - this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as - you are not in breach of any provision of this Agreement, a limited, - non-exclusive, non-transferable, fully paid up royalty free right and license - to the Commercial Software in Source Code format, without the right to grant - or authorize sublicenses, to prepare Derivative Works of the Commercial - Software, provided You (i) do not hack the licensing mechanism, or otherwise - circumvent the intended limitations on the use of Elastic Software to enable - features other than Basic Features and Functions or those features You are - entitled to as part of a Subscription, and (ii) use the resulting object code - only for reasonable testing purposes. - - 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the - Commercial Software Source Code other than in accordance with Section 2.1 - above, (ii) use a Derivative Work of the Commercial Software outside of a - Non-production Environment, in any production capacity, on a temporary or - permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, - loan or otherwise make available the Commercial Software Source Code, in whole - or in part, to any third party. Notwithstanding the foregoing, You may - maintain a copy of the repository in which the Source Code of the Commercial - Software resides and that copy may be publicly accessible, provided that you - include this Agreement with Your copy of the repository. - - 3. TERMINATION - - 3.1 Termination. This Agreement will automatically terminate, whether or not - You receive notice of such Termination from Elastic, if You breach any of its - provisions. - - 3.2 Post Termination. Upon any termination of this Agreement, for any reason, - You shall promptly cease the use of the Elastic Software in Object Code format - and cease use of the Commercial Software in Source Code format. For the - avoidance of doubt, termination of this Agreement will not affect Your right - to use Elastic Software, in either Object Code or Source Code formats, made - available under the Apache License Version 2.0. - - 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or - expiration of this Agreement. - - 4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY - - 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE - LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, - AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR - STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT - PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY - DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH - RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS - OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE - ELASTIC SOFTWARE WILL BE UNINTERRUPTED. - - 4.2 Limitation of Liability. 
IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE - LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, - INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS - INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY - SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH - OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE - PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A - BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC - HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - 5. MISCELLANEOUS - - This Agreement completely and exclusively states the entire agreement of the - parties regarding the subject matter herein, and it supersedes, and its terms - govern, all prior proposals, agreements, or other communications between the - parties, oral or written, regarding such subject matter. This Agreement may be - modified by Elastic from time to time, and any such modifications will be - effective upon the "Posted Date" set forth at the top of the modified - Agreement. If any provision hereof is held unenforceable, this Agreement will - continue without said provision and be interpreted to reflect the original - intent of the parties. This Agreement and any non-contractual obligation - arising out of or in connection with it, is governed exclusively by Dutch law. - This Agreement shall not be governed by the 1980 UN Convention on Contracts - for the International Sale of Goods. All disputes arising out of or in - connection with this Agreement, including its existence and validity, shall be - resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except - where mandatory law provides for the courts at another location in The - Netherlands to have jurisdiction. The parties hereby irrevocably waive any and - all claims and defenses either might otherwise have in any such action or - proceeding in any of such courts based upon any alleged lack of personal - jurisdiction, improper venue, forum non conveniens or any similar claim or - defense. A breach or threatened breach, by You of Section 2 may cause - irreparable harm for which damages at law may not provide adequate relief, and - therefore Elastic shall be entitled to seek injunctive relief without being - required to post a bond. You may not assign this Agreement (including by - operation of law in connection with a merger or acquisition), in whole or in - part to any third party without the prior written consent of Elastic, which - may be withheld or granted by Elastic in its sole and absolute discretion. - Any assignment in violation of the preceding sentence is void. Notices to - Elastic may also be sent to legal@elastic.co. - - 6. DEFINITIONS - - The following terms have the meanings ascribed: - - 6.1 "Affiliate" means, with respect to a party, any entity that controls, is - controlled by, or which is under common control with, such party, where - "control" means ownership of at least fifty percent (50%) of the outstanding - voting shares of the entity, or the contractual right to establish policy for, - and manage the operations of, the entity. - - 6.2 "Basic Features and Functions" means those features and functions of the - Elastic Software that are eligible for use under a Basic license, as set forth - at https://www.elastic.co/subscriptions, as may be modified by Elastic from - time to time. 
- - 6.3 "Commercial Software" means the Elastic Software Source Code in any file - containing a header stating the contents are subject to the Elastic License or - which is contained in the repository folder labeled "x-pack", unless a LICENSE - file present in the directory subtree declares a different license. - - 6.4 "Derivative Work of the Commercial Software" means, for purposes of this - Agreement, any modification(s) or enhancement(s) to the Commercial Software, - which represent, as a whole, an original work of authorship. - - 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, - royalty free, right and license, without the right to grant or authorize - sublicenses, solely for Your internal business operations to (i) install and - use the applicable Features and Functions of the Elastic Software in Object - Code, and (ii) permit Contractors and Your Affiliates to use the Elastic - software as set forth in (i) above, provided that such use by Contractors must - be solely for Your benefit and/or the benefit of Your Affiliates, and You - shall be responsible for all acts and omissions of such Contractors and - Affiliates in connection with their use of the Elastic software that are - contrary to the terms and conditions of this Agreement. - - 6.6 "License Key" means a sequence of bytes, including but not limited to a - JSON blob, that is used to enable certain features and functions of the - Elastic Software. - - 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and - notices present on the Documentation as originally provided by Elastic. - - 6.8 "Non-production Environment" means an environment for development, testing - or quality assurance, where software is not used for production purposes. - - 6.9 "Object Code" means any form resulting from mechanical transformation or - translation of Source Code form, including but not limited to compiled object - code, generated documentation, and conversions to other media types. - - 6.10 "Source Code" means the preferred form of computer software for making - modifications, including but not limited to software source code, - documentation source, and configuration files. - - 6.11 "Subscription" means the right to receive Support Services and a License - to the Commercial Software. 
- --------------------------------------------------------------------- -Dependency: github.com/elastic/go-elasticsearch/v7 -Version: v7.8.0 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/elastic/go-elasticsearch/v8 -Version: v8.0.0 -Revision: 59b6a186f8dd -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/elastic/go-hdrhistogram -Version: v0.1.0 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Coda Hale - Copyright (c) 2020 Elasticsearch BV - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/elastic/go-lumber -Version: v0.1.0 -License type (autodetected): Apache-2.0 + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. --------------------------------------------------------------------- -Dependency: github.com/elastic/go-seccomp-bpf -Version: v1.1.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": +10.3. Modified Versions - Elastic go-seccomp-bpf - Copyright 2018 Elasticsearch B.V. + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). - This product includes software developed at - Elasticsearch, B.V. (https://www.elastic.co/). +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. 
--------------------------------------------------------------------- -Dependency: github.com/elastic/go-structform -Version: v0.0.7 -License type (autodetected): Apache-2.0 +Exhibit A - Source Code Form License Notice --------------------------------------------------------------------- -Dependency: github.com/elastic/go-sysinfo -Version: v1.4.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. - Elastic go-sysinfo - Copyright 2017-2020 Elasticsearch B.V. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. - This product includes software developed at - Elasticsearch, B.V. (https://www.elastic.co/). +You may add additional accurate notices of copyright ownership. --------------------------------------------------------------------- -Dependency: github.com/elastic/go-txfile -Version: v0.0.7 -License type (autodetected): Apache-2.0 +Exhibit B - “Incompatible With Secondary Licenses” Notice --------------------------------------------------------------------- -Dependency: github.com/elastic/go-ucfg -Version: v0.8.3 -License type (autodetected): Apache-2.0 + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. --------------------------------------------------------------------- -Dependency: github.com/elastic/go-windows -Version: v1.0.1 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": - Elastic go-windows - Copyright 2017-2019 Elasticsearch B.V. - This product includes software developed at - Elasticsearch, B.V. (https://www.elastic.co/). +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/go-cleanhttp +Version: v0.5.1 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- --------------------------------------------------------------------- -Dependency: github.com/elastic/gosigar -Version: v0.10.6 -Revision: f115143bb233 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-cleanhttp@v0.5.1/LICENSE: - Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. +Mozilla Public License, version 2.0 - This product is licensed to you under the Apache License, Version 2.0 (the "License"). - You may not use this product except in compliance with the License. +1. Definitions - This product includes a number of subcomponents with - separate copyright notices and license terms. Your use of these - subcomponents is subject to the terms and conditions of the - subcomponent's license, as noted in the LICENSE file. +1.1. "Contributor" --------------------------------------------------------------------- -Dependency: github.com/fatih/color -Version: v1.9.0 -License type (autodetected): MIT -Contents of "LICENSE.md": + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. - The MIT License (MIT) +1.2. 
"Contributor Version" - Copyright (c) 2013 Fatih Arslan + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. - Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: +1.3. "Contribution" - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. + means Covered Software of a particular Contributor. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +1.4. "Covered Software" --------------------------------------------------------------------- -Dependency: github.com/garyburd/redigo -Version: v1.0.1 -Revision: b8dc90050f24 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/go-sourcemap/sourcemap -Version: v2.1.3 -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": - - Copyright (c) 2016 The github.com/go-sourcemap/sourcemap Contributors. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/gofrs/flock -Version: v0.7.2 -Revision: 5135e617513b -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2015, Tim Heckman - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +1.5. "Incompatible With Secondary Licenses" + means - * Neither the name of linode-netint nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/gofrs/uuid -Version: v3.3.0 -License type (autodetected): MIT -Contents of "LICENSE": - - Copyright (C) 2013-2018 by Maxim Bublis - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/gogo/googleapis -Version: v1.3.1 -Revision: b8d18e97a9a1 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/gogo/protobuf -Version: v1.3.1 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2013, The GoGo Authors. All rights reserved. 
- - Protocol Buffers for Go with Gadgets - - Go support for Protocol Buffers - Google's data interchange format - - Copyright 2010 The Go Authors. All rights reserved. - https://github.com/golang/protobuf - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/golang/protobuf -Version: v1.4.2 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright 2010 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: github.com/golang/snappy -Version: v0.0.1 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/google/go-cmp -Version: v0.5.2 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2017 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: github.com/google/gofuzz -Version: v1.1.0 -License type (autodetected): Apache-2.0 + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or --------------------------------------------------------------------- -Dependency: github.com/google/pprof -Revision: 1a94d8640e99 -License type (autodetected): Apache-2.0 + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. --------------------------------------------------------------------- -Dependency: github.com/googleapis/gnostic -Version: v0.3.1 -Revision: 25d8b0b66985 -License type (autodetected): Apache-2.0 +1.6. "Executable Form" --------------------------------------------------------------------- -Dependency: github.com/gorilla/websocket -Version: v1.4.1 -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": + means any form of the work other than Source Code Form. - Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +1.7. "Larger Work" - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +1.8. "License" - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + means this document. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/grpc-ecosystem/grpc-gateway -Version: v1.13.0 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE.txt": - - Copyright (c) 2015, Gengo, Inc. - All rights reserved. - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/hashicorp/errwrap -Version: v1.0.0 -License type (autodetected): MPL-2.0 -Contents of "LICENSE": +1.9. "Licensable" - Mozilla Public License, version 2.0 + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. - 1. Definitions +1.10. "Modifications" - 1.1. “Contributor” + means any of the following: - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or - 1.2. “Contributor Version” + b. any new file in Source Code Form that contains any Covered Software. - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. +1.11. "Patent Claims" of a Contributor - 1.3. “Contribution” + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. - means Covered Software of a particular Contributor. +1.12. "Secondary License" - 1.4. “Covered Software” + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. +1.13. "Source Code Form" - 1.5. “Incompatible With Secondary Licenses” - means + means the form of the work preferred for making modifications. - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or +1.14. "You" (or "Your") - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. - 1.6. “Executable Form” - means any form of the work other than Source Code Form. +2. License Grants and Conditions - 1.7. “Larger Work” +2.1. Grants - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - 1.8. “License” + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and - means this document. + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. - 1.9. “Licensable” +2.2. Effective Date - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. - 1.10. “Modifications” +2.3. Limitations on Grant Scope - means any of the following: + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. for any code that a Contributor has removed from Covered Software; or - b. any new file in Source Code Form that contains any Covered Software. + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - 1.11. “Patent Claims” of a Contributor + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). - 1.12. “Secondary License” +2.4. 
Subsequent Licenses - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). - 1.13. “Source Code Form” +2.5. Representation - means the form of the work preferred for making modifications. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. - 1.14. “You” (or “Your”) +2.6. Fair Use - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. +2.7. Conditions - 2. License Grants and Conditions + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - 2.1. Grants - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: +3. Responsibilities - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and +3.1. Distribution of Source Form - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. 
Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. 
Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. - 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - 2.3. Limitations on Grant Scope +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/go-rootcerts +Version: v1.0.2 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-rootcerts@v1.0.2/LICENSE: - a. for any code that a Contributor has removed from Covered Software; or +Mozilla Public License, version 2.0 - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or +1. Definitions - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. +1.1. "Contributor" - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. - 2.4. Subsequent Licenses +1.2. 
"Contributor Version" - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. - 2.5. Representation +1.3. "Contribution" - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + means Covered Software of a particular Contributor. - 2.6. Fair Use +1.4. "Covered Software" - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. - 2.7. Conditions +1.5. "Incompatible With Secondary Licenses" + means - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. - 3. Responsibilities +1.6. "Executable Form" - 3.1. Distribution of Source Form + means any form of the work other than Source Code Form. - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. +1.7. "Larger Work" - 3.2. Distribution of Executable Form + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. - If You distribute Covered Software in Executable Form then: +1.8. "License" - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - - 3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - - 3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - - 3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - - 4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - - 5. Termination - - 5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - - 5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - - 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - - 6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - - 7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - - 8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - - 9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - - 10. Versions of the License - - 10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - - 10.2. 
Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
- 10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
- 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
- Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
- If it is not possible or desirable to put the notice in a particular file, then
- You may include the notice in a location (such as a LICENSE file in a relevant
- directory) where a recipient would be likely to look for such a notice.
-
- You may add additional accurate notices of copyright ownership.
-
- Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
--------------------------------------------------------------------
-Dependency: github.com/hashicorp/go-multierror
-Version: v1.1.0
-License type (autodetected): MPL-2.0
-Contents of "LICENSE":
+ means this document.
- Mozilla Public License, version 2.0
+1.9. "Licensable"
- 1. Definitions
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
- 1.1. “Contributor”
+1.10. "Modifications"
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
+ means any of the following:
- 1.2. “Contributor Version”
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
+ b. any new file in Source Code Form that contains any Covered Software.
- 1.3. “Contribution”
+1.11. "Patent Claims" of a Contributor
- means Covered Software of a particular Contributor.
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
- 1.4. “Covered Software”
+1.12. "Secondary License"
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof. 
+ means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - 1.5. “Incompatible With Secondary Licenses” - means +1.13. "Source Code Form" - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or + means the form of the work preferred for making modifications. - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. +1.14. "You" (or "Your") - 1.6. “Executable Form” + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. - means any form of the work other than Source Code Form. - 1.7. “Larger Work” +2. License Grants and Conditions - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. +2.1. Grants - 1.8. “License” + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - means this document. + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and - 1.9. “Licensable” + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. +2.2. Effective Date - 1.10. “Modifications” + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. - means any of the following: +2.3. Limitations on Grant Scope - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: - b. any new file in Source Code Form that contains any Covered Software. + a. for any code that a Contributor has removed from Covered Software; or - 1.11. “Patent Claims” of a Contributor + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - 1.12. “Secondary License” + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. +2.4. Subsequent Licenses - 1.13. “Source Code Form” + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). - means the form of the work preferred for making modifications. +2.5. Representation - 1.14. “You” (or “Your”) + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. +2.6. Fair Use + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. - 2. License Grants and Conditions +2.7. Conditions - 2.1. Grants + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and +3. Responsibilities - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. +3.1. Distribution of Source Form - 2.2. Effective Date + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. 
You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
- 2.3. Limitations on Grant Scope
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
+--------------------------------------------------------------------------------
+Dependency : github.com/hashicorp/go-uuid
+Version: v1.0.2
+Licence type (autodetected): MPL-2.0
+--------------------------------------------------------------------------------
- a. for any code that a Contributor has removed from Covered Software; or
+Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE:
- b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or +Mozilla Public License, version 2.0 - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. +1. Definitions - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). +1.1. "Contributor" - 2.4. Subsequent Licenses + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). +1.2. "Contributor Version" - 2.5. Representation + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. +1.3. "Contribution" - 2.6. Fair Use + means Covered Software of a particular Contributor. - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. +1.4. "Covered Software" - 2.7. Conditions + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. +1.5. "Incompatible With Secondary Licenses" + means + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or - 3. Responsibilities + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. - 3.1. Distribution of Source Form +1.6. "Executable Form" - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + means any form of the work other than Source Code Form. - 3.2. Distribution of Executable Form +1.7. "Larger Work" - If You distribute Covered Software in Executable Form then: + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - - 3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - - 3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - - 3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - - 4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - - 5. Termination - - 5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - - 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - - 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - - 6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - - 7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - - 8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - - 9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. 
Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
- 10. Versions of the License
-
- 10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
- 10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
- 10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
- 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
- Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
- If it is not possible or desirable to put the notice in a particular file, then
- You may include the notice in a location (such as a LICENSE file in a relevant
- directory) where a recipient would be likely to look for such a notice.
-
- You may add additional accurate notices of copyright ownership.
-
- Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
--------------------------------------------------------------------
-Dependency: github.com/hashicorp/go-uuid
-Version: v1.0.1
-License type (autodetected): MPL-2.0
-Contents of "LICENSE":
+1.8. "License"
- Mozilla Public License, version 2.0
+ means this document.
- 1. Definitions
+1.9. "Licensable"
- 1.1. "Contributor"
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
+1.10. "Modifications"
- 1.2. "Contributor Version"
+ means any of the following:
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
- 1.3. "Contribution"
+ b. any new file in Source Code Form that contains any Covered Software.
- means Covered Software of a particular Contributor.
+1.11. "Patent Claims" of a Contributor
- 1.4. 
"Covered Software" + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. +1.12. "Secondary License" - 1.5. "Incompatible With Secondary Licenses" - means + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or +1.13. "Source Code Form" - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + means the form of the work preferred for making modifications. - 1.6. "Executable Form" +1.14. "You" (or "Your") - means any form of the work other than Source Code Form. + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. - 1.7. "Larger Work" - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. +2. License Grants and Conditions - 1.8. "License" +2.1. Grants - means this document. + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - 1.9. "Licensable" + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. - 1.10. "Modifications" +2.2. Effective Date - means any of the following: + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or +2.3. Limitations on Grant Scope - b. any new file in Source Code Form that contains any Covered Software. + The licenses granted in this Section 2 are the only rights granted under + this License. 
No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: - 1.11. "Patent Claims" of a Contributor + a. for any code that a Contributor has removed from Covered Software; or - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - 1.12. "Secondary License" + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). - 1.13. "Source Code Form" +2.4. Subsequent Licenses - means the form of the work preferred for making modifications. + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). - 1.14. "You" (or "Your") +2.5. Representation - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. +2.6. Fair Use - 2. License Grants and Conditions + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. - 2.1. Grants +2.7. Conditions - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. +3. Responsibilities - 2.2. Effective Date +3.1. 
Distribution of Source Form - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. 
Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. 
Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. - 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. - a. for any code that a Contributor has removed from Covered Software; or +10.2. Effect of New Versions - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. +10.3. Modified Versions - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). - 2.4. Subsequent Licenses +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). +Exhibit A - Source Code Form License Notice - 2.5. 
Representation + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. - 2.6. Fair Use +You may add additional accurate notices of copyright ownership. - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. +Exhibit B - "Incompatible With Secondary Licenses" Notice - 2.7. Conditions + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - 3. Responsibilities +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/nomad/api +Version: v0.0.0-20201203164818-6318a8ac7bf8 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- - 3.1. Distribution of Source Form +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/nomad/api@v0.0.0-20201203164818-6318a8ac7bf8/LICENSE: - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - - 3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - - 3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - - 3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - - 3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - - 4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - - 5. Termination - - 5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - - 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - - 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - - 6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - - 7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - - 8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - - 9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - - 10. Versions of the License - - 10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. +Mozilla Public License, version 2.0 - 10.2. Effect of New Versions +1. Definitions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. +1.1. "Contributor" - 10.3. Modified Versions + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. 
- If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). +1.2. "Contributor Version" - 10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. - Exhibit A - Source Code Form License Notice +1.3. "Contribution" - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. + means Covered Software of a particular Contributor. - If it is not possible or desirable to put the notice in a particular file, - then You may include the notice in a location (such as a LICENSE file in a - relevant directory) where a recipient would be likely to look for such a - notice. +1.4. "Covered Software" - You may add additional accurate notices of copyright ownership. + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. - Exhibit B - "Incompatible With Secondary Licenses" Notice +1.5. "Incompatible With Secondary Licenses" + means - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or --------------------------------------------------------------------- -Dependency: github.com/hashicorp/golang-lru -Version: v0.5.3 -License type (autodetected): MPL-2.0 -Contents of "LICENSE": + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. - Mozilla Public License, version 2.0 +1.6. "Executable Form" - 1. Definitions + means any form of the work other than Source Code Form. - 1.1. "Contributor" +1.7. "Larger Work" - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. - 1.2. "Contributor Version" +1.8. "License" - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + means this document. - 1.3. "Contribution" +1.9. "Licensable" - means Covered Software of a particular Contributor. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. - 1.4. "Covered Software" +1.10. 
"Modifications" - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. + means any of the following: - 1.5. "Incompatible With Secondary Licenses" - means + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or + b. any new file in Source Code Form that contains any Covered Software. - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. +1.11. "Patent Claims" of a Contributor - 1.6. "Executable Form" + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. - means any form of the work other than Source Code Form. +1.12. "Secondary License" - 1.7. "Larger Work" + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. +1.13. "Source Code Form" - 1.8. "License" + means the form of the work preferred for making modifications. - means this document. +1.14. "You" (or "Your") - 1.9. "Licensable" + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - 1.10. "Modifications" +2. License Grants and Conditions - means any of the following: +2.1. Grants - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - b. any new file in Source Code Form that contains any Covered Software. + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and - 1.11. "Patent Claims" of a Contributor + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 
- means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. +2.2. Effective Date - 1.12. "Secondary License" + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. +2.3. Limitations on Grant Scope - 1.13. "Source Code Form" + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: - means the form of the work preferred for making modifications. + a. for any code that a Contributor has removed from Covered Software; or - 1.14. "You" (or "Your") + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). - 2. License Grants and Conditions +2.4. Subsequent Licenses - 2.1. Grants + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: +2.5. Representation - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. +2.6. Fair Use - 2.2. 
Effective Date + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. +2.7. Conditions - 2.3. Limitations on Grant Scope + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - a. for any code that a Contributor has removed from Covered Software; or +3. Responsibilities - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or +3.1. Distribution of Source Form - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - 2.4. Subsequent Licenses - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). +-------------------------------------------------------------------------------- +Dependency : github.com/imdario/mergo +Version: v0.3.6 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/imdario/mergo@v0.3.6/LICENSE: + +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/inconshreveable/mousetrap +Version: v1.0.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- - 2.5. Representation +Contents of probable licence file $GOMODCACHE/github.com/inconshreveable/mousetrap@v1.0.0/LICENSE: - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. +Copyright 2014 Alan Shreve - 2.6. 
Fair Use +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + http://www.apache.org/licenses/LICENSE-2.0 - 2.7. Conditions +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. +-------------------------------------------------------------------------------- +Dependency : github.com/jcmturner/gofork +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gofork@v1.0.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/jmespath/go-jmespath +Version: v0.4.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- - 3. Responsibilities +Contents of probable licence file $GOMODCACHE/github.com/jmespath/go-jmespath@v0.4.0/LICENSE: - 3.1. Distribution of Source Form +Copyright 2015 James Saryerwinnie - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. 
You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - - 3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - - 3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - - 3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - - 3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - - 4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - - 5. Termination - - 5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - - 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - - 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - - 6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - - 7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - - 8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. 
- - 9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - - 10. Versions of the License - - 10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - - 10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - - 10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - - 10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - - Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - - If it is not possible or desirable to put the notice in a particular file, - then You may include the notice in a location (such as a LICENSE file in a - relevant directory) where a recipient would be likely to look for such a - notice. - - You may add additional accurate notices of copyright ownership. - - Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - --------------------------------------------------------------------- -Dependency: github.com/imdario/mergo -Version: v0.3.6 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2013 Dario Castañé. All rights reserved. - Copyright (c) 2012 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/inconshreveable/mousetrap -Version: v1.0.0 -License type (autodetected): Apache-2.0 +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at --------------------------------------------------------------------- -Dependency: github.com/jaegertracing/jaeger -Version: v1.16.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": + http://www.apache.org/licenses/LICENSE-2.0 - Jaeger, Distributed Tracing Platform. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/joeshaw/multierror +Version: v0.0.0-20140124173710-69b34d4ec901 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/joeshaw/multierror@v0.0.0-20140124173710-69b34d4ec901/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Joe Shaw + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/jonboulle/clockwork +Version: v0.2.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/jonboulle/clockwork@v0.2.2/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - Copyright 2015-2019 The Jaeger Project Authors + http://www.apache.org/licenses/LICENSE-2.0 - Licensed under Apache License 2.0. See LICENSE for terms. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - Includes software developed at Uber Technologies, Inc. (https://eng.uber.com/). --------------------------------------------------------------------- -Dependency: github.com/jcmturner/gofork +-------------------------------------------------------------------------------- +Dependency : github.com/josharian/intern Version: v1.0.0 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: github.com/jmespath/go-jmespath -Revision: c2b33e8439af -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/joeshaw/multierror -Revision: 69b34d4ec901 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Joe Shaw - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/json-iterator/go -Version: v1.1.10 -License type (autodetected): MIT -Contents of "LICENSE": - - MIT License - - Copyright (c) 2016 json-iterator - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/klauspost/compress -Version: v1.9.3 -Revision: c099ac9f21dd -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2012 The Go Authors. All rights reserved. - Copyright (c) 2019 Klaus Post. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/konsorten/go-windows-terminal-sequences -Version: v1.0.2 -License type (autodetected): MIT -Contents of "LICENSE": +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/josharian/intern@v1.0.0/license.md: + +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/klauspost/compress +Version: v1.12.3 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.12.3/LICENSE: + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/knadh/koanf +Version: v1.2.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - (The MIT License) +Contents of probable licence file $GOMODCACHE/github.com/knadh/koanf@v1.2.1/LICENSE: - Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) +The MIT License - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Copyright (c) 2019, Kailash Nadh. https://github.com/knadh - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
--------------------------------------------------------------------- -Dependency: github.com/mailru/easyjson -Version: v0.7.1 -License type (autodetected): MIT -Contents of "LICENSE": +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - Copyright (c) 2016 Mail.Ru Group - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +-------------------------------------------------------------------------------- +Dependency : github.com/konsorten/go-windows-terminal-sequences +Version: v1.0.3 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +Contents of probable licence file $GOMODCACHE/github.com/konsorten/go-windows-terminal-sequences@v1.0.3/LICENSE: - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +(The MIT License) --------------------------------------------------------------------- -Dependency: github.com/Masterminds/semver -Version: v1.4.2 -License type (autodetected): MIT -Contents of "LICENSE.txt": +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) - The Masterminds - Copyright (C) 2014-2015, Matt Butcher and Matt Farina +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. --------------------------------------------------------------------- -Dependency: github.com/mattn/go-colorable -Version: v0.1.7 -License type (autodetected): MIT -Contents of "LICENSE": +-------------------------------------------------------------------------------- +Dependency : github.com/mailru/easyjson +Version: v0.7.7 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - The MIT License (MIT) +Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.7/LICENSE: - Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) 2016 Mail.Ru Group - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------- -Dependency: github.com/mattn/go-isatty -Version: v0.0.12 -License type (autodetected): MIT -Contents of "LICENSE": - Copyright (c) Yasuhiro MATSUMOTO +-------------------------------------------------------------------------------- +Dependency : github.com/mattn/go-colorable +Version: v0.1.8 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - MIT License (Expat) +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-colorable@v0.1.8/LICENSE: - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +The MIT License (MIT) - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +Copyright (c) 2016 Yasuhiro Matsumoto - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: --------------------------------------------------------------------- -Dependency: github.com/Microsoft/go-winio -Version: v0.4.15 -Revision: fc70bd9a86b5 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2015 Microsoft - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/miekg/dns -Version: v1.1.15 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Extensions of the original work are copyright (c) 2011 Miek Gieben - - As this is fork of the official Go code the same license applies: - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/mitchellh/hashstructure -Version: v1.0.0 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2016 Mitchell Hashimoto - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- --------------------------------------------------------------------- -Dependency: github.com/modern-go/concurrent -Revision: bacd9c7ef1dd -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/modern-go/reflect2 -Version: v1.0.1 -License type (autodetected): Apache-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. --------------------------------------------------------------------- -Dependency: github.com/open-telemetry/opentelemetry-collector -Version: v0.2.1 -Revision: c300f1341702 -License type (autodetected): Apache-2.0 +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. --------------------------------------------------------------------- -Dependency: github.com/opencontainers/go-digest -Version: v1.0.0 -Revision: ac19fd6e7483 -License type (autodetected): Apache-2.0 --------------------------------------------------------------------- -Dependency: github.com/opencontainers/image-spec -Version: v1.0.2 -Revision: 775207bd45b6 -License type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- +Dependency : github.com/mattn/go-isatty +Version: v0.0.14 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- --------------------------------------------------------------------- -Dependency: github.com/opentracing/opentracing-go -Version: v1.1.1 -Revision: a7454ce5950e -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/patrickmn/go-cache -Version: v2.1.0 -License type (autodetected): MIT -Contents of "LICENSE": - - Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- --------------------------------------------------------------------- -Dependency: github.com/pierrec/lz4 -Version: v2.2.6 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2015, Pierre Curto - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-isatty@v0.0.14/LICENSE: - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +Copyright (c) Yasuhiro MATSUMOTO - * Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/pkg/errors -Version: v0.9.1 -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": +MIT License (Expat) - Copyright (c) 2015, Dave Cheney - All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/prometheus/procfs -Version: v0.1.3 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": - - procfs provides functions to retrieve system, kernel and process - metrics from the pseudo-filesystem proc. - - Copyright 2014-2015 The Prometheus Authors - - This product includes software developed at - SoundCloud Ltd. (http://soundcloud.com/). - --------------------------------------------------------------------- -Dependency: github.com/rcrowley/go-metrics -Revision: 10cdbea86bc0 -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": - - Copyright 2012 Richard Crowley. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS - OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - The views and conclusions contained in the software and documentation - are those of the authors and should not be interpreted as representing - official policies, either expressed or implied, of Richard Crowley. 
- --------------------------------------------------------------------- -Dependency: github.com/ryanuber/go-glob -Revision: 256dc444b735 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Ryan Uber - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/santhosh-tekuri/jsonschema -Version: v1.2.4 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2017 Santhosh Kumar Tekuri. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: github.com/Shopify/sarama -Replacement: github.com/elastic/sarama -Revision: 355d120d0970 -License type (autodetected): MIT -Contents of "LICENSE": - - Copyright (c) 2013 Shopify - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/sirupsen/logrus -Version: v1.4.2 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Simon Eskildsen - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: github.com/spf13/cobra -Version: v0.0.5 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/spf13/pflag -Version: v1.0.5 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2012 Alex Ogier. All rights reserved. - Copyright (c) 2012 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: github.com/ua-parser/uap-go -Revision: e1c09f13e2fe -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/uber/tchannel-go -Version: v1.16.0 -License type (autodetected): MIT -Contents of "LICENSE.md": - - Copyright (c) 2015 Uber Technologies, Inc. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- --------------------------------------------------------------------- -Dependency: github.com/urso/go-bin -Revision: 781c575c9f0e -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/xdg/scram -Revision: 7eeb5667e42c -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: github.com/xdg/stringprep -Version: v1.0.0 -License type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- +Dependency : github.com/miekg/dns +Version: v1.1.25 +Licence type (autodetected): BSD +-------------------------------------------------------------------------------- --------------------------------------------------------------------- -Dependency: go.elastic.co/apm -Version: v1.8.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": - - Elastic APM Go Agent - Copyright 2018-2019 Elasticsearch B.V. - - This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). - - ========================================= - Third party code included by the Go Agent - ========================================= - - ------------------------------------------------------------------------------------ - This project copies code from the Go standard library (https://github.com/golang/go) - ------------------------------------------------------------------------------------ - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------- - This project copies code from Gorilla Mux (https://github.com/gorilla/mux) - -------------------------------------------------------------------------- - - Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - ------------------------------------------------------------ - This project copies code from pq (https://github.com/lib/pq) - ------------------------------------------------------------ - - Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany - - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: go.elastic.co/apm/module/apmelasticsearch -Version: v1.7.2 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: go.elastic.co/apm/module/apmgrpc -Version: v1.7.0 -License type (autodetected): Apache-2.0 +Contents of probable licence file $GOMODCACHE/github.com/miekg/dns@v1.1.25/COPYRIGHT: --------------------------------------------------------------------- -Dependency: go.elastic.co/apm/module/apmhttp -Version: v1.7.2 -License type (autodetected): Apache-2.0 +Copyright 2009 The Go Authors. All rights reserved. 
Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben --------------------------------------------------------------------- -Dependency: go.elastic.co/ecszap -Version: v0.2.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE.txt": - - ecszap - Copyright 2020-2020 Elasticsearch B.V. - - ========================================================================== - Third party libraries used by the Elastic ecszap project: - ========================================================================== - - Dependency: github.com/stretchr/testify - Version: v1.4.0 - License type: MIT - https://github.com/stretchr/testify/blob/199de5f3a493a9bf2dcc7fa5bf841d7759c13d7d/LICENSE: - -------------------------------------------------------------------- - MIT License - - Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -------------------------------------------------------------------- - - - Dependency: go.uber.org/zap - Version: v1.14.0 - License type: MIT - https://github.com/uber-go/zap/blob/0bd02a6308c1bac3a03b02dc385555297cb22f83/LICENSE.txt: - -------------------------------------------------------------------- - Copyright (c) 2016-2017 Uber Technologies, Inc. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- -------------------------------------------------------------------- - - - Dependency: github.com/magefile/mage - Version: v1.9.0 - License type: Apache-2.0 - https://github.com/magefile/mage/blob/324c6690ed410efc1d9b597e477c46d42cbeb340/LICENSE: - -------------------------------------------------------------------- - Apache License 2.0 - - - -------------------------------------------------------------------- - Dependency: github.com/pkg/errors - Version: v0.9.1 - License type (autodetected): BSD-2-Clause - https://github.com/pkg/errors/blob/614d223910a179a466c1767a985424175c39b465/LICENSE - -------------------------------------------------------------------- - Copyright (c) 2015, Dave Cheney - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + + +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/copystructure +Version: v1.2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/copystructure@v1.2.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/go-homedir +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/go-homedir@v1.1.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------- - --------------------------------------------------------------------- -Dependency: go.elastic.co/fastjson + +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/hashstructure Version: v1.1.0 -License type (autodetected): Apache-2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/hashstructure@v1.1.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/mapstructure +Version: v1.4.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.4.1/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/reflectwalk +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/reflectwalk@v1.0.2/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/modern-go/concurrent +Version: v0.0.0-20180306012644-bacd9c7ef1dd +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0.0.0-20180306012644-bacd9c7ef1dd/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal +Version: v0.34.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal@v0.34.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/opencontainers/go-digest +Version: v1.0.0-rc1.0.20190228220655-ac19fd6e7483 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/opencontainers/go-digest@v1.0.0-rc1.0.20190228220655-ac19fd6e7483/LICENSE: + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/opencontainers/image-spec +Version: v1.0.2-0.20190823105129-775207bd45b6 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/opencontainers/image-spec@v1.0.2-0.20190823105129-775207bd45b6/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/opentracing/opentracing-go +Version: v1.2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing-go@v1.2.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/pierrec/lz4 +Version: v2.5.2+incompatible +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4@v2.5.2+incompatible/LICENSE: + +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/prometheus/procfs +Version: v0.7.3 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/prometheus/procfs@v0.7.3/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/rcrowley/go-metrics +Version: v0.0.0-20201227073835-cf1acfcdf475 +Licence type (autodetected): BSD-2-Clause-FreeBSD +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/rcrowley/go-metrics@v0.0.0-20201227073835-cf1acfcdf475/LICENSE: + +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. + --------------------------------------------------------------------- -Dependency: go.uber.org/atomic +-------------------------------------------------------------------------------- +Dependency : github.com/rs/cors +Version: v1.8.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/rs/cors@v1.8.0/LICENSE: + +Copyright (c) 2014 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/shirou/gopsutil +Version: v3.21.7+incompatible +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/shirou/gopsutil@v3.21.7+incompatible/LICENSE: + +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +Dependency : github.com/sirupsen/logrus Version: v1.6.0 -License type (autodetected): MIT -Contents of "LICENSE.txt": - - Copyright (c) 2016 Uber Technologies, Inc. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: go.uber.org/multierr -Version: v1.5.0 -License type (autodetected): MIT -Contents of "LICENSE.txt": - - Copyright (c) 2017 Uber Technologies, Inc. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: go.uber.org/zap -Version: v1.15.0 -License type (autodetected): MIT -Contents of "LICENSE.txt": - - Copyright (c) 2016-2017 Uber Technologies, Inc. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - --------------------------------------------------------------------- -Dependency: golang.org/x/crypto -Revision: 5c72a883971a -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/net -Revision: c89045814202 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/oauth2 -Revision: bf48bf16ab8d -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/sync -Revision: 6e8e738ad208 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/sys -Revision: c12d262b63d8 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/text -Version: v0.3.3 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: golang.org/x/time -Revision: 555d28b269f0 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2009 The Go Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: google.golang.org/genproto -Revision: f3c370f40bfb -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: google.golang.org/grpc -Version: v1.29.1 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: google.golang.org/protobuf -Version: v1.23.0 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2018 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------- -Dependency: gopkg.in/inf.v0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sirupsen/logrus@v1.6.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/spf13/cast +Version: v1.4.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/spf13/cast@v1.4.1/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +Dependency : github.com/tklauser/go-sysconf +Version: v0.3.5 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/tklauser/go-sysconf@v0.3.5/LICENSE: + +BSD 3-Clause License + +Copyright (c) 2018-2021, Tobias Klauser +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/tklauser/numcpus +Version: v0.2.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} Authors of Cilium + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/ugorji/go/codec +Version: v1.1.8 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/ugorji/go/codec@v1.1.8/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/urso/diag +Version: v0.0.0-20200210123136-21b3cc8eb797 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/urso/diag@v0.0.0-20200210123136-21b3cc8eb797/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/urso/go-bin +Version: v0.0.0-20180220135811-781c575c9f0e +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/urso/go-bin@v0.0.0-20180220135811-781c575c9f0e/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/urso/sderr +Version: v0.0.0-20210525210834-52b04e8f5c71 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/urso/sderr@v0.0.0-20210525210834-52b04e8f5c71/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/xdg/scram +Version: v1.0.3 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/xdg/scram@v1.0.3/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + +-------------------------------------------------------------------------------- +Dependency : github.com/xdg/stringprep +Version: v1.0.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/xdg/stringprep@v1.0.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/ecszap +Version: v1.0.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Elastic and contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Dependency : go.opencensus.io +Version: v0.23.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.23.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/contrib +Version: v0.22.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib@v0.22.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +Version: v0.22.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.22.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +Version: v0.22.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.22.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel +Version: v1.0.0-RC2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.0.0-!r!c2/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel/internal/metric +Version: v0.22.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/internal/metric@v0.22.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel/metric +Version: v0.22.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v0.22.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel/trace +Version: v1.0.0-RC2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.0.0-!r!c2/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.uber.org/multierr +Version: v1.7.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.uber.org/multierr@v1.7.0/LICENSE.txt: + +Copyright (c) 2017-2021 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.0.0-20210817164053-32db794688a5 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20210817164053-32db794688a5/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/oauth2 +Version: v0.0.0-20210514164344-f6687ab2804c +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.0.0-20210514164344-f6687ab2804c/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sys +Version: v0.0.0-20210910150752-751e447fb3d0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20210910150752-751e447fb3d0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.0.0-20201126162022-7de9c90e9dd1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.0.0-20201126162022-7de9c90e9dd1/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/text +Version: v0.3.7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.7/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : google.golang.org/genproto +Version: v0.0.0-20210909211513-a8c4777a87af +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20210909211513-a8c4777a87af/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : google.golang.org/protobuf +Version: v1.27.1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.27.1/LICENSE: + +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/inf.v0 Version: v0.9.1 -License type (autodetected): BSD-3-Clause -Contents of "LICENSE": - - Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go - Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: gopkg.in/jcmturner/aescts.v1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/inf.v0@v0.9.1/LICENSE: + +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/jcmturner/aescts.v1 Version: v1.0.1 -License type (autodetected): Apache-2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/jcmturner/aescts.v1@v1.0.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. --------------------------------------------------------------------- -Dependency: gopkg.in/jcmturner/dnsutils.v1 + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/jcmturner/dnsutils.v1 Version: v1.0.1 -License type (autodetected): Apache-2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/jcmturner/dnsutils.v1@v1.0.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
--------------------------------------------------------------------- -Dependency: gopkg.in/jcmturner/goidentity.v3 + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/jcmturner/goidentity.v3 Version: v3.0.0 -License type (autodetected): Apache-2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/jcmturner/goidentity.v3@v3.0.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. --------------------------------------------------------------------- -Dependency: gopkg.in/jcmturner/gokrb5.v7 + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/jcmturner/gokrb5.v7 Version: v7.5.0 -License type (autodetected): Apache-2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/jcmturner/gokrb5.v7@v7.5.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. --------------------------------------------------------------------- -Dependency: gopkg.in/jcmturner/rpc.v1 + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/jcmturner/rpc.v1 Version: v1.1.0 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: gopkg.in/yaml.v2 -Version: v2.3.0 -License type (autodetected): Apache-2.0 -Contents of "NOTICE": - - Copyright 2011-2016 Canonical Ltd. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------- -Dependency: gopkg.in/yaml.v2 -Version: v2.3.0 -License type (autodetected): MIT -Contents of "LICENSE.libyaml": - - The following files were ported to Go from C files of libyaml, and thus - are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - - Copyright (c) 2006 Kirill Simonov - - Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies - of the Software, and to permit persons to whom the Software is furnished to do - so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - --------------------------------------------------------------------- -Dependency: howett.net/plist -Revision: 3b63eb3a43b5 -License type (autodetected): BSD-2-Clause -Contents of "LICENSE": - - Copyright (c) 2013, Dustin L. Howett. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - The views and conclusions contained in the software and documentation are those - of the authors and should not be interpreted as representing official policies, - either expressed or implied, of the FreeBSD Project. 
- - -------------------------------------------------------------------------------- - Parts of this package were made available under the license covering - the Go language and all attended core libraries. That license follows. - -------------------------------------------------------------------------------- - - Copyright (c) 2012 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------- -Dependency: k8s.io/api -Version: v0.18.3 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: k8s.io/apimachinery -Version: v0.18.3 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: k8s.io/client-go -Replacement: k8s.io/client-go -Version: v0.18.3 -License type (autodetected): Apache-2.0 - --------------------------------------------------------------------- -Dependency: k8s.io/klog -Version: v1.0.0 -License type (autodetected): Apache-2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/jcmturner/rpc.v1@v1.1.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at --------------------------------------------------------------------- -Dependency: k8s.io/utils -Revision: a9aa75ae1b89 -License type (autodetected): Apache-2.0 + http://www.apache.org/licenses/LICENSE-2.0 --------------------------------------------------------------------- -Dependency: sigs.k8s.io/structured-merge-diff/v3 -Version: v3.0.0 -License type (autodetected): Apache-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : howett.net/plist +Version: v0.0.0-20201203080718-1454fab16a06 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/howett.net/plist@v0.0.0-20201203080718-1454fab16a06/LICENSE: + +Copyright (c) 2013, Dustin L. Howett. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of the FreeBSD Project. + +-------------------------------------------------------------------------------- +Parts of this package were made available under the license covering +the Go language and all attended core libraries. That license follows. +-------------------------------------------------------------------------------- + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : k8s.io/api +Version: v0.19.4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.19.4/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : k8s.io/apimachinery +Version: v0.19.4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.19.4/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : k8s.io/client-go +Version: v0.19.4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.19.4/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : k8s.io/klog/v2 +Version: v2.2.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.2.0/LICENSE: + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : k8s.io/utils +Version: v0.0.0-20200729134348-d5654de09c73 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20200729134348-d5654de09c73/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : sigs.k8s.io/structured-merge-diff/v4 +Version: v4.0.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.0.1/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 --------------------------------------------------------------------- -Dependency: sigs.k8s.io/yaml + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : sigs.k8s.io/yaml Version: v1.2.0 -License type (autodetected): MIT -Contents of "LICENSE": - - The MIT License (MIT) - - Copyright (c) 2014 Sam Ghods - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - - - Copyright (c) 2012 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/yaml@v1.2.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+
+
diff --git a/README.md b/README.md
index 7fe8d948011..6d006940b59 100644
--- a/README.md
+++ b/README.md
@@ -3,21 +3,20 @@

 # APM Server

-The APM Server receives data from the Elastic APM agents and stores the data into Elasticsearch.
+The APM Server receives data from Elastic APM agents and transforms it into Elasticsearch documents.
+Read more about Elastic APM at [elastic.co/apm](https://www.elastic.co/apm).

-[Read more about Elastic APM](https://www.elastic.co/products/apm).
-
-Please take questions or feedback to the [Discuss forum](https://discuss.elastic.co/c/apm) for APM.
+For questions and feature requests, visit the [discussion forum](https://discuss.elastic.co/c/apm).

 ## Getting Started

-To get started with APM please see our [Getting Started Guide](https://www.elastic.co/guide/en/apm/get-started).
+To get started with APM, see our [Quick start guide](https://www.elastic.co/guide/en/apm/get-started/current/install-and-run.html).

 ## APM Server Development

 ### Requirements

-* [Golang](https://golang.org/dl/) 1.14.7
+* [Golang](https://golang.org/dl/) 1.16.x

 ### Install

@@ -75,6 +74,21 @@ To generate required configuration files and templates run:
 make update
 ```

+### Generate package
+
+APM Server includes a script to generate an integration package to run with Fleet.
+To generate a package, run:
+
+```
+make fields gen-package
+```
+
+That command takes the existing `fields.yml` files and splits them into `ecs.yml` and `fields.yml` files for each data stream type.
+It also generates a `README.md` with a field reference that will be shown in the integration package.
+
+After generating a package, `apmpackage/apm` should be manually copied to `elastic/integrations`.
+Then follow the instructions in https://github.com/elastic/integrations/blob/master/CONTRIBUTING.md.
+
 ### Cleanup

 To clean APM Server source code, run the following commands:

@@ -93,7 +107,8 @@ For further development, check out the [beat developer guide](https://www.elasti

 ### Contributing

-See [contributing](CONTRIBUTING.md) for details about reporting bugs or requesting features in APM server.
+See [contributing](CONTRIBUTING.md) for details about reporting bugs, requesting features,
+or contributing to APM Server.

 ### Releases

@@ -158,4 +173,4 @@ When building images for testing pre-release versions, we recommend setting `SNA

 ## Documentation

-The [Documentation](https://www.elastic.co/guide/en/apm/server/current/index.html) for the APM Server can be found in the `docs` folder.
+[Documentation](https://www.elastic.co/guide/en/apm/server/current/index.html) for the APM Server can be found in the `docs` folder.
diff --git a/RELEASES.md b/RELEASES.md
index 3d4fb97aec0..f9a160d11f9 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -11,10 +11,6 @@
   run `make update-beats` in the branch from which the new branch will be created
   before FF to recognize potential issues

-* Update Kibana Index Pattern
-
-  If fields are not up-to-date, run `make update && script/update_kibana_objects.py` and create a PR.
-
 * Update Changelog

   * Review existing [changelogs/head](https://github.com/elastic/apm-server/tree/master/changelogs/head.asciidoc) to ensure all relevant notes have been added
@@ -33,11 +29,13 @@
   * update versions in `major.x` branch to next minor version, e.g. [#2804](https://github.com/elastic/apm-server/pull/2804)

 * Update to latest changes of [beats](https://github.com/elastic/beats/pulls/)
-  - When beats has merged all PRs and for minor releases created the new branch, update beats again.
+  * Update `BEATS_VERSION` to the release version in the top-level Makefile
+  * When beats has merged all PRs and, for minor releases, created the new branch, run `make update-beats` and commit the changes.

 * Ensure a branch or tag is created for the [go-elasticsearch](https://github.com/elastic/go-elasticsearch) library and update to it.
+  `go get github.com/elastic/go-elasticsearch/v$major@$major.$minor`
+
 * The following may also need to be updated manually:
   * APM Overview's [release highlights](https://github.com/elastic/apm-server/blob/master/docs/guide/apm-release-notes.asciidoc) - Anything exciting across the APM stack!
   * APM Overview's [breaking changes](https://github.com/elastic/apm-server/blob/master/docs/guide/apm-breaking-changes.asciidoc) - Any breaking changes across the APM stack.
@@ -46,6 +44,10 @@

 * For major releases, update and smoke test the dev quick start [`docker-compose.yml`](https://github.com/elastic/apm-server/blob/master/docs/guide/docker-compose.yml).

+## After feature freeze
+
+* Update [.mergify.yml](https://github.com/elastic/apm-server/blob/master/.mergify.yml) with a new backport rule for the next version, as sketched below.
+
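As an illustration of the backport rule mentioned just above, a Mergify rule for a hypothetical `7.16` branch might look like the following sketch; the branch name and label are assumptions, not values taken from this change:

```yaml
pull_request_rules:
  # Backport merged PRs labelled "backport-7.16" to the 7.16 branch.
  - name: backport patches to 7.16 branch
    conditions:
      - merged
      - base=master
      - label=backport-7.16
    actions:
      backport:
        branches:
          - "7.16"
```

Only the branch name and its matching label change from release to release.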
 ## On release day

 * New branches need to be added to `conf.yml` in the `elastic/docs` repo. [Example](https://github.com/elastic/docs/pull/893/files#diff-4a701a5adb4359c6abf9b8e1cb38819fR925). **This is handled by the docs release manager.**
diff --git a/_meta/beat.yml b/_meta/beat.yml
index c200ab22ad3..ae2a6aab41d 100644
--- a/_meta/beat.yml
+++ b/_meta/beat.yml
@@ -6,6 +6,47 @@ apm-server:
   # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
   host: "{{ .listen_hostport }}"

+  # Agent authorization configuration. If no methods are defined, all requests will be allowed.
+  #auth:
+    # Agent authorization using Elasticsearch API Keys.
+    #api_key:
+      #enabled: false
+      #
+      # Restrict how many unique API keys are allowed per minute. Should be set to at least the number of different
+      # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch.
+      #limit: 100
+
+    # Define a shared secret token for authorizing agents using the "Bearer" authorization method.
+    #secret_token:
+
+    # Allow anonymous access only for specified agents and/or services. This is primarily intended to allow
+    # limited access for untrusted agents, such as Real User Monitoring.
+    #anonymous:
+      # By default anonymous auth is automatically enabled when either auth.api_key or
+      # auth.secret_token is enabled, and RUM is enabled. Otherwise, anonymous auth is
+      # disabled by default.
+      #
+      # When anonymous auth is enabled, only agents matching allow_agent and services
+      # matching allow_service are allowed. See below for details on default values for
+      # allow_agent.
+      #enabled:
+
+      # Allow anonymous access only for specified agents.
+      #allow_agent: [rum-js, js-base]
+
+      # Allow anonymous access only for specified service names. By default, all service names are allowed.
+      #allow_service: []
+
+      # Rate-limit anonymous access by IP and number of events.
+      #rate_limit:
+        # Rate limiting is defined per unique client IP address, for a limited number of IP addresses.
+        # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
+        #ip_limit: 1000
+
+        # Defines the maximum number of events allowed per IP per second. Defaults to 300. The overall
+        # maximum event throughput for anonymous access is (event_limit * ip_limit).
+        #event_limit: 300
+
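To make the commented-out options above concrete, here is a minimal sketch of an uncommented `auth` block; the token and the service name are hypothetical placeholders, not defaults from this change:

```yaml
apm-server:
  auth:
    # Hypothetical token; agents would then send "Authorization: Bearer changeme".
    secret_token: "changeme"
    anonymous:
      # Re-open access for unauthenticated RUM agents only, under a rate limit.
      enabled: true
      allow_agent: [rum-js, js-base]
      # Hypothetical service name; an empty list would allow any service.
      allow_service: [my-frontend]
      rate_limit:
        ip_limit: 1000
        event_limit: 300
```

Setting `auth.secret_token` alone is enough to require the Bearer scheme from all agents; the `anonymous` block then re-admits only the listed RUM agents and services.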
   # Maximum permitted size in bytes of a request's header accepted by the server to be processed.
   #max_header_size: 1048576

@@ -27,10 +68,19 @@
   # Maximum number of new connections to accept simultaneously (0 means unlimited).
   #max_connections: 0

+  # Custom HTTP headers to add to all HTTP responses, e.g. for security policy compliance.
+  #response_headers:
+  #  X-My-Header: Contents of the header
+
   # If true (default), APM Server captures the IP of the instrumented service
   # or the IP and User Agent of the real user (RUM requests).
   #capture_personal_data: true

+  # If specified, APM Server will record this value in events which have no service environment
+  # defined, and add it to agent configuration queries to Kibana when none is specified in the
+  # request from the agent.
+  #default_service_environment:
+
   # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
   #expvar:
     #enabled: false
@@ -93,12 +143,17 @@
   # Agents include the token in the following format: Authorization: Bearer <secret-token>.
   # It is recommended to use an authorization token in combination with SSL enabled,
   # and save the token in the apm-server keystore.
+  #
+  # WARNING: This configuration is deprecated and replaced with `apm-server.auth.secret_token`, and will be removed
+  # in the 8.0 release. If that config is defined, this one will be ignored.
   #secret_token:

   # Enable API key authorization by setting enabled to true. By default API key support is disabled.
   # Agents include a valid API key in the following format: Authorization: ApiKey <token>.
   # The key must be the base64 encoded representation of the API key's "id:key".
-  # This is an experimental feature, use with care.
+  #
+  # WARNING: This configuration is deprecated and replaced with `apm-server.auth.api_key`, and will be removed
+  # in the 8.0 release. If that config is defined, this one will be ignored.
   #api_key:
     #enabled: false

@@ -133,10 +188,22 @@
     #ssl.enabled: true

     # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
-    # Configure SSL verification mode. If `none` is configured, all server hosts
-    # and certificates will be accepted. In this mode, SSL based connections are
-    # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-    # `full`.
+    #
+    # Control the verification of Elasticsearch certificates. Valid values are:
+    # * full, which verifies that the provided certificate is signed by a trusted
+    #   authority (CA) and also verifies that the server's hostname (or IP address)
+    #   matches the names identified within the certificate.
+    # * strict, which verifies that the provided certificate is signed by a trusted
+    #   authority (CA) and also verifies that the server's hostname (or IP address)
+    #   matches the names identified within the certificate. If the Subject Alternative
+    #   Name is empty, it returns an error.
+    # * certificate, which verifies that the provided certificate is signed by a
+    #   trusted authority (CA), but does not perform any hostname verification.
+    # * none, which performs no verification of the server's certificate. This
+    #   mode disables many of the security benefits of SSL/TLS and should only be used
+    #   after very careful consideration. It is primarily intended as a temporary
+    #   diagnostic mechanism when attempting to resolve TLS errors; its use in
+    #   production environments is strongly discouraged.
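+    #
+    # For example, uncommenting the next line, instead of the default `full` shown
+    # below, would check the CA signature but skip hostname verification. This is an
+    # illustrative sketch only, not a recommended production setting:
+    #ssl.verification_mode: certificate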
#ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -175,6 +242,10 @@ apm-server: #rum: #enabled: false + # Rate-limit RUM agents. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.rate_limit`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. #event_rate: # Defines the maximum amount of events allowed to be sent to the APM Server RUM @@ -188,19 +259,27 @@ apm-server: #-- General RUM settings + # A list of service names to allow, to limit service-specific indices and data streams + # created for unauthenticated RUM events. + # If the list is empty, any service name is allowed. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.allow_service`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. + #allow_service_names: [] + # A list of permitted origins for real user monitoring. # User-agents will send an origin header that will be validated against this list. # An origin is made of a protocol scheme, host and port, without the url path. # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com) # If an item in the list is a single '*', everything will be allowed. - #allow_origins : ['*'] + #allow_origins: ['*'] # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type", # "Content-Encoding", and "Accept" - #allow_headers : [] + #allow_headers: [] # Custom HTTP headers to add to RUM responses, e.g. for security policy compliance. - #response_headers : + #response_headers: # X-My-Header: Contents of the header # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. @@ -219,9 +298,15 @@ apm-server: # Sourcemapping is enabled by default. #enabled: true - # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration. + # Timeout for fetching source maps. + #timeout: 5s + + # Source maps may be fetched from Elasticsearch by using the + # output.elasticsearch configuration. # A different instance must be configured when using any other output. # This setting only affects sourcemap reads - the output determines where sourcemaps are written. + # Note: Configuring elasticsearch is not supported if apm-server is being + # managed by Fleet. #elasticsearch: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (`http` and `9200`). @@ -274,10 +359,22 @@ apm-server: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kibana certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. 
If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -384,6 +481,12 @@ apm-server: # When enabling Jaeger integration, APM Server acts as Jaeger collector. It supports jaeger.thrift over HTTP # and gRPC. This is an experimental feature, use with care. + # + # WARNING: This configuration is deprecated, and will be removed in the 8.0 release. + # + # Jaeger gRPC is now served on the same port as Elastic APM agents, defined by the + # "apm-server.host" configuration; it is implicitly enabled, and an agent tag called + # "elastic-apm-auth" is required when auth is enabled. #jaeger: #grpc: # Set to true to enable the Jaeger gRPC collector service. @@ -607,10 +710,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -736,10 +851,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Logstash certificates. 
Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -883,10 +1010,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kafka certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -1055,12 +1194,11 @@ output.elasticsearch: #interval: 0 # Set to true to log messages in json format. -#logging.json: false +#logging.json: true # Set to true, to log messages with minimal required Elastic Common Schema (ECS) -# information. Recommended to use in combination with `logging.json=true` -# Defaults to false. -#logging.ecs: false +# information. Recommended to use in combination with `logging.json=true`. +#logging.ecs: true #=============================== HTTP Endpoint =============================== @@ -1150,10 +1288,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. 
In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to diff --git a/_meta/fields.common.yml b/_meta/fields.common.yml deleted file mode 100644 index 807961a35bb..00000000000 --- a/_meta/fields.common.yml +++ /dev/null @@ -1,793 +0,0 @@ -- key: apm - title: General APM - description: > - Fields common to various APM events. - fields: - - name: processor.name - type: keyword - description: Processor name. - - - name: processor.event - type: keyword - description: Processor event. - - - name: timestamp - type: group - fields: - - name: us - type: long - count: 1 - description: > - Timestamp of the event in microseconds since Unix epoch. - - - name: url - type: group - description: > - A complete Url, with scheme, host and path. - dynamic: false - fields: - - - name: scheme - type: keyword - description: > - The protocol of the request, e.g. "https:". - overwrite: true - - - name: full - type: keyword - description: > - The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top. - overwrite: true - - - name: domain - type: keyword - description: > - The hostname of the request, e.g. "example.com". - overwrite: true - - - name: port - type: long - description: > - The port of the request, e.g. 443. - overwrite: true - - - name: path - type: keyword - description: > - The path of the request, e.g. "/search". - overwrite: true - - - name: query - type: keyword - description: > - The query string of the request, e.g. "q=elasticsearch". - overwrite: true - - - name: fragment - type: keyword - description: > - A fragment specifying a location in a web page , e.g. "top". - overwrite: true - - - name: http - type: group - dynamic: false - fields: - - - name: version - type: keyword - description: > - The http version of the request leading to this event. - overwrite: true - - - name: request - type: group - fields: - - - name: method - type: keyword - description: > - The http method of the request leading to this event. - overwrite: true - - - name: headers - type: object - enabled: false - description: > - The canonical headers of the monitored HTTP request. - - - name: referrer - type: keyword - ignore_above: 1024 - overwrite: true - description: Referrer for this HTTP request. 
- - - name: response - type: group - fields: - - - name: status_code - type: long - description: > - The status code of the HTTP response. - overwrite: true - - - name: finished - type: boolean - description: > - Used by the Node agent to indicate when in the response life cycle an error has occurred. - overwrite: true - - - name: headers - type: object - enabled: false - description: > - The canonical headers of the monitored HTTP response. - - - name: labels - type: object - object_type_params: - - object_type: keyword - - object_type: boolean - - object_type: scaled_float - scaling_factor: 1000000 - dynamic: true - overwrite: true - description: > - A flat mapping of user-defined labels with string, boolean or number values. - - - name: service - type: group - dynamic: false - description: > - Service fields. - fields: - - name: name - type: keyword - description: > - Immutable name of the service emitting this event. - overwrite: true - - - name: version - type: keyword - description: > - Version of the service emitting this event. - overwrite: true - - - name: environment - type: keyword - description: > - Service environment. - - - name: node - type: group - fields: - - name: name - type: keyword - description: > - Unique meaningful name of the service node. - overwrite: true - - - name: language - type: group - fields: - - - name: name - type: keyword - description: > - Name of the programming language used. - - - name: version - type: keyword - description: > - Version of the programming language used. - - - name: runtime - type: group - fields: - - - name: name - type: keyword - description: > - Name of the runtime used. - - - name: version - type: keyword - description: > - Version of the runtime used. - - - name: framework - type: group - fields: - - - name: name - type: keyword - description: > - Name of the framework used. - - - name: version - type: keyword - description: > - Version of the framework used. - - - name: transaction - type: group - dynamic: false - fields: - - name: id - type: keyword - description: > - The transaction ID. - - name: sampled - type: boolean - description: > - Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. - - name: type - type: keyword - description: > - Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) - - name: name - type: keyword - multi_fields: - - name: text - type: text - description: > - Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). - - - name: duration - type: group - fields: - - name: count - type: long - - name: sum - type: group - fields: - - name: us - type: long - - - name: self_time - type: group - description: > - Portion of the transaction's duration where no direct child was running - fields: - - name: count - type: long - - name: sum - type: group - fields: - - name: us - type: long - - - name: breakdown - type: group - description: > - Counter for collected breakdowns for the transaction - fields: - - name: count - type: long - - - name: span - type: group - dynamic: false - fields: - - name: type - type: keyword - count: 1 - description: > - Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). - - - name: subtype - type: keyword - count: 1 - description: > - A further sub-division of the type (e.g. 
postgresql, elasticsearch) - - - name: self_time - type: group - description: > - Portion of the span's duration where no direct child was running - fields: - - name: count - type: long - - name: sum - type: group - fields: - - name: us - type: long - - - name: trace - type: group - dynamic: false - fields: - - name: id - type: keyword - description: > - The ID of the trace to which the event belongs to. - - - name: parent - type: group - dynamic: false - fields: - - name: id - type: keyword - description: > - The ID of the parent event. - - - name: agent - type: group - dynamic: false - fields: - - - name: name - type: keyword - description: > - Name of the agent used. - overwrite: true - - - name: version - type: keyword - description: > - Version of the agent used. - overwrite: true - - - name: ephemeral_id - type: keyword - description: > - The Ephemeral ID identifies a running process. - overwrite: true - - - name: container - type: group - dynamic: false - title: Container - description: > - Container fields are used for meta information about the specific container - that is the source of information. These fields help correlate data based - containers from any runtime. - fields: - - - name: id - type: keyword - description: > - Unique container id. - overwrite: true - - - name: kubernetes - type: group - dynamic: false - title: Kubernetes - description: > - Kubernetes metadata reported by agents - fields: - - - name: namespace - type: keyword - description: > - Kubernetes namespace - overwrite: true - - - name: node - type: group - fields: - - name: name - type: keyword - description: > - Kubernetes node name - overwrite: true - - - name: pod - type: group - fields: - - - name: name - type: keyword - description: > - Kubernetes pod name - overwrite: true - - - name: uid - type: keyword - description: > - Kubernetes Pod UID - overwrite: true - - - name: host - type: group - dynamic: false - description: > - Optional host fields. - fields: - - - name: architecture - type: keyword - description: > - The architecture of the host the event was recorded on. - overwrite: true - - - name: hostname - type: keyword - description: > - The hostname of the host the event was recorded on. - overwrite: true - - - name: name - type: keyword - description: > - Name of the host the event was recorded on. - It can contain same information as host.hostname or a name specified by the user. - overwrite: true - - - name: ip - type: ip - description: > - IP of the host that records the event. - overwrite: true - - - name: os - title: Operating System - group: 2 - description: > - The OS fields contain information about the operating system. - type: group - fields: - - name: platform - type: keyword - description: > - The platform of the host the event was recorded on. - overwrite: true - - - name: process - type: group - dynamic: false - description: > - Information pertaining to the running process where the data was collected - fields: - - name: args - level: extended - type: keyword - description: > - Process arguments. - - May be filtered to protect sensitive information. - overwrite: true - - - name: pid - type: long - description: > - Numeric process ID of the service process. - overwrite: true - - - name: ppid - type: long - description: > - Numeric ID of the service's parent process. - overwrite: true - - - name: title - type: keyword - description: > - Service process title. 
- overwrite: true - - - name: observer - type: group - dynamic: false - fields: - - - name: listening - type: keyword - description: > - Address the server is listening on. - - - name: hostname - type: keyword - overwrite: true - description: > - Hostname of the APM Server. - - - name: version - type: keyword - overwrite: true - description: > - APM Server version. - - - name: version_major - type: byte - description: > - Major version number of the observer - - - name: type - type: keyword - overwrite: true - description: > - The type will be set to `apm-server`. - - - name: user - type: group - dynamic: false - fields: - - - name: name - type: keyword - description: > - The username of the logged in user. - overwrite: true - - - name: id - type: keyword - description: > - Identifier of the logged in user. - overwrite: true - - - name: email - type: keyword - description: > - Email of the logged in user. - overwrite: true - - - name: client - dynamic: false - type: group - fields: - - name: ip - type: ip - description: > - IP address of the client of a recorded event. - This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. - overwrite: true - - - name: source - dynamic: false - type: group - fields: - - name: ip - type: ip - description: > - IP address of the source of a recorded event. - This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. - overwrite: true - - - name: destination - title: Destination - group: 2 - description: 'Destination fields describe details about the destination of a packet/event. - - Destination fields are usually populated in conjunction with source fields.' - type: group - fields: - - name: address - level: extended - type: keyword - ignore_above: 1024 - description: 'Some event destination addresses are defined ambiguously. The - event will sometimes list an IP, a domain or a unix socket. You should always - store the raw address in the `.address` field. - - Then it should be duplicated to `.ip` or `.domain`, depending on which one - it is.' - overwrite: true - - - name: ip - level: core - type: ip - description: 'IP address of the destination. - - Can be one of multiple IPv4 or IPv6 addresses.' - overwrite: true - - - name: port - level: core - type: long - format: string - description: Port of the destination. - overwrite: true - - - name: user_agent - dynamic: false - title: User agent - description: > - The user_agent fields normally come from a browser request. They often - show up in web service logs coming from the parsed user agent string. - type: group - overwrite: true - fields: - - - name: original - type: keyword - description: > - Unparsed version of the user_agent. - example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" - overwrite: true - - multi_fields: - - name: text - type: text - description: > - Software agent acting on behalf of a user, eg. a web browser / OS combination. - overwrite: true - - - name: name - type: keyword - overwrite: true - example: Safari - description: > - Name of the user agent. - - - name: version - type: keyword - overwrite: true - description: > - Version of the user agent. - example: 12.0 - - - name: device - type: group - overwrite: true - title: Device - description: > - Information concerning the device.
- fields: - - - name: name - type: keyword - overwrite: true - example: iPhone - description: > - Name of the device. - - - name: os - type: group - overwrite: true - title: Operating System - description: > - The OS fields contain information about the operating system. - fields: - - - name: platform - type: keyword - overwrite: true - description: > - Operating system platform (such centos, ubuntu, windows). - example: darwin - - - name: name - type: keyword - overwrite: true - example: "Mac OS X" - description: > - Operating system name, without the version. - - - name: full - type: keyword - overwrite: true - example: "Mac OS Mojave" - description: > - Operating system name, including the version or code name. - - - name: family - type: keyword - overwrite: true - example: "debian" - description: > - OS family (such as redhat, debian, freebsd, windows). - - - name: version - type: keyword - overwrite: true - example: "10.14.1" - description: > - Operating system version as a raw string. - - - name: kernel - type: keyword - overwrite: true - example: "4.4.0-112-generic" - description: > - Operating system kernel version as a raw string. - - - name: experimental - type: object - dynamic: true - description: Additional experimental data sent by the agents. - - - name: cloud - title: Cloud - group: 2 - type: group - description: > - Cloud metadata reported by agents - fields: - - name: account - type: group - dynamic: false - fields: - - name: id - level: extended - type: keyword - ignore_above: 1024 - description: Cloud account ID - overwrite: true - - name: name - level: extended - type: keyword - ignore_above: 1024 - description: Cloud account name - overwrite: true - - name: availability_zone - level: extended - type: keyword - ignore_above: 1024 - description: Cloud availability zone name - example: us-east1-a - overwrite: true - - name: instance - type: group - dynamic: false - fields: - - name: id - level: extended - type: keyword - ignore_above: 1024 - description: Cloud instance/machine ID - overwrite: true - - name: name - level: extended - type: keyword - ignore_above: 1024 - description: Cloud instance/machine name - overwrite: true - - name: machine - type: group - dynamic: false - fields: - - name: type - level: extended - type: keyword - ignore_above: 1024 - description: Cloud instance/machine type - example: t2.medium - overwrite: true - - name: project - type: group - dynamic: false - fields: - - name: id - level: extended - type: keyword - ignore_above: 1024 - description: Cloud project ID - overwrite: true - - name: name - level: extended - type: keyword - ignore_above: 1024 - description: Cloud project name - overwrite: true - - name: provider - level: extended - type: keyword - ignore_above: 1024 - description: Cloud provider name - example: gcp - overwrite: true - - name: region - level: extended - type: keyword - ignore_above: 1024 - description: Cloud region name - example: us-east1 - overwrite: true - - - name: event - type: group - fields: - - - name: outcome - level: core - type: keyword - ignore_above: 1024 - description: > - `event.outcome` simply denotes whether the event represents a success or a - failure from the perspective of the entity that produced the event. 
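Agents and the server typically derive `event.outcome` from protocol-level results; the following is a hedged Go sketch of one plausible mapping from HTTP status codes. The function name and the exact thresholds are illustrative assumptions, not the server's actual rule, which varies between the client and server view of a request.

```go
package main

import "fmt"

// outcomeFromHTTPStatus sketches one plausible derivation of event.outcome.
// Hypothetical helper: real rules differ per event type.
func outcomeFromHTTPStatus(statusCode int) string {
	switch {
	case statusCode == 0:
		return "unknown" // no response recorded
	case statusCode >= 500:
		return "failure"
	default:
		return "success"
	}
}

func main() {
	fmt.Println(outcomeFromHTTPStatus(200), outcomeFromHTTPStatus(503)) // success failure
}
```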
- example: success - overwrite: true diff --git a/agentcfg/fetch.go b/agentcfg/fetch.go index 7b975f6d82a..1d76caedcb2 100644 --- a/agentcfg/fetch.go +++ b/agentcfg/fetch.go @@ -18,7 +18,10 @@ package agentcfg import ( + "bytes" "context" + "encoding/json" + "fmt" "io" "io/ioutil" "net/http" @@ -30,17 +33,24 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/apm-server/convert" + "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/kibana" - "github.com/elastic/apm-server/utility" ) // Error Messages used to signal fetching errors const ( - ErrMsgSendToKibanaFailed = "sending request to kibana failed" - ErrMsgReadKibanaResponse = "unable to read Kibana response body" - ErrUnauthorized = "Unauthorized" - TransactionSamplingRateKey = "transaction_sample_rate" + ErrMsgKibanaDisabled = "disabled Kibana configuration" + ErrMsgKibanaVersionNotCompatible = "not a compatible Kibana version" + ErrMsgNoKibanaConnection = "unable to retrieve connection to Kibana" + ErrMsgReadKibanaResponse = "unable to read Kibana response body" + ErrMsgSendToKibanaFailed = "sending request to kibana failed" + ErrUnauthorized = "Unauthorized" + TransactionSamplingRateKey = "transaction_sample_rate" +) + +var ( + errMsgKibanaDisabled = errors.New(ErrMsgKibanaDisabled) + errMsgNoKibanaConnection = errors.New(ErrMsgNoKibanaConnection) ) // KibanaMinVersion specifies the minimal required version of Kibana @@ -49,34 +59,108 @@ var KibanaMinVersion = common.MustNewVersion("7.5.0") const endpoint = "/api/apm/settings/agent-configuration/search" -// Fetcher holds static information and information shared between requests. +// Fetcher defines a common interface to retrieving agent config. +type Fetcher interface { + Fetch(context.Context, Query) (Result, error) +} + +// NewFetcher returns a new Fetcher based on the provided config. +func NewFetcher(cfg *config.Config) Fetcher { + if cfg.AgentConfigs != nil || !cfg.Kibana.Enabled { + // Direct agent configuration is present, disable communication + // with kibana. + return NewDirectFetcher(cfg.AgentConfigs) + } + var client kibana.Client + if cfg.Kibana.Enabled { + client = kibana.NewConnectingClient(&cfg.Kibana) + } + return NewKibanaFetcher(client, cfg.KibanaAgentConfig.Cache.Expiration) +} + +// KibanaFetcher holds static information and information shared between requests. // It implements the Fetch method to retrieve agent configuration information. -type Fetcher struct { +type KibanaFetcher struct { *cache logger *logp.Logger client kibana.Client } -// NewFetcher returns a Fetcher instance. -func NewFetcher(client kibana.Client, cacheExpiration time.Duration) *Fetcher { +// NewKibanaFetcher returns a KibanaFetcher instance. +func NewKibanaFetcher(client kibana.Client, cacheExpiration time.Duration) *KibanaFetcher { logger := logp.NewLogger("agentcfg") - return &Fetcher{ + return &KibanaFetcher{ client: client, logger: logger, cache: newCache(logger, cacheExpiration), } } +// ValidationError encapsulates a validation error from the KibanaFetcher. +// ValidationError implements the error interface. +type ValidationError struct { + keyword, body string + err error +} + +// Keyword returns the keyword for the ValidationError. +func (v *ValidationError) Keyword() string { return v.keyword } + +// Body returns the body for the ValidationError. 
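One subtle effect of the switch from `convert.ToReader` to `json.Encoder` in `KibanaFetcher.Fetch` above: the encoder always terminates the encoded value with a newline, which is why the updated test later in this diff asserts `"{\"error\":\"an error\"}"+"\n"`. A minimal standalone sketch:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	// json.Encoder, unlike json.Marshal, appends '\n' after each value.
	json.NewEncoder(&buf).Encode(map[string]string{"error": "an error"})
	fmt.Printf("%q\n", buf.String()) // "{\"error\":\"an error\"}\n"
}
```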
+func (v *ValidationError) Body() string { return v.body } + +// Error() implements the error interface. +func (v *ValidationError) Error() string { return v.err.Error() } + +// Validate validates the currently configured KibanaFetcher. +func (f *KibanaFetcher) validate(ctx context.Context) *ValidationError { + if f.client == nil { + return &ValidationError{ + keyword: ErrMsgKibanaDisabled, + body: ErrMsgKibanaDisabled, + err: errMsgKibanaDisabled, + } + } + if supported, err := f.client.SupportsVersion(ctx, KibanaMinVersion, true); !supported { + if err != nil { + return &ValidationError{ + keyword: ErrMsgNoKibanaConnection, + body: ErrMsgNoKibanaConnection, + err: errMsgNoKibanaConnection, + } + } + + version, _ := f.client.GetVersion(ctx) + errMsg := fmt.Sprintf( + "%s: min version %+v, configured version %+v", + ErrMsgKibanaVersionNotCompatible, KibanaMinVersion, version.String(), + ) + return &ValidationError{ + keyword: ErrMsgKibanaVersionNotCompatible, + body: errMsg, + err: errors.New(errMsg), + } + } + return nil +} + // Fetch retrieves agent configuration, fetched from Kibana or a local temporary cache. -func (f *Fetcher) Fetch(ctx context.Context, query Query) (Result, error) { +func (f *KibanaFetcher) Fetch(ctx context.Context, query Query) (Result, error) { + if err := f.validate(ctx); err != nil { + return zeroResult(), err + } req := func() (Result, error) { - return newResult(f.request(ctx, convert.ToReader(query))) + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(query); err != nil { + return Result{}, err + } + return newResult(f.request(ctx, &buf)) } result, err := f.fetch(query, req) return sanitize(query.InsecureAgents, result), err } -func (f *Fetcher) request(ctx context.Context, r io.Reader) ([]byte, error) { +func (f *KibanaFetcher) request(ctx context.Context, r io.Reader) ([]byte, error) { resp, err := f.client.Send(ctx, http.MethodPost, endpoint, nil, nil, r) if err != nil { return nil, errors.Wrap(err, ErrMsgSendToKibanaFailed) @@ -107,7 +191,7 @@ func sanitize(insecureAgents []string, result Result) Result { } settings := Settings{} for k, v := range result.Source.Settings { - if utility.Contains(k, UnrestrictedSettings) { + if UnrestrictedSettings[k] { settings[k] = v } } @@ -122,3 +206,59 @@ func containsAnyPrefix(s string, prefixes []string) bool { } return false } + +type DirectFetcher struct { + cfgs []config.AgentConfig +} + +func NewDirectFetcher(cfgs []config.AgentConfig) *DirectFetcher { + return &DirectFetcher{cfgs} +} + +// Fetch finds a matching AgentConfig based on the received Query. +// Order of precedence: +// - service.name and service.environment match an AgentConfig +// - service.name matches an AgentConfig, service.environment == "" +// - service.environment matches an AgentConfig, service.name == "" +// - an AgentConfig without a name or environment set +// Return an empty result if no matching result is found. 
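To make the precedence rules just listed concrete, here is a hedged usage sketch of the `DirectFetcher` implemented below. It assumes the import paths appearing in this diff and invented service names; the exact name+environment pair wins over the name-only entry:

```go
package main

import (
	"context"
	"fmt"

	"github.com/elastic/apm-server/agentcfg"
	"github.com/elastic/apm-server/beater/config"
)

func main() {
	f := agentcfg.NewDirectFetcher([]config.AgentConfig{
		{Service: config.Service{Name: "opbeans", Environment: ""},
			Config: map[string]string{"transaction_sample_rate": "0.5"}},
		{Service: config.Service{Name: "opbeans", Environment: "production"},
			Config: map[string]string{"transaction_sample_rate": "0.1"}},
	})
	// Exact service.name + service.environment match takes precedence.
	result, _ := f.Fetch(context.Background(), agentcfg.Query{
		Service: agentcfg.Service{Name: "opbeans", Environment: "production"},
	})
	fmt.Println(result.Source.Settings) // map[transaction_sample_rate:0.1]
}
```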
+func (f *DirectFetcher) Fetch(_ context.Context, query Query) (Result, error) { + name, env := query.Service.Name, query.Service.Environment + result := zeroResult() + var nameConf, envConf, defaultConf *config.AgentConfig + + for i, cfg := range f.cfgs { + if cfg.Service.Name == name && cfg.Service.Environment == env { + nameConf = &f.cfgs[i] + break + } else if cfg.Service.Name == name && cfg.Service.Environment == "" { + nameConf = &f.cfgs[i] + } else if cfg.Service.Name == "" && cfg.Service.Environment == env { + envConf = &f.cfgs[i] + } else if cfg.Service.Name == "" && cfg.Service.Environment == "" { + defaultConf = &f.cfgs[i] + } + } + + if nameConf != nil { + result = Result{Source{ + Settings: nameConf.Config, + Etag: nameConf.Etag, + Agent: nameConf.AgentName, + }} + } else if envConf != nil { + result = Result{Source{ + Settings: envConf.Config, + Etag: envConf.Etag, + Agent: envConf.AgentName, + }} + } else if defaultConf != nil { + result = Result{Source{ + Settings: defaultConf.Config, + Etag: defaultConf.Etag, + Agent: defaultConf.AgentName, + }} + } + + return sanitize(query.InsecureAgents, result), nil +} diff --git a/agentcfg/fetch_test.go b/agentcfg/fetch_test.go index d59bec079af..d8450f901de 100644 --- a/agentcfg/fetch_test.go +++ b/agentcfg/fetch_test.go @@ -29,49 +29,50 @@ import ( "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/kibana" - "github.com/elastic/apm-server/tests" + "github.com/elastic/apm-server/kibana/kibanatest" ) type m map[string]interface{} var ( testExpiration = time.Nanosecond - mockVersion = *common.MustNewVersion("7.3.0") + mockVersion = *common.MustNewVersion("7.5.0") ) func TestFetcher_Fetch(t *testing.T) { t.Run("ExpectationFailed", func(t *testing.T) { - kb := tests.MockKibana(http.StatusExpectationFailed, m{"error": "an error"}, mockVersion, true) - _, err := NewFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) + kb := kibanatest.MockKibana(http.StatusExpectationFailed, m{"error": "an error"}, mockVersion, true) + _, err := NewKibanaFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) require.Error(t, err) - assert.Equal(t, "{\"error\":\"an error\"}", err.Error()) + assert.Equal(t, "{\"error\":\"an error\"}"+"\n", err.Error()) }) t.Run("NotFound", func(t *testing.T) { - kb := tests.MockKibana(http.StatusNotFound, m{}, mockVersion, true) - result, err := NewFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) + kb := kibanatest.MockKibana(http.StatusNotFound, m{}, mockVersion, true) + result, err := NewKibanaFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) require.NoError(t, err) assert.Equal(t, zeroResult(), result) }) t.Run("Success", func(t *testing.T) { - kb := tests.MockKibana(http.StatusOK, mockDoc(0.5), mockVersion, true) + kb := kibanatest.MockKibana(http.StatusOK, mockDoc(0.5), mockVersion, true) b, err := json.Marshal(mockDoc(0.5)) expectedResult, err := newResult(b, err) require.NoError(t, err) - result, err := NewFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) + result, err := NewKibanaFetcher(kb, testExpiration).Fetch(context.Background(), query(t.Name())) require.NoError(t, err) assert.Equal(t, expectedResult, result) }) t.Run("FetchFromCache", func(t *testing.T) { - fetch := func(f *Fetcher, kibanaSamplingRate, expectedSamplingRate float64) { + fetch := func(f *KibanaFetcher, kibanaSamplingRate, 
expectedSamplingRate float64) { client := func(samplingRate float64) kibana.Client { - return tests.MockKibana(http.StatusOK, mockDoc(samplingRate), mockVersion, true) + return kibanatest.MockKibana(http.StatusOK, mockDoc(samplingRate), mockVersion, true) } f.client = client(kibanaSamplingRate) @@ -85,7 +86,7 @@ func TestFetcher_Fetch(t *testing.T) { assert.Equal(t, expectedResult, result) } - fetcher := NewFetcher(nil, time.Minute) + fetcher := NewKibanaFetcher(nil, time.Minute) // nothing cached yet fetch(fetcher, 0.5, 0.5) @@ -105,10 +106,12 @@ func TestSanitize(t *testing.T) { Agent: "python", Settings: Settings{"transaction_sample_rate": "0.1", "capture_body": "false"}}} // full result as not requested for an insecure agent - assert.Equal(t, input, sanitize([]string{}, input)) + res := sanitize([]string{}, input) + assert.Equal(t, input, res) // no result for insecure agent - assert.Equal(t, zeroResult(), sanitize([]string{"rum-js"}, input)) + res = sanitize([]string{"rum-js"}, input) + assert.Equal(t, zeroResult(), res) // limited result for insecure agent insecureAgents := []string{"rum-js"} @@ -123,7 +126,8 @@ func TestSanitize(t *testing.T) { // no result for insecure agent prefix insecureAgents = []string{"Python"} input.Source.Agent = "Jaeger/Python" - assert.Equal(t, zeroResult(), sanitize(insecureAgents, input)) + res = sanitize(insecureAgents, input) + assert.Equal(t, zeroResult(), res) } func TestCustomJSON(t *testing.T) { @@ -151,3 +155,209 @@ func mockDoc(sampleRate float64) m { }, } } + +func TestDirectConfigurationPrecedence(t *testing.T) { + for _, tc := range []struct { + query Query + agentConfigs []config.AgentConfig + expectedSettings map[string]string + }{ + { + query: Query{ + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key1": "val2", "key2": "val2"}, + Etag: "def456", + }, + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key3": "val3"}, + Etag: "abc123", + }, + { + Service: config.Service{Name: "service1", Environment: "production"}, + Config: map[string]string{"key1": "val1"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{ + "key1": "val1", + }, + }, + { + query: Query{ + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key3": "val3"}, + Etag: "def456", + }, + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key1": "val1", "key2": "val2"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{ + "key1": "val1", + "key2": "val2", + }, + }, + { + query: Query{ + InsecureAgents: []string{"Jaeger"}, + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key3": "val3"}, + Etag: "def456", + }, + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key1": "val1", "key2": "val2"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{}, + }, + { + query: Query{ + InsecureAgents: []string{"Jaeger"}, + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + 
Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key3": "val3"}, + Etag: "def456", + }, + { + Service: config.Service{Name: "service1", Environment: ""}, + AgentName: "Jaeger/Python", + Config: map[string]string{"key1": "val1", "key2": "val2", "transaction_sample_rate": "0.1"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{ + "transaction_sample_rate": "0.1", + }, + }, + { + query: Query{ + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "service2", Environment: ""}, + Config: map[string]string{"key1": "val1", "key2": "val2"}, + Etag: "abc123", + }, + { + Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key3": "val3"}, + Etag: "def456", + }, + }, + expectedSettings: map[string]string{ + "key3": "val3", + }, + }, + { + query: Query{ + Service: Service{ + Name: "service1", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "not-found", Environment: ""}, + Config: map[string]string{"key1": "val1"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{}, + }, + { + query: Query{ + Service: Service{ + Name: "service2", + Environment: "production", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key1": "val1", "key2": "val2"}, + Etag: "abc123", + }, + { + Service: config.Service{Name: "service2", Environment: ""}, + Config: map[string]string{"key1": "val4", "key2": "val5"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{ + "key1": "val4", + "key2": "val5", + }, + }, + { + query: Query{ + Service: Service{ + Name: "service2", + Environment: "staging", + }, + }, + agentConfigs: []config.AgentConfig{ + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key1": "val1", "key2": "val2"}, + Etag: "abc123", + }, + { + Service: config.Service{Name: "", Environment: "production"}, + Config: map[string]string{"key1": "val4", "key2": "val5"}, + Etag: "abc123", + }, + { + Service: config.Service{Name: "", Environment: ""}, + Config: map[string]string{"key3": "val5", "key4": "val6"}, + Etag: "abc123", + }, + }, + expectedSettings: map[string]string{ + "key3": "val5", + "key4": "val6", + }, + }, + } { + f := NewDirectFetcher(tc.agentConfigs) + result, err := f.Fetch(context.Background(), tc.query) + require.NoError(t, err) + + assert.Equal(t, Settings(tc.expectedSettings), result.Source.Settings) + } +} diff --git a/agentcfg/model.go b/agentcfg/model.go index d9916a67104..f21630376d7 100644 --- a/agentcfg/model.go +++ b/agentcfg/model.go @@ -36,7 +36,7 @@ const ( var ( // UnrestrictedSettings are settings considered safe to be returned to all requesters, // including unauthenticated ones such as RUM. - UnrestrictedSettings = []string{"transaction_sample_rate"} + UnrestrictedSettings = map[string]bool{"transaction_sample_rate": true} ) // Result models a Kibana response @@ -66,7 +66,7 @@ type Query struct { // agent requests the Etag should be set, instead of the AppliedByAgent setting. // Use this flag when building queries for third party integrations, // such as Jaeger, that do not send an Etag in their request. 
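The field change just below swaps `*bool` for `bool`. Because the tag keeps `omitempty`, a false value is dropped from the JSON exactly as a nil pointer was, so the wire format is unchanged for the common case. A quick sketch with a trimmed-down stand-in struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// query is a reduced stand-in for agentcfg.Query, for illustration only.
type query struct {
	Etag                 string `json:"etag,omitempty"`
	MarkAsAppliedByAgent bool   `json:"mark_as_applied_by_agent,omitempty"`
}

func main() {
	a, _ := json.Marshal(query{Etag: "abc123"})
	b, _ := json.Marshal(query{Etag: "abc123", MarkAsAppliedByAgent: true})
	fmt.Println(string(a)) // {"etag":"abc123"} — false is omitted entirely
	fmt.Println(string(b)) // {"etag":"abc123","mark_as_applied_by_agent":true}
}
```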
- MarkAsAppliedByAgent *bool `json:"mark_as_applied_by_agent,omitempty"` + MarkAsAppliedByAgent bool `json:"mark_as_applied_by_agent,omitempty"` // InsecureAgents holds a set of prefixes for restricting results to those whose // agent name matches any of the specified prefixes. diff --git a/agentcfg/reporter.go b/agentcfg/reporter.go new file mode 100644 index 00000000000..a98a631addc --- /dev/null +++ b/agentcfg/reporter.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package agentcfg + +import ( + "context" + "sync" + "time" + + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" +) + +type Reporter struct { + f Fetcher + p model.BatchProcessor + interval time.Duration + logger *logp.Logger + resultc chan Result +} + +func NewReporter(f Fetcher, batchProcessor model.BatchProcessor, interval time.Duration) Reporter { + logger := logp.NewLogger("agentcfg") + return Reporter{ + f: f, + p: batchProcessor, + interval: interval, + logger: logger, + resultc: make(chan Result), + } +} + +func (r Reporter) Fetch(ctx context.Context, query Query) (Result, error) { + result, err := r.f.Fetch(ctx, query) + if err != nil { + return Result{}, err + } + // Only report configs when the query etag == current config etag, or + // when the agent indicates it has been applied. + if query.Etag == result.Source.Etag || query.MarkAsAppliedByAgent { + select { + case <-ctx.Done(): + return Result{}, ctx.Err() + case r.resultc <- result: + } + } + + return result, err +} + +func (r Reporter) Run(ctx context.Context) error { + var wg sync.WaitGroup + defer wg.Wait() + + // applied tracks the etags of agent config that has been applied. + applied := make(map[string]struct{}) + t := time.NewTicker(r.interval) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case result := <-r.resultc: + if _, ok := applied[result.Source.Etag]; !ok { + applied[result.Source.Etag] = struct{}{} + } + continue + case <-t.C: + } + batch := make(model.Batch, 0, len(applied)) + for etag := range applied { + batch = append(batch, model.APMEvent{ + Processor: model.MetricsetProcessor, + Labels: common.MapStr{"etag": etag}, + Metricset: &model.Metricset{ + Name: "agent_config", + Samples: map[string]model.MetricsetSample{ + "agent_config_applied": {Value: 1}, + }, + }, + }) + } + // Reset applied map, so that we report only configs applied + // during a given iteration. 
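Before the Run loop continues below, a hedged sketch of how a caller might wire up the new Reporter, assuming only the constructor and interfaces defined above; the interval value and function name here are illustrative:

```go
package main

import (
	"context"
	"time"

	"github.com/elastic/apm-server/agentcfg"
	"github.com/elastic/apm-server/model"
)

// wireReporter is a hypothetical caller: agent config queries go through
// r.Fetch, while Run's ticker loop batches applied etags into metricsets.
func wireReporter(ctx context.Context, f agentcfg.Fetcher, p model.BatchProcessor) error {
	r := agentcfg.NewReporter(f, p, 30*time.Second)
	// Run blocks until ctx is cancelled, so invoke it in its own goroutine.
	return r.Run(ctx)
}
```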
+ applied = make(map[string]struct{}) + wg.Add(1) + go func() { + defer wg.Done() + if err := r.p.ProcessBatch(ctx, &batch); err != nil { + r.logger.Errorf("error sending applied agent configs to kibana: %v", err) + } + }() + } +} diff --git a/agentcfg/reporter_test.go b/agentcfg/reporter_test.go new file mode 100644 index 00000000000..af9bd45d0de --- /dev/null +++ b/agentcfg/reporter_test.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package agentcfg + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" + + "github.com/elastic/apm-server/model" +) + +func TestReportFetch(t *testing.T) { + interval := 10 * time.Millisecond + receivedc := make(chan struct{}) + defer close(receivedc) + bp := &batchProcessor{receivedc: receivedc} + r := NewReporter(fauxFetcher{}, bp, interval) + + var g errgroup.Group + ctx, cancel := context.WithCancel(context.Background()) + g.Go(func() error { return r.Run(ctx) }) + + query1 := Query{ + Service: Service{Name: "webapp", Environment: "production"}, + Etag: "abc123", + } + query2 := Query{ + Etag: "def456", + MarkAsAppliedByAgent: true, + } + query3 := Query{ + Etag: "old-etag", + } + r.Fetch(ctx, query1) + r.Fetch(ctx, query2) + r.Fetch(ctx, query3) + <-receivedc + <-receivedc + <-receivedc + + // cancel the context to stop processing + cancel() + g.Wait() + + // We use assert.ElementsMatch because the etags may not be + // reported in exactly the same order they were fetched. + etags := make([]string, len(bp.received)) + for i, received := range bp.received { + etags[i] = received.Labels["etag"].(string) + } + assert.ElementsMatch(t, []string{"abc123", "def456"}, etags) +} + +type fauxFetcher struct{} + +func (f fauxFetcher) Fetch(_ context.Context, q Query) (Result, error) { + if q.Etag == "old-etag" { + return Result{ + Source: Source{ + Etag: "new-etag", + }, + }, nil + } + return Result{ + Source: Source{ + Etag: q.Etag, + }, + }, nil +} + +type batchProcessor struct { + receivedc chan struct{} + received []model.APMEvent + mu sync.Mutex +} + +func (p *batchProcessor) ProcessBatch(_ context.Context, b *model.Batch) error { + p.mu.Lock() + defer p.mu.Unlock() + for _, event := range *b { + p.received = append(p.received, event) + } + p.receivedc <- struct{}{} + return nil +} diff --git a/apm-server.docker.yml b/apm-server.docker.yml index de24488ba0b..70e3929cd09 100644 --- a/apm-server.docker.yml +++ b/apm-server.docker.yml @@ -6,6 +6,47 @@ apm-server: # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket. host: "0.0.0.0:8200" + # Agent authorization configuration. If no methods are defined, all requests will be allowed. 
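The anonymous `rate_limit` settings in the config below bound total throughput at `event_limit * ip_limit`; a quick worked example using the documented defaults:

```go
package main

import "fmt"

func main() {
	// Defaults taken from the anonymous rate_limit settings below.
	const ipLimit = 1000   // distinct client IP addresses tracked
	const eventLimit = 300 // events allowed per IP per second
	fmt.Println(ipLimit*eventLimit, "events/sec ceiling") // 300000
}
```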
+ #auth: + # Agent authorization using Elasticsearch API Keys. + #api_key: + #enabled: false + # + # Restrict how many unique API keys are allowed per minute. Should be set to at least the amount of different + # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch. + #limit: 100 + + # Define a shared secret token for authorizing agents using the "Bearer" authorization method. + #secret_token: + + # Allow anonymous access only for specified agents and/or services. This is primarily intended to allow + # limited access for untrusted agents, such as Real User Monitoring. + #anonymous: + # By default anonymous auth is automatically enabled when either auth.api_key or + # auth.secret_token is enabled, and RUM is enabled. Otherwise, anonymous auth is + # disabled by default. + # + # When anonymous auth is enabled, only agents matching allow_agent and services + # matching allow_service are allowed. See below for details on default values for + # allow_agent. + #enabled: + + # Allow anonymous access only for specified agents. + #allow_agent: [rum-js, js-base] + + # Allow anonymous access only for specified service names. By default, all service names are allowed. + #allow_service: [] + + # Rate-limit anonymous access by IP and number of events. + #rate_limit: + # Rate limiting is defined per unique client IP address, for a limited number of IP addresses. + # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000. + #ip_limit: 1000 + + # Defines the maximum amount of events allowed per IP per second. Defaults to 300. The overall + # maximum event throughput for anonymous access is (event_limit * ip_limit). + #event_limit: 300 + # Maximum permitted size in bytes of a request's header accepted by the server to be processed. #max_header_size: 1048576 @@ -27,10 +68,19 @@ apm-server: # Maximum number of new connections to accept simultaneously (0 means unlimited). #max_connections: 0 + # Custom HTTP headers to add to all HTTP responses, e.g. for security policy compliance. + #response_headers: + # X-My-Header: Contents of the header + # If true (default), APM Server captures the IP of the instrumented service # or the IP and User Agent of the real user (RUM requests). #capture_personal_data: true + # If specified, APM Server will record this value in events which have no service environment + # defined, and add it to agent configuration queries to Kibana when none is specified in the + # request from the agent. + #default_service_environment: + # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/). #expvar: #enabled: false @@ -93,12 +143,17 @@ apm-server: # Agents include the token in the following format: Authorization: Bearer . # It is recommended to use an authorization token in combination with SSL enabled, # and save the token in the apm-server keystore. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.secret_token`, and will be removed + # in the 8.0 release. If that config is defined, this one will be ignored. #secret_token: # Enable API key authorization by setting enabled to true. By default API key support is disabled. # Agents include a valid API key in the following format: Authorization: ApiKey . # The key must be the base64 encoded representation of the API key's "id:key". - # This is an experimental feature, use with care. 
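To make the `Bearer` and `ApiKey` header formats described above concrete, here is a client-side sketch; the token, key id, and key values are placeholders:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8200/intake/v2/events", nil)

	// Shared secret token ("Bearer" scheme):
	req.Header.Set("Authorization", "Bearer my-secret-token")

	// API key: base64 of the key's "id:key" pair, as described above.
	creds := base64.StdEncoding.EncodeToString([]byte("key-id:key-secret"))
	req.Header.Set("Authorization", "ApiKey "+creds)

	fmt.Println(req.Header.Get("Authorization"))
}
```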
+ # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.api_key`, and will be removed + # in the 8.0 release. If that config is defined, this one will be ignored. #api_key: #enabled: false @@ -133,10 +188,22 @@ apm-server: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -175,6 +242,10 @@ apm-server: #rum: #enabled: false + # Rate-limit RUM agents. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.rate_limit`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. #event_rate: # Defines the maximum amount of events allowed to be sent to the APM Server RUM @@ -188,19 +259,27 @@ apm-server: #-- General RUM settings + # A list of service names to allow, to limit service-specific indices and data streams + # created for unauthenticated RUM events. + # If the list is empty, any service name is allowed. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.allow_service`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. + #allow_service_names: [] + # A list of permitted origins for real user monitoring. # User-agents will send an origin header that will be validated against this list. # An origin is made of a protocol scheme, host and port, without the url path. # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com) # If an item in the list is a single '*', everything will be allowed. - #allow_origins : ['*'] + #allow_origins: ['*'] # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type", # "Content-Encoding", and "Accept" - #allow_headers : [] + #allow_headers: [] # Custom HTTP headers to add to RUM responses, e.g. for security policy compliance. 
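The `ssl.verification_mode` values documented above map loosely onto Go's TLS primitives. A minimal sketch, assuming only standard-library behavior — `strict` and `certificate` need custom `VerifyPeerCertificate` callbacks and are elided here:

```go
package main

import "crypto/tls"

// verificationModeToTLS sketches how two of the documented modes could map
// onto crypto/tls. Hypothetical helper, not the beats/libbeat implementation.
func verificationModeToTLS(mode string) *tls.Config {
	switch mode {
	case "none":
		// No certificate verification at all — diagnostic use only.
		return &tls.Config{InsecureSkipVerify: true}
	default: // "full"
		// Standard verification: trusted CA chain plus hostname match.
		return &tls.Config{}
	}
}
```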
- #response_headers : + #response_headers: # X-My-Header: Contents of the header # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. @@ -219,9 +298,15 @@ apm-server: # Sourcemapping is enabled by default. #enabled: true - # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration. + # Timeout for fetching source maps. + #timeout: 5s + + # Source maps may be fetched from Elasticsearch by using the + # output.elasticsearch configuration. # A different instance must be configured when using any other output. # This setting only affects sourcemap reads - the output determines where sourcemaps are written. + # Note: Configuring elasticsearch is not supported if apm-server is being + # managed by Fleet. #elasticsearch: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (`http` and `9200`). @@ -274,10 +359,22 @@ apm-server: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kibana certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -384,6 +481,12 @@ apm-server: # When enabling Jaeger integration, APM Server acts as Jaeger collector. It supports jaeger.thrift over HTTP # and gRPC. This is an experimental feature, use with care. + # + # WARNING: This configuration is deprecated, and will be removed in the 8.0 release. + # + # Jaeger gRPC is now served on the same port as Elastic APM agents, defined by the + # "apm-server.host" configuration; it is implicitly enabled, and an agent tag called + # "elastic-apm-auth" is required when auth is enabled. #jaeger: #grpc: # Set to true to enable the Jaeger gRPC collector service. @@ -607,10 +710,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. 
Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -736,10 +851,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Logstash certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -883,10 +1010,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kafka certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. 
+ # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -1055,12 +1194,11 @@ output.elasticsearch: #interval: 0 # Set to true to log messages in json format. -#logging.json: false +#logging.json: true # Set to true, to log messages with minimal required Elastic Common Schema (ECS) -# information. Recommended to use in combination with `logging.json=true` -# Defaults to false. -#logging.ecs: false +# information. Recommended to use in combination with `logging.json=true`. +#logging.ecs: true #=============================== HTTP Endpoint =============================== @@ -1150,10 +1288,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to diff --git a/apm-server.yml b/apm-server.yml index 38a179dda99..effc133a902 100644 --- a/apm-server.yml +++ b/apm-server.yml @@ -6,6 +6,47 @@ apm-server: # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket. host: "localhost:8200" + # Agent authorization configuration. If no methods are defined, all requests will be allowed. + #auth: + # Agent authorization using Elasticsearch API Keys. 
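The `auth.api_key.limit` setting described just below caps how many distinct keys are honored per minute, since each unique key costs an Elasticsearch round trip. A rough sketch of the idea, assuming a naive per-minute counter — hypothetical helper, not the server's actual implementation:

```go
package main

import "fmt"

// apiKeyLimiter mirrors the intent of apm-server.auth.api_key.limit:
// refuse new distinct keys once the per-minute cap is reached.
type apiKeyLimiter struct {
	limit int
	seen  map[string]struct{}
}

func (l *apiKeyLimiter) allow(key string) bool {
	if _, ok := l.seen[key]; ok {
		return true // already validated this minute
	}
	if len(l.seen) >= l.limit {
		return false // cap reached; would otherwise hit Elasticsearch
	}
	l.seen[key] = struct{}{} // the set would be reset every minute
	return true
}

func main() {
	l := &apiKeyLimiter{limit: 2, seen: map[string]struct{}{}}
	fmt.Println(l.allow("a"), l.allow("b"), l.allow("c")) // true true false
}
```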
+ #api_key: + #enabled: false + # + # Restrict how many unique API keys are allowed per minute. Should be set to at least the amount of different + # API keys configured in your monitored services. Every unique API key triggers one request to Elasticsearch. + #limit: 100 + + # Define a shared secret token for authorizing agents using the "Bearer" authorization method. + #secret_token: + + # Allow anonymous access only for specified agents and/or services. This is primarily intended to allow + # limited access for untrusted agents, such as Real User Monitoring. + #anonymous: + # By default anonymous auth is automatically enabled when either auth.api_key or + # auth.secret_token is enabled, and RUM is enabled. Otherwise, anonymous auth is + # disabled by default. + # + # When anonymous auth is enabled, only agents matching allow_agent and services + # matching allow_service are allowed. See below for details on default values for + # allow_agent. + #enabled: + + # Allow anonymous access only for specified agents. + #allow_agent: [rum-js, js-base] + + # Allow anonymous access only for specified service names. By default, all service names are allowed. + #allow_service: [] + + # Rate-limit anonymous access by IP and number of events. + #rate_limit: + # Rate limiting is defined per unique client IP address, for a limited number of IP addresses. + # Sites with many concurrent clients should consider increasing this limit. Defaults to 1000. + #ip_limit: 1000 + + # Defines the maximum amount of events allowed per IP per second. Defaults to 300. The overall + # maximum event throughput for anonymous access is (event_limit * ip_limit). + #event_limit: 300 + # Maximum permitted size in bytes of a request's header accepted by the server to be processed. #max_header_size: 1048576 @@ -27,10 +68,19 @@ apm-server: # Maximum number of new connections to accept simultaneously (0 means unlimited). #max_connections: 0 + # Custom HTTP headers to add to all HTTP responses, e.g. for security policy compliance. + #response_headers: + # X-My-Header: Contents of the header + # If true (default), APM Server captures the IP of the instrumented service # or the IP and User Agent of the real user (RUM requests). #capture_personal_data: true + # If specified, APM Server will record this value in events which have no service environment + # defined, and add it to agent configuration queries to Kibana when none is specified in the + # request from the agent. + #default_service_environment: + # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/). #expvar: #enabled: false @@ -93,12 +143,17 @@ apm-server: # Agents include the token in the following format: Authorization: Bearer . # It is recommended to use an authorization token in combination with SSL enabled, # and save the token in the apm-server keystore. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.secret_token`, and will be removed + # in the 8.0 release. If that config is defined, this one will be ignored. #secret_token: # Enable API key authorization by setting enabled to true. By default API key support is disabled. # Agents include a valid API key in the following format: Authorization: ApiKey . # The key must be the base64 encoded representation of the API key's "id:key". - # This is an experimental feature, use with care. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.api_key`, and will be removed + # in the 8.0 release. 
If that config is defined, this one will be ignored. #api_key: #enabled: false @@ -133,10 +188,22 @@ apm-server: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -175,6 +242,10 @@ apm-server: #rum: #enabled: false + # Rate-limit RUM agents. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.rate_limit`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. #event_rate: # Defines the maximum amount of events allowed to be sent to the APM Server RUM @@ -188,19 +259,27 @@ apm-server: #-- General RUM settings + # A list of service names to allow, to limit service-specific indices and data streams + # created for unauthenticated RUM events. + # If the list is empty, any service name is allowed. + # + # WARNING: This configuration is deprecated and replaced with `apm-server.auth.anonymous.allow_service`, + # and will be removed in the 8.0 release. If that config is defined, this one will be ignored. + #allow_service_names: [] + # A list of permitted origins for real user monitoring. # User-agents will send an origin header that will be validated against this list. # An origin is made of a protocol scheme, host and port, without the url path. # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com) # If an item in the list is a single '*', everything will be allowed. - #allow_origins : ['*'] + #allow_origins: ['*'] # A list of Access-Control-Allow-Headers to allow RUM requests, in addition to "Content-Type", # "Content-Encoding", and "Accept" - #allow_headers : [] + #allow_headers: [] # Custom HTTP headers to add to RUM responses, e.g. for security policy compliance. - #response_headers : + #response_headers: # X-My-Header: Contents of the header # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. @@ -219,9 +298,15 @@ apm-server: # Sourcemapping is enabled by default. 
#enabled: true - # Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration. + # Timeout for fetching source maps. + #timeout: 5s + + # Source maps may be fetched from Elasticsearch by using the + # output.elasticsearch configuration. # A different instance must be configured when using any other output. # This setting only affects sourcemap reads - the output determines where sourcemaps are written. + # Note: Configuring elasticsearch is not supported if apm-server is being + # managed by Fleet. #elasticsearch: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (`http` and `9200`). @@ -274,10 +359,22 @@ apm-server: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kibana certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -384,6 +481,12 @@ apm-server: # When enabling Jaeger integration, APM Server acts as Jaeger collector. It supports jaeger.thrift over HTTP # and gRPC. This is an experimental feature, use with care. + # + # WARNING: This configuration is deprecated, and will be removed in the 8.0 release. + # + # Jaeger gRPC is now served on the same port as Elastic APM agents, defined by the + # "apm-server.host" configuration; it is implicitly enabled, and an agent tag called + # "elastic-apm-auth" is required when auth is enabled. #jaeger: #grpc: # Set to true to enable the Jaeger gRPC collector service. @@ -607,10 +710,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. 
Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -736,10 +851,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Logstash certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -883,10 +1010,22 @@ output.elasticsearch: #ssl.enabled: false # Optional SSL configuration options. SSL is off by default. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Kafka certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. 
+ # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to @@ -1055,12 +1194,11 @@ output.elasticsearch: #interval: 0 # Set to true to log messages in json format. -#logging.json: false +#logging.json: true # Set to true, to log messages with minimal required Elastic Common Schema (ECS) -# information. Recommended to use in combination with `logging.json=true` -# Defaults to false. -#logging.ecs: false +# information. Recommended to use in combination with `logging.json=true`. +#logging.ecs: true #=============================== HTTP Endpoint =============================== @@ -1150,10 +1288,22 @@ output.elasticsearch: #ssl.enabled: true # Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`. - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. + # + # Control the verification of Elasticsearch certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions 1.0 up to diff --git a/apmpackage/README.md b/apmpackage/README.md new file mode 100644 index 00000000000..52559087d17 --- /dev/null +++ b/apmpackage/README.md @@ -0,0 +1,129 @@ +## Developer documentation + +### ~Requirements + +- Checkout `elastic/package-registry`, `elastic/package-storage` and `elastic/beats` +- Have `elastic/package-spec` at hand + +### Guide + +#### Update / fix a package + +1. 
Actual work + - Make changes in `apmpackage/apm` and/or code as needed + - Run `make update`. That will update fields, pipeline and doc files based on apm-server fields and pipeline definitions. + +2. Run the registry + - Check out a fresh master from the registry and run `mage build` + - Copy `apmpackage/apm` in apm-server to `build/package-storage/packages/apm/` in the registry + - `go run .` + +3. Run the stack + - Update Kibana settings with `xpack.fleet.registryUrl: http://localhost:8080` + - Start Kibana and Elasticsearch with X-Pack enabled. One way to do it is with a local Kibana: + - `yarn es snapshot --license trial --ssl -E xpack.security.authc.api_key.enabled=true` + - `yarn start --ssl` + +4. Test + - Go to the Fleet UI, install the integration and test what you need. You will generally want to look at the + installed assets (i.e. templates and pipelines), and the generated `apm` input in the policy. + - If you need to change the package, you *must* remove the installed integration first. You can use the UI + or the API, e.g.: `curl -X DELETE -k -u elastic:changeme https://localhost:5601/abc/api/fleet/epm/packages/apm-0.1.0 -H 'kbn-xsrf: xyz'` + See [API docs](https://github.com/elastic/kibana/tree/master/x-pack/plugins/fleet/dev_docs/api) for details. + You normally don't need to restart the registry (an exception is, e.g., changing an `hbs` template file). + +5. Upload to the snapshot registry + - When everything works and `apmpackage/apm/` changes have been merged to `master`, copy the new package to + `package-storage/packages/apm/` in the `package-storage` repo, `snapshot` branch. + Do *NOT* overwrite any existing packages. Instead, bump the qualifier version (e.g. `0.1.0-dev.1` to `0.1.0-dev.2`) + both in the folder name and the content (`manifest.yml` and `default.json` pipelines). + - You can `cd script && python copy_package.py` for this. + +#### Create a new package version + +Follow the steps described above, except: + +1. New local version + - Bump the version in `apmpackage/apm/manifest.yml` + - Then make any changes in the new folder. The rest of the steps are the same. + +2. First dev version + - When copying to the `package-storage`, create the first version qualifier instead of bumping the last one. + E.g. `apm/0.2.0` -> `apm/0.2.0-dev.1` + + +#### Run the Elastic Agent + +If you make code changes or create a whole new version, you need to run the Elastic Agent locally. +Most of the work here is done in `beats/x-pack/elastic-agent`. + +0. Optional: Update the spec + + The spec informs whether the Elastic Agent should or should not start apm-server based on the policy file, + and what settings to pass via gRPC calls. + - Edit `spec/apm-server.yml` + - `mage update` + +1. Build / Package + + *Always* + - `mage clean` + + *First time* + - `DEV=true PLATFORMS=darwin mage package` (replace platform as needed) + - Untar `build/distributions` contents + + *Every time after* + - `DEV=true mage build` + - Copy `build/elastic-agent` to `build/distributions/elastic-agent--/data/elastic-agent-/` + + *Snapshots* + - If you need the Elastic Agent to grab the snapshot apm-server artifact, prepend `SNAPSHOT=true` to the `mage` command + - Note: as of 14/12/20 `SNAPSHOT=true mage package` is broken for some of us, but `SNAPSHOT=true mage build` works fine + +2. Optional: Override policy / apm-server + - Use the right `elastic-agent.yml` policy + + It might be one you just generated with the UI, or one you have at hand with an apm input; a minimal sketch follows.
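If you don't have a policy at hand, here is a minimal standalone sketch. This is an assumption-laden illustration rather than a reference policy: the `apm` input type matches what the integration generates, but the hosts, credentials, and namespace are placeholders.

```yaml
# Hypothetical minimal standalone Elastic Agent policy with an apm input.
outputs:
  default:
    type: elasticsearch
    hosts: ["https://localhost:9200"]  # placeholder
    username: elastic                  # placeholder
    password: changeme                 # placeholder
inputs:
  # The APM integration generates an input of type "apm" in the policy.
  - type: apm
    data_stream.namespace: default
    apm-server:
      host: 0.0.0.0:8200
```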
+ Copy to `build/distributions/elastic-agent--/elastic-agent.yml` + + - Override apm-server in `install` and `downloads` folders. Approximately: + ``` + # compile apm-server + cd ~//apm-server + make && make update + + # tar and compress + cp build/fields/fields.yml . + tar cvf apm-server--.tar apm-server LICENSE.txt NOTICE.txt README.md apm-server.yml ingest fields.yml + gzip apm-server--.tar + sha512sum apm-server--.tar.gz | tee apm-server--.tar.gz.sha512 + + # delete old stuff + cd ~//beats/x-pack/elastic-agent/build/distributions/elastic-agent--/data/elastic-agent-/downloads + rm apm* + rm -rf ../install/apm* + + # copy new files + mv /apm-server--.tar* . + mkdir -p ../install/apm-server-- + tar zxvf apm-server-- -C ../install/apm-server-- + ``` +3. Run the Elastic Agent + - `./build/distributions//elastic-agent -e` + - Check apm-server logs at `build/distributions//data//logs/default` + + (The last `default` in the path comes from the `namespace` in the policy.) + +#### Promote a package + +Generally, this should be done between feature freeze (FF) and release. +1. Remove the qualifier version from the package +2. Push to the corresponding production branch(es) + + +### Caveats + +Fleet is under active development, and this guide might become obsolete quickly. + +Take everything with a grain of salt. diff --git a/apmpackage/apm/README.template.md b/apmpackage/apm/README.template.md new file mode 100644 index 00000000000..e1a31ffee8f --- /dev/null +++ b/apmpackage/apm/README.template.md @@ -0,0 +1,107 @@ +# APM Integration + +The APM integration installs Elasticsearch templates and ingest node pipelines for APM data. + +### Quick start + +Ready to jump in? Read the [APM quick start](https://ela.st/quick-start-apm). + +### How to use this integration + +Add the APM integration to an Elastic Agent policy to create an `apm` input. +Any Elastic Agents set up with this policy will run an APM Server binary locally. +Don't forget to configure the APM Server `host` if it needs to be accessed from outside, like when running in Docker. +Then, configure your APM agents to communicate with APM Server. + +If you have Real User Monitoring (RUM) enabled, you must run Elastic Agent centrally. +Otherwise, you can run it on edge machines by downloading and installing Elastic Agent +on the same machines that your instrumented services run on. + +#### Data Streams + +When using the APM integration, APM events are indexed into data streams. Data stream names contain the event type, +service name, and a user-configurable namespace. + +There is no specific recommendation for what to use as a namespace; it is intentionally flexible. +You might use the environment, like `production`, `testing`, or `development`, +or you could namespace data by business unit. It is your choice; see the illustrative names after the limitations list below. + +See [APM data streams](https://ela.st/apm-data-streams) for more information. + +## Compatibility and limitations + +The APM integration requires Kibana v7.12 and Elasticsearch with at least the basic license. +This version is experimental and has some limitations, listed below: + +- Sourcemaps need to be uploaded to Elasticsearch directly. +- You need to create specific API keys for sourcemaps and central configuration. +- You can't use an Elastic Agent enrolled before 7.12. +- Not all settings are supported. +- The `apm` templates, pipelines, and ILM settings that ship with this integration cannot be configured or changed with Fleet; +changes must be made with Elasticsearch APIs or Kibana's Stack Management.
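To make the naming scheme concrete, here are a few illustrative data stream names. The type and dataset values match those used elsewhere in this package; the service name `my_service` and the namespace `production` are hypothetical:

```yaml
# <type>-<dataset>-<namespace>
- traces-apm-production                  # transactions and spans
- logs-apm.error-production              # application error events
- metrics-apm.internal-production        # internal APM metrics
- metrics-apm.app.my_service-production  # application metrics, one dataset per service
```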
+ +See [APM integration limitations](https://ela.st/apm-integration-limitations) for more information. + +IMPORTANT: If you run APM Server with Elastic Agent manually in standalone mode, you must install the APM integration before ingestion starts. + +## Traces + +Traces are comprised of [spans and transactions](https://www.elastic.co/guide/en/apm/get-started/current/apm-data-model.html). +Traces are written to `traces-apm.*` indices. + +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +{{range .Traces -}} +| {{- Trim .Name | EscapeMarkdown -}} | {{- Trim .Description | EscapeMarkdown -}} | {{- Trim .Type | EscapeMarkdown -}} | {{if .IsECS}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) {{else}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) {{end}} | +{{end}} + +#### Examples + +```json +{{.TransactionExample}} +``` + +```json +{{.SpanExample}} +``` + + +## Metrics + +Metrics include application-based metrics and some basic system metrics. +Metrics are written to `metrics-apm.app.*`, `metrics-apm.internal.*`, and `metrics-apm.profiling.*` indices. + +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +{{range .Metrics -}} +| {{- Trim .Name | EscapeMarkdown -}} | {{- Trim .Description | EscapeMarkdown -}} | {{- Trim .Type | EscapeMarkdown -}} | {{if .IsECS}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) {{else}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) {{end}} | +{{end}} + +### Example + +```json +{{.MetricsExample}} +``` + +## Logs + +Logs are application error events. +Logs are written to `logs-apm.error.*` indices. + +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +{{range .Logs -}} +| {{- Trim .Name | EscapeMarkdown -}} | {{- Trim .Description | EscapeMarkdown -}} | {{- Trim .Type | EscapeMarkdown -}} | {{if .IsECS}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) {{else}} ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) {{end}} | +{{end}} + +### Example + +```json +{{.ErrorExample}} +``` diff --git a/apmpackage/apm/agent/input/template.yml.hbs b/apmpackage/apm/agent/input/template.yml.hbs new file mode 100644 index 00000000000..aadb595aa78 --- /dev/null +++ b/apmpackage/apm/agent/input/template.yml.hbs @@ -0,0 +1,65 @@ +apm-server: + auth: + anonymous: + allow_agent: + {{#each anonymous_allow_agent}} + - {{this}} + {{/each}} + allow_service: + {{#each anonymous_allow_service}} + - {{this}} + {{/each}} + enabled: {{anonymous_enabled}} + rate_limit: + event_limit: {{anonymous_rate_limit_event_limit}} + ip_limit: {{anonymous_rate_limit_ip_limit}} + api_key: + enabled: {{api_key_enabled}} + limit: {{api_key_limit}} + secret_token: {{secret_token}} + capture_personal_data: {{capture_personal_data}} + idle_timeout: {{idle_timeout}} + default_service_environment: {{default_service_environment}} + expvar.enabled: {{expvar_enabled}} + host: {{host}} + max_connections: {{max_connections}} + max_event_size: {{max_event_bytes}} + max_header_size: {{max_header_bytes}} + read_timeout: {{read_timeout}} + response_headers: {{response_headers}} + {{#if enable_rum}} + rum: + allow_headers: + {{#each rum_allow_headers}} + - {{this}} + {{/each}} + allow_origins: + {{#each rum_allow_origins}} + - {{this}} + {{/each}} + enabled: {{enable_rum}} + exclude_from_grouping: {{rum_exclude_from_grouping}} + library_pattern: {{rum_library_pattern}} + response_headers: {{rum_response_headers}} + {{/if}} + shutdown_timeout: 
{{shutdown_timeout}} + {{#if tls_enabled}} + ssl: + enabled: {{tls_enabled}} + certificate: {{tls_certificate}} + key: {{tls_key}} + key_passphrase: {{tls_key_passphrase}} + supported_protocols: + {{#each tls_supported_protocols}} + - {{this}} + {{/each}} + cipher_suites: + {{#each tls_cipher_suites}} + - {{this}} + {{/each}} + curve_types: + {{#each tls_curve_types}} + - {{this}} + {{/each}} + {{/if}} + write_timeout: {{write_timeout}} diff --git a/apmpackage/apm/changelog.yml b/apmpackage/apm/changelog.yml new file mode 100644 index 00000000000..6c7445ababd --- /dev/null +++ b/apmpackage/apm/changelog.yml @@ -0,0 +1,40 @@ +# newer versions go on top +# +# change type can be one of: enhancement, bugfix, breaking-change +- version: "0.5.0" + changes: + - description: define index sorting for internal metrics + type: enhancement + link: https://github.com/elastic/apm-server/pull/6116 + - description: add histogram dynamic_template to app metrics data stream + type: enhancement + link: https://github.com/elastic/apm-server/pull/6043 +- version: "0.4.0" + changes: + - description: add anonymous auth config, replace some RUM config + type: breaking-change + link: https://github.com/elastic/apm-server/pull/5623 + - description: use new apm-server.auth config + type: breaking-change + link: https://github.com/elastic/apm-server/pull/5691 +- version: "0.3.0" + changes: + - description: added apm-server.url config + type: enhancement + link: https://github.com/elastic/apm-server/pull/5332 + - description: removed apm-server.kibana.api_key config + type: enhancement + link: https://github.com/elastic/apm-server/pull/5380 +- version: "0.2.0" + changes: + - description: added support for apm-server.rum.allow_service_names + type: enhancement + link: https://github.com/elastic/apm-server/pull/5030 + - description: introduce a configurable default service environment + type: enhancement + link: https://github.com/elastic/apm-server/pull/4861 +- version: "0.1.0" + changes: + - description: initial release + type: enhancement + link: https://github.com/elastic/apm-server/ diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..97262a5ede9 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ilm/default_policy.json @@ -0,0 +1,26 @@ +{ + "policy": { + "phases": { + "warm": { + "min_age": "30d", + "actions": { + "readonly": {}, + "set_priority": { + "priority": 50 + } + } + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_size": "50gb" + }, + "set_priority": { + "priority": 100 + } + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json new file mode 100644 index 00000000000..24218404aaf --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json @@ -0,0 +1,30 @@ +{ + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": 
"ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json new file mode 100644 index 00000000000..0969e60d739 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json @@ -0,0 +1,18 @@ +{ + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json new file mode 100644 index 00000000000..a4df9734715 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json @@ -0,0 +1,12 @@ +{ + "description": "Add an ingest timestamp for APM events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json new file mode 100644 index 00000000000..ec15aecf007 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json @@ -0,0 +1,11 @@ +{ + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json new file mode 100644 index 
00000000000..52e108a3472 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json @@ -0,0 +1,25 @@ +{ + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json new file mode 100644 index 00000000000..bdab7d4bbac --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json @@ -0,0 +1,13 @@ +{ + "description": "Add user agent information for APM events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json new file mode 100644 index 00000000000..4f6ae66105b --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json @@ -0,0 +1,22 @@ +{ + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { + "field": "client.ip", + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/default.json b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..0daeb454a8c --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,37 @@ +{ + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_ingest_timestamp" + } + }, + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_user_agent" + } + }, + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_user_geo" + } + }, + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "metrics-apm.app-0.5.0-apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/app_metrics/fields/base-fields.yml b/apmpackage/apm/data_stream/app_metrics/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. 
+- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/app_metrics/fields/ecs.yml b/apmpackage/apm/data_stream/app_metrics/fields/ecs.yml new file mode 100644 index 00000000000..5a95c5dc42b --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/fields/ecs.yml @@ -0,0 +1,329 @@ +- name: agent + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + The Ephemeral ID identifies a running process. + - name: name + type: keyword + description: | + Name of the agent used. + - name: version + type: keyword + description: | + Version of the agent used. +- name: client + type: group + fields: + - name: domain + type: keyword + description: | + Client domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the client. +- name: cloud + title: Cloud + type: group + description: | + Cloud metadata reported by agents + fields: + - name: account + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud account ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud account name + ignore_above: 1024 + - name: availability_zone + level: extended + type: keyword + description: Cloud availability zone name + ignore_above: 1024 + - name: instance + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud instance/machine ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud instance/machine name + ignore_above: 1024 + - name: machine + type: group + fields: + - name: type + level: extended + type: keyword + description: Cloud instance/machine type + ignore_above: 1024 + - name: project + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud project ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud project name + ignore_above: 1024 + - name: provider + level: extended + type: keyword + description: Cloud provider name + ignore_above: 1024 + - name: region + level: extended + type: keyword + description: Cloud region name + ignore_above: 1024 + - name: service + type: group + fields: + - name: name + level: extended + type: keyword + description: | + Cloud service name, intended to distinguish services running on different platforms within a provider. + ignore_above: 1024 +- name: container + title: Container + type: group + description: | + Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. + fields: + - name: id + type: keyword + description: | + Unique container id. +- name: destination + title: Destination + type: group + description: |- + Destination fields describe details about the destination of a packet/event. + Destination fields are usually populated in conjunction with source fields. + fields: + - name: address + level: extended + type: keyword + description: Some event destination addresses are defined ambiguously. 
The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + ignore_above: 1024 + - name: ip + level: core + type: ip + description: IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses. + - name: port + level: core + type: long + format: string + description: Port of the destination. +- name: host + type: group + description: | + Optional host fields. + fields: + - name: architecture + type: keyword + description: | + The architecture of the host the event was recorded on. + - name: hostname + type: keyword + description: | + The hostname of the host the event was recorded on. + - name: ip + type: ip + description: | + IP of the host that records the event. + - name: name + type: keyword + description: | + Name of the host the event was recorded on. It can contain the same information as host.hostname or a name specified by the user. + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: platform + type: keyword + description: | + The platform of the host the event was recorded on. +- name: labels + type: object + description: | + A flat mapping of user-defined labels with string, boolean or number values. + dynamic: true + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 +- name: observer + type: group + fields: + - name: hostname + type: keyword + description: | + Hostname of the APM Server. + - name: type + type: keyword + description: | + The type will be set to `apm-server`. + - name: version + type: keyword + description: | + APM Server version. +- name: process + type: group + description: | + Information pertaining to the running process where the data was collected. + fields: + - name: args + level: extended + type: keyword + description: | + Process arguments. May be filtered to protect sensitive information. + - name: pid + type: long + description: | + Numeric process ID of the service process. + - name: ppid + type: long + description: | + Numeric ID of the service's parent process. + - name: title + type: keyword + description: | + Service process title. +- name: service + type: group + description: | + Service fields. + fields: + - name: name + type: keyword + description: | + Immutable name of the service emitting this event. + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Unique meaningful name of the service node. + - name: version + type: keyword + description: | + Version of the service emitting this event. +- name: source + type: group + fields: + - name: domain + type: keyword + description: | + Source domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the source. +- name: user + type: group + fields: + - name: email + type: keyword + description: | + Email of the logged in user. + - name: id + type: keyword + description: | + Identifier of the logged in user. + - name: name + type: keyword + description: | + The username of the logged in user.
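As an aside, here is a sketch of how the dynamic `labels` mapping defined above resolves per value type; the label names and values are invented for illustration:

```yaml
labels:
  customer_tier: gold     # string  -> indexed as keyword
  beta_enabled: true      # boolean -> indexed as boolean
  response_ratio: 0.9231  # number  -> indexed as scaled_float (scaling_factor: 1000000)
```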
+- name: user_agent + title: User agent + type: group + description: | + The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + fields: + - name: device + title: Device + type: group + description: | + Information concerning the device. + fields: + - name: name + type: keyword + description: | + Name of the device. + - name: name + type: keyword + description: | + Name of the user agent. + - name: original + type: keyword + description: | + Unparsed version of the user_agent. + multi_fields: + - name: text + type: text + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: family + type: keyword + description: | + OS family (such as redhat, debian, freebsd, windows). + - name: full + type: keyword + description: | + Operating system name, including the version or code name. + - name: kernel + type: keyword + description: | + Operating system kernel version as a raw string. + - name: name + type: keyword + description: | + Operating system name, without the version. + - name: platform + type: keyword + description: | + Operating system platform (such as centos, ubuntu, windows). + - name: version + type: keyword + description: | + Operating system version as a raw string. + - name: version + type: keyword + description: | + Version of the user agent. diff --git a/apmpackage/apm/data_stream/app_metrics/fields/fields.yml b/apmpackage/apm/data_stream/app_metrics/fields/fields.yml new file mode 100644 index 00000000000..8ff494dc7af --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/fields/fields.yml @@ -0,0 +1,297 @@ +- name: kubernetes + title: Kubernetes + type: group + description: | + Kubernetes metadata reported by agents + fields: + - name: namespace + type: keyword + description: | + Kubernetes namespace + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes node name + - name: pod + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes pod name + - name: uid + type: keyword + description: | + Kubernetes Pod UID +- name: metricset + type: group + fields: + - name: name + type: keyword + description: | + Name of the set of metrics. +- name: metricset.period + type: long + description: Current data collection period for this event in milliseconds. + unit: ms +- name: network + type: group + description: | + Optional network fields + fields: + - name: carrier + type: group + description: | + Network operator + fields: + - name: icc + type: keyword + description: | + ISO country code, e.g. US + - name: mcc + type: keyword + description: | + Mobile country code + - name: mnc + type: keyword + description: | + Mobile network code + - name: name + type: keyword + description: | + Carrier name, e.g. Vodafone, T-Mobile, etc. + - name: connection + type: group + description: | + Network connection details + fields: + - name: subtype + type: keyword + description: | + Detailed network connection sub-type, e.g. "LTE", "CDMA" + - name: type + type: keyword + description: | + Network connection type, e.g. "wifi", "cell" +- name: observer + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + Ephemeral identifier of the APM Server. + - name: id + type: keyword + description: | + Unique identifier of the APM Server. + - name: listening + type: keyword + description: | + Address the server is listening on.
+ - name: version_major + type: byte + description: | + Major version number of the observer +- name: processor.event + type: keyword + description: Processor event. +- name: processor.name + type: keyword + description: Processor name. +- name: service + type: group + description: | + Service fields. + fields: + - name: environment + type: keyword + description: | + Service environment. + - name: framework + type: group + fields: + - name: name + type: keyword + description: | + Name of the framework used. + - name: version + type: keyword + description: | + Version of the framework used. + - name: language + type: group + fields: + - name: name + type: keyword + description: | + Name of the programming language used. + - name: version + type: keyword + description: | + Version of the programming language used. + - name: runtime + type: group + fields: + - name: name + type: keyword + description: | + Name of the runtime used. + - name: version + type: keyword + description: | + Version of the runtime used. +- name: system + type: group + description: | + `system` contains local system metrics. + fields: + - name: cpu + type: group + description: | + `cpu` contains local CPU stats. + fields: + - name: total.norm.pct + type: scaled_float + format: percent + description: | + The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. + metric_type: gauge + unit: percent + - name: memory + type: group + description: | + `memory` contains local memory stats. + fields: + - name: actual + type: group + description: | + Actual memory used and free. + fields: + - name: free + type: long + format: bytes + description: | + Actual free memory in bytes. It is calculated based on the OS. On Linux it consists of the free memory plus caches and buffers. On OSX it is a sum of free memory and the inactive memory. On Windows, it is equal to `system.memory.free`. + metric_type: gauge + unit: byte + - name: total + type: long + format: bytes + description: | + Total memory. + metric_type: gauge + unit: byte + - name: process + type: group + description: | + `process` contains process metadata, CPU metrics, and memory metrics. + fields: + - name: cgroup + type: group + description: Metrics and limits for the cgroup, collected by APM agents on Linux. + fields: + - name: cpu + type: group + description: CPU-specific cgroup metrics and limits. + fields: + - name: cfs + type: group + description: Completely Fair Scheduler (CFS) cgroup metrics. + fields: + - name: period.us + type: long + description: CFS period in microseconds. + metric_type: gauge + unit: micros + - name: quota.us + type: long + description: CFS quota in microseconds. + metric_type: gauge + unit: micros + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: stats.periods + type: long + description: Number of periods seen by the CPU. + metric_type: counter + - name: stats.throttled.ns + type: long + description: Nanoseconds spent throttled seen by the CPU. + metric_type: counter + unit: nanos + - name: stats.throttled.periods + type: long + description: Number of throttled periods seen by the CPU. + metric_type: counter + - name: cpuacct + type: group + description: CPU Accounting-specific cgroup metrics and limits. + fields: + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: total.ns + type: long + description: Total CPU time for the current cgroup CPU in nanoseconds. 
+ metric_type: counter + unit: nanos + - name: memory + type: group + description: Memory-specific cgroup metrics and limits. + fields: + - name: mem.limit.bytes + type: long + format: bytes + description: Memory limit for the current cgroup slice. + metric_type: gauge + unit: byte + - name: mem.usage.bytes + type: long + format: bytes + description: Memory usage by the current cgroup slice. + metric_type: gauge + unit: byte + - name: cpu + type: group + description: | + `cpu` contains local CPU stats. + fields: + - name: total.norm.pct + type: scaled_float + format: percent + description: | + The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. + metric_type: gauge + unit: percent + - name: memory + type: group + description: Memory-specific statistics per process. + fields: + - name: rss.bytes + type: long + format: bytes + description: | + The Resident Set Size. The amount of memory the process occupied in main memory (RAM). + metric_type: gauge + unit: byte + - name: size + type: long + format: bytes + description: | + The total virtual memory the process has. + metric_type: gauge + unit: byte +- name: timeseries.instance + type: keyword + description: Time series instance ID +- name: timestamp + type: group + fields: + - name: us + type: long + description: | + Timestamp of the event in microseconds since Unix epoch. diff --git a/apmpackage/apm/data_stream/app_metrics/manifest.yml b/apmpackage/apm/data_stream/app_metrics/manifest.yml new file mode 100644 index 00000000000..c625fd1b7c7 --- /dev/null +++ b/apmpackage/apm/data_stream/app_metrics/manifest.yml @@ -0,0 +1,18 @@ +title: APM application metrics +type: metrics +dataset: apm.app +dataset_is_prefix: true +ilm_policy: metrics-apm.app_metrics-default_policy +elasticsearch: + index_template: + mappings: + # Application metrics must be dynamically mapped, + # as their names are application-specific and not + # known ahead of time. + dynamic: true + # Install dynamic templates for use in dynamically + # mapping complex application metrics. 
+ dynamic_templates: + - histogram: + mapping: + type: histogram diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..97262a5ede9 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ilm/default_policy.json @@ -0,0 +1,26 @@ +{ + "policy": { + "phases": { + "warm": { + "min_age": "30d", + "actions": { + "readonly": {}, + "set_priority": { + "priority": 50 + } + } + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_size": "50gb" + }, + "set_priority": { + "priority": 100 + } + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_data_stream_migration.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_data_stream_migration.json new file mode 100644 index 00000000000..24218404aaf --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_data_stream_migration.json @@ -0,0 +1,30 @@ +{ + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_error_grouping_name.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_error_grouping_name.json new file mode 100644 index 00000000000..0969e60d739 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_error_grouping_name.json @@ -0,0 +1,18 @@ +{ + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json new file mode 100644 index 00000000000..a4df9734715 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json @@ -0,0 +1,12 @@ +{ + "description": "Add an ingest timestamp for APM 
events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json new file mode 100644 index 00000000000..ec15aecf007 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json @@ -0,0 +1,11 @@ +{ + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json new file mode 100644 index 00000000000..52e108a3472 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json @@ -0,0 +1,25 @@ +{ + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_agent.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_agent.json new file mode 100644 index 00000000000..bdab7d4bbac --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_agent.json @@ -0,0 +1,13 @@ +{ + "description": "Add user agent information for APM events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_geo.json b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_geo.json new file mode 100644 index 00000000000..4f6ae66105b --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/apm_user_geo.json @@ -0,0 +1,22 @@ +{ + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { + "field": "client.ip", + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/default.json 
b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..a63ed66d357 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,37 @@ +{ + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_ingest_timestamp" + } + }, + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_user_agent" + } + }, + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_user_geo" + } + }, + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "logs-apm.error-0.5.0-apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/error_logs/fields/base-fields.yml b/apmpackage/apm/data_stream/error_logs/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/error_logs/fields/ecs.yml b/apmpackage/apm/data_stream/error_logs/fields/ecs.yml new file mode 100644 index 00000000000..91d0efbd3ef --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/fields/ecs.yml @@ -0,0 +1,417 @@ +- name: agent + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + The Ephemeral ID identifies a running process. + - name: name + type: keyword + description: | + Name of the agent used. + - name: version + type: keyword + description: | + Version of the agent used. +- name: client + type: group + fields: + - name: domain + type: keyword + description: | + Client domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the client. 
+- name: cloud + title: Cloud + type: group + description: | + Cloud metadata reported by agents + fields: + - name: account + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud account ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud account name + ignore_above: 1024 + - name: availability_zone + level: extended + type: keyword + description: Cloud availability zone name + ignore_above: 1024 + - name: instance + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud instance/machine ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud instance/machine name + ignore_above: 1024 + - name: machine + type: group + fields: + - name: type + level: extended + type: keyword + description: Cloud instance/machine type + ignore_above: 1024 + - name: project + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud project ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud project name + ignore_above: 1024 + - name: provider + level: extended + type: keyword + description: Cloud provider name + ignore_above: 1024 + - name: region + level: extended + type: keyword + description: Cloud region name + ignore_above: 1024 + - name: service + type: group + fields: + - name: name + level: extended + type: keyword + description: | + Cloud service name, intended to distinguish services running on different platforms within a provider. + ignore_above: 1024 +- name: container + title: Container + type: group + description: | + Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. + fields: + - name: id + type: keyword + description: | + Unique container id. +- name: destination + title: Destination + type: group + description: |- + Destination fields describe details about the destination of a packet/event. + Destination fields are usually populated in conjunction with source fields. + fields: + - name: address + level: extended + type: keyword + description: Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + ignore_above: 1024 + - name: ip + level: core + type: ip + description: IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses. + - name: port + level: core + type: long + format: string + description: Port of the destination. +- name: error + type: group + description: | + Data captured by an agent representing an event occurring in a monitored service. + fields: + - name: id + type: keyword + description: | + The ID of the error. +- name: host + type: group + description: | + Optional host fields. + fields: + - name: architecture + type: keyword + description: | + The architecture of the host the event was recorded on. + - name: hostname + type: keyword + description: | + The hostname of the host the event was recorded on. + - name: ip + type: ip + description: | + IP of the host that records the event. + - name: name + type: keyword + description: | + Name of the host the event was recorded on. It can contain the same information as host.hostname or a name specified by the user.
+ - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: platform + type: keyword + description: | + The platform of the host the event was recorded on. +- name: http + type: group + fields: + - name: request + type: group + fields: + - name: method + type: keyword + description: | + The HTTP method of the request leading to this event. + - name: referrer + type: keyword + description: Referrer for this HTTP request. + ignore_above: 1024 + - name: response + type: group + fields: + - name: status_code + type: long + description: | + The status code of the HTTP response. + - name: version + type: keyword + description: | + The HTTP version of the request leading to this event. +- name: labels + type: object + description: | + A flat mapping of user-defined labels with string, boolean or number values. + dynamic: true + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 +- name: message + type: text + description: The original error message. +- name: observer + type: group + fields: + - name: hostname + type: keyword + description: | + Hostname of the APM Server. + - name: type + type: keyword + description: | + The type will be set to `apm-server`. + - name: version + type: keyword + description: | + APM Server version. +- name: process + type: group + description: | + Information pertaining to the running process where the data was collected. + fields: + - name: args + level: extended + type: keyword + description: | + Process arguments. May be filtered to protect sensitive information. + - name: pid + type: long + description: | + Numeric process ID of the service process. + - name: ppid + type: long + description: | + Numeric ID of the service's parent process. + - name: title + type: keyword + description: | + Service process title. +- name: service + type: group + description: | + Service fields. + fields: + - name: name + type: keyword + description: | + Immutable name of the service emitting this event. + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Unique meaningful name of the service node. + - name: version + type: keyword + description: | + Version of the service emitting this event. +- name: source + type: group + fields: + - name: domain + type: keyword + description: | + Source domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the source. +- name: trace + type: group + fields: + - name: id + type: keyword + description: | + The ID of the trace to which the event belongs. +- name: transaction + type: group + fields: + - name: id + type: keyword + description: | + The transaction ID. +- name: url + type: group + description: | + A complete URL, with scheme, host, and path. + fields: + - name: domain + type: keyword + description: | + The hostname of the request, e.g. "example.com". + - name: fragment + type: keyword + description: | + A fragment specifying a location in a web page, e.g. "top". + - name: full + type: keyword + description: | + The full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.
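The labels mapping above stores numeric label values as scaled_float with scaling_factor: 1000000. Per the documented scaled_float behaviour, Elasticsearch keeps round(value * scaling_factor) as a long, so label precision is capped at six decimal places; a quick pure-Python illustration of that trade-off:

SCALING_FACTOR = 1_000_000  # from the labels mapping above

def stored_label_value(value: float) -> float:
    # scaled_float persists round(value * factor) and divides back on read
    return round(value * SCALING_FACTOR) / SCALING_FACTOR

print(stored_label_value(0.12345678))  # 0.123457 -- sixth decimal is the resolution
print(stored_label_value(12.5))        # 12.5     -- representable values survive exactly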
+ - name: path + type: keyword + description: | + The path of the request, e.g. "/search". + - name: port + type: long + description: | + The port of the request, e.g. 443. + - name: query + type: keyword + description: | + The query string of the request, e.g. "q=elasticsearch". + - name: scheme + type: keyword + description: | + The protocol of the request, e.g. "https:". +- name: user + type: group + fields: + - name: domain + type: keyword + description: | + Domain of the logged in user. + - name: email + type: keyword + description: | + Email of the logged in user. + - name: id + type: keyword + description: | + Identifier of the logged in user. + - name: name + type: keyword + description: | + The username of the logged in user. +- name: user_agent + title: User agent + type: group + description: | + The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + fields: + - name: device + title: Device + type: group + description: | + Information concerning the device. + fields: + - name: name + type: keyword + description: | + Name of the device. + - name: name + type: keyword + description: | + Name of the user agent. + - name: original + type: keyword + description: | + Unparsed version of the user_agent. + multi_fields: + - name: text + type: text + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: family + type: keyword + description: | + OS family (such as redhat, debian, freebsd, windows). + - name: full + type: keyword + description: | + Operating system name, including the version or code name. + - name: kernel + type: keyword + description: | + Operating system kernel version as a raw string. + - name: name + type: keyword + description: | + Operating system name, without the version. + - name: platform + type: keyword + description: | + Operating system platform (such as centos, ubuntu, windows). + - name: version + type: keyword + description: | + Operating system version as a raw string. + - name: version + type: keyword + description: | + Version of the user agent. diff --git a/apmpackage/apm/data_stream/error_logs/fields/fields.yml b/apmpackage/apm/data_stream/error_logs/fields/fields.yml new file mode 100644 index 00000000000..e6657c7e8f2 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/fields/fields.yml @@ -0,0 +1,241 @@ +- name: error + type: group + description: | + Data captured by an agent representing an event occurring in a monitored service. + fields: + - name: culprit + type: keyword + description: Function call which was the primary perpetrator of this event. + - name: exception + type: group + description: | + Information about the originally thrown error. + fields: + - name: code + type: keyword + description: The error code set when the error happened, e.g. database error code. + - name: handled + type: boolean + description: Indicator whether the error was caught somewhere in the code or not. + - name: message + type: text + description: The original error message. + - name: module + type: keyword + description: The module namespace of the original error. + - name: type + type: keyword + description: The type of the original error, e.g. the Java exception class name. + - name: grouping_key + type: keyword + description: | + Hash of select properties of the logged error for grouping purposes.
+ - name: grouping_name + type: keyword + description: | + Name to associate with an error group. Errors belonging to the same group (same grouping_key) may have differing values for grouping_name. Consumers may choose one arbitrarily. + - name: log + type: group + description: | + Additional information added by logging the error. + fields: + - name: level + type: keyword + description: The severity of the record. + - name: logger_name + type: keyword + description: The name of the logger instance used. + - name: message + type: text + description: The additionally logged error message. + - name: param_message + type: keyword + description: | + A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. +- name: http + type: group + fields: + - name: request + type: group + fields: + - name: headers + type: object + description: | + The canonical headers of the monitored HTTP request. + - name: response + type: group + fields: + - name: finished + type: boolean + description: | + Used by the Node agent to indicate when in the response life cycle an error has occurred. + - name: headers + type: object + description: | + The canonical headers of the monitored HTTP response. +- name: kubernetes + title: Kubernetes + type: group + description: | + Kubernetes metadata reported by agents + fields: + - name: namespace + type: keyword + description: | + Kubernetes namespace + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes node name + - name: pod + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes pod name + - name: uid + type: keyword + description: | + Kubernetes Pod UID +- name: network + type: group + description: | + Optional network fields + fields: + - name: carrier + type: group + description: | + Network operator + fields: + - name: icc + type: keyword + description: | + ISO country code, eg. US + - name: mcc + type: keyword + description: | + Mobile country code + - name: mnc + type: keyword + description: | + Mobile network code + - name: name + type: keyword + description: | + Carrier name, eg. Vodafone, T-Mobile, etc. + - name: connection + type: group + description: | + Network connection details + fields: + - name: subtype + type: keyword + description: | + Detailed network connection sub-type, e.g. "LTE", "CDMA" + - name: type + type: keyword + description: | + Network connection type, eg. "wifi", "cell" +- name: observer + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + Ephemeral identifier of the APM Server. + - name: id + type: keyword + description: | + Unique identifier of the APM Server. + - name: listening + type: keyword + description: | + Address the server is listening on. + - name: version_major + type: byte + description: | + Major version number of the observer +- name: parent + type: group + fields: + - name: id + type: keyword + description: | + The ID of the parent event. +- name: processor.event + type: keyword + description: Processor event. +- name: processor.name + type: keyword + description: Processor name. +- name: service + type: group + description: | + Service fields. + fields: + - name: environment + type: keyword + description: | + Service environment. 
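Since error.grouping_name above is populated by the apm_error_grouping_name pipeline defined elsewhere in this change, the precedence is worth spelling out: the first exception message is used, and error.log.message overrides it when present. A plain-Python rendering of those two processors (a sketch mirroring the pipeline, not server code):

from typing import Optional

def grouping_name(error: dict) -> Optional[str]:
    # Processor 1: first exception message, when any exceptions exist.
    name = None
    exceptions = error.get("exception") or []
    if exceptions:
        name = exceptions[0].get("message")
    # Processor 2: error.log.message wins whenever it is present.
    log = error.get("log") or {}
    if log.get("message") is not None:
        name = log["message"]
    return name

assert grouping_name({"exception": [{"message": "NPE"}]}) == "NPE"
assert grouping_name({"exception": [{"message": "NPE"}],
                      "log": {"message": "boom"}}) == "boom"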
+ - name: framework + type: group + fields: + - name: name + type: keyword + description: | + Name of the framework used. + - name: version + type: keyword + description: | + Version of the framework used. + - name: language + type: group + fields: + - name: name + type: keyword + description: | + Name of the programming language used. + - name: version + type: keyword + description: | + Version of the programming language used. + - name: runtime + type: group + fields: + - name: name + type: keyword + description: | + Name of the runtime used. + - name: version + type: keyword + description: | + Version of the runtime used. +- name: timestamp + type: group + fields: + - name: us + type: long + description: | + Timestamp of the event in microseconds since Unix epoch. +- name: transaction + type: group + fields: + - name: name + type: keyword + description: | + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + multi_fields: + - name: text + type: text + - name: sampled + type: boolean + description: | + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + - name: type + type: keyword + description: | + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) diff --git a/apmpackage/apm/data_stream/error_logs/manifest.yml b/apmpackage/apm/data_stream/error_logs/manifest.yml new file mode 100644 index 00000000000..06d22a1a4e7 --- /dev/null +++ b/apmpackage/apm/data_stream/error_logs/manifest.yml @@ -0,0 +1,12 @@ +title: APM logs and errors +type: logs +dataset: apm.error +ilm_policy: logs-apm.error_logs-default_policy +elasticsearch: + index_template: + mappings: + # TODO(axw) investigate setting `dynamic: runtime`, so that fields are + # runtime searchable by default. That way users can, for example, perform + # ad-hoc searches on HTTP request headers without incurring storage cost + # for users who do not need this capability. 
+ dynamic: false diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..97262a5ede9 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ilm/default_policy.json @@ -0,0 +1,26 @@ +{ + "policy": { + "phases": { + "warm": { + "min_age": "30d", + "actions": { + "readonly": {}, + "set_priority": { + "priority": 50 + } + } + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_size": "50gb" + }, + "set_priority": { + "priority": 100 + } + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json new file mode 100644 index 00000000000..24218404aaf --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json @@ -0,0 +1,30 @@ +{ + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json new file mode 100644 index 00000000000..0969e60d739 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json @@ -0,0 +1,18 @@ +{ + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json new file mode 100644 index 00000000000..a4df9734715 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json @@ -0,0 +1,12 @@ +{ + "description": "Add an 
ingest timestamp for APM events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json new file mode 100644 index 00000000000..ec15aecf007 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json @@ -0,0 +1,11 @@ +{ + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json new file mode 100644 index 00000000000..52e108a3472 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json @@ -0,0 +1,25 @@ +{ + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json new file mode 100644 index 00000000000..bdab7d4bbac --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json @@ -0,0 +1,13 @@ +{ + "description": "Add user agent information for APM events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json new file mode 100644 index 00000000000..4f6ae66105b --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json @@ -0,0 +1,22 @@ +{ + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { + "field": "client.ip", + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + } + ] +} \ No newline at end of file diff --git 
a/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/default.json b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..2fd09661093 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,37 @@ +{ + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_ingest_timestamp" + } + }, + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_user_agent" + } + }, + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_user_geo" + } + }, + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "metrics-apm.internal-0.5.0-apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/internal_metrics/fields/base-fields.yml b/apmpackage/apm/data_stream/internal_metrics/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/internal_metrics/fields/ecs.yml b/apmpackage/apm/data_stream/internal_metrics/fields/ecs.yml new file mode 100644 index 00000000000..181d9ce8d43 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/fields/ecs.yml @@ -0,0 +1,345 @@ +- name: agent + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + The Ephemeral ID identifies a running process. + - name: name + type: keyword + description: | + Name of the agent used. + - name: version + type: keyword + description: | + Version of the agent used. +- name: client + type: group + fields: + - name: domain + type: keyword + description: | + Client domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the client. 
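The apm_data_stream_migration script shown earlier derives the destination dataset for migrated metric documents, sanitising the service name for the per-service app datasets. The same logic in plain Python, handy for predicting which data stream a migrated document lands in (a sketch mirroring the Painless source, not shared code):

import re

# Character class copied from the Painless regex /[\\\/*?"<>| ,#:-]/.
DISALLOWED = re.compile(r'[\\/*?"<>| ,#:-]')

def migrated_dataset(metricset_name: str, service_name: str) -> str:
    if metricset_name != "app":
        return "apm.internal"
    return "apm.app." + DISALLOWED.sub("_", service_name.lower())

assert migrated_dataset("transaction_breakdown", "opbeans-java") == "apm.internal"
assert migrated_dataset("app", "My Service:1") == "apm.app.my_service_1"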
+- name: cloud + title: Cloud + type: group + description: | + Cloud metadata reported by agents + fields: + - name: account + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud account ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud account name + ignore_above: 1024 + - name: availability_zone + level: extended + type: keyword + description: Cloud availability zone name + ignore_above: 1024 + - name: instance + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud instance/machine ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud instance/machine name + ignore_above: 1024 + - name: machine + type: group + fields: + - name: type + level: extended + type: keyword + description: Cloud instance/machine type + ignore_above: 1024 + - name: project + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud project ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud project name + ignore_above: 1024 + - name: provider + level: extended + type: keyword + description: Cloud provider name + ignore_above: 1024 + - name: region + level: extended + type: keyword + description: Cloud region name + ignore_above: 1024 + - name: service + type: group + fields: + - name: name + level: extended + type: keyword + description: | + Cloud service name, intended to distinguish services running on different platforms within a provider. + ignore_above: 1024 +- name: container + title: Container + type: group + description: | + Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based on containers from any runtime. + fields: + - name: id + type: keyword + description: | + Unique container id. +- name: destination + title: Destination + type: group + description: |- + Destination fields describe details about the destination of a packet/event. + Destination fields are usually populated in conjunction with source fields. + fields: + - name: address + level: extended + type: keyword + description: Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + ignore_above: 1024 + - name: ip + level: core + type: ip + description: IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses. + - name: port + level: core + type: long + format: string + description: Port of the destination. +- name: event + type: group + fields: + - name: outcome + level: core + type: keyword + description: | + `event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. + ignore_above: 1024 +- name: host + type: group + description: | + Optional host fields. + fields: + - name: architecture + type: keyword + description: | + The architecture of the host the event was recorded on. + - name: hostname + type: keyword + description: | + The hostname of the host the event was recorded on. + - name: ip + type: ip + description: | + IP of the host that records the event. + - name: name + type: keyword + description: | + Name of the host the event was recorded on.
It can contain the same information as host.hostname or a name specified by the user. + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: platform + type: keyword + description: | + The platform of the host the event was recorded on. +- name: labels + type: object + description: | + A flat mapping of user-defined labels with string, boolean or number values. + dynamic: true + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 +- name: observer + type: group + fields: + - name: hostname + type: keyword + description: | + Hostname of the APM Server. + - name: type + type: keyword + description: | + The type will be set to `apm-server`. + - name: version + type: keyword + description: | + APM Server version. +- name: process + type: group + description: | + Information pertaining to the running process where the data was collected. + fields: + - name: args + level: extended + type: keyword + description: | + Process arguments. May be filtered to protect sensitive information. + - name: pid + type: long + description: | + Numeric process ID of the service process. + - name: ppid + type: long + description: | + Numeric ID of the service's parent process. + - name: title + type: keyword + description: | + Service process title. +- name: service + type: group + description: | + Service fields. + fields: + - name: name + type: keyword + description: | + Immutable name of the service emitting this event. + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Unique meaningful name of the service node. + - name: version + type: keyword + description: | + Version of the service emitting this event. +- name: source + type: group + fields: + - name: domain + type: keyword + description: | + Source domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the source. +- name: transaction + type: group + fields: + - name: id + type: keyword + description: | + The transaction ID. +- name: user + type: group + fields: + - name: email + type: keyword + description: | + Email of the logged in user. + - name: id + type: keyword + description: | + Identifier of the logged in user. + - name: name + type: keyword + description: | + The username of the logged in user. +- name: user_agent + title: User agent + type: group + description: | + The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + fields: + - name: device + title: Device + type: group + description: | + Information concerning the device. + fields: + - name: name + type: keyword + description: | + Name of the device. + - name: name + type: keyword + description: | + Name of the user agent. + - name: original + type: keyword + description: | + Unparsed version of the user_agent. + multi_fields: + - name: text + type: text + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: family + type: keyword + description: | + OS family (such as redhat, debian, freebsd, windows).
+ - name: full + type: keyword + description: | + Operating system name, including the version or code name. + - name: kernel + type: keyword + description: | + Operating system kernel version as a raw string. + - name: name + type: keyword + description: | + Operating system name, without the version. + - name: platform + type: keyword + description: | + Operating system platform (such as centos, ubuntu, windows). + - name: version + type: keyword + description: | + Operating system version as a raw string. + - name: version + type: keyword + description: | + Version of the user agent. diff --git a/apmpackage/apm/data_stream/internal_metrics/fields/fields.yml b/apmpackage/apm/data_stream/internal_metrics/fields/fields.yml new file mode 100644 index 00000000000..063b7806e91 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/fields/fields.yml @@ -0,0 +1,411 @@ +- name: kubernetes + title: Kubernetes + type: group + description: | + Kubernetes metadata reported by agents + fields: + - name: namespace + type: keyword + description: | + Kubernetes namespace + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes node name + - name: pod + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes pod name + - name: uid + type: keyword + description: | + Kubernetes Pod UID +- name: metricset + type: group + fields: + - name: name + type: keyword + description: | + Name of the set of metrics. +- name: metricset.period + type: long + description: Current data collection period for this event in milliseconds. + unit: ms +- name: network + type: group + description: | + Optional network fields + fields: + - name: carrier + type: group + description: | + Network operator + fields: + - name: icc + type: keyword + description: | + ISO country code, eg. US + - name: mcc + type: keyword + description: | + Mobile country code + - name: mnc + type: keyword + description: | + Mobile network code + - name: name + type: keyword + description: | + Carrier name, eg. Vodafone, T-Mobile, etc. + - name: connection + type: group + description: | + Network connection details + fields: + - name: subtype + type: keyword + description: | + Detailed network connection sub-type, e.g. "LTE", "CDMA" + - name: type + type: keyword + description: | + Network connection type, eg. "wifi", "cell" +- name: observer + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + Ephemeral identifier of the APM Server. + - name: id + type: keyword + description: | + Unique identifier of the APM Server. + - name: listening + type: keyword + description: | + Address the server is listening on. + - name: version_major + type: byte + description: | + Major version number of the observer +- name: processor.event + type: keyword + description: Processor event. +- name: processor.name + type: keyword + description: Processor name. +- name: service + type: group + description: | + Service fields. + fields: + - name: environment + type: keyword + description: | + Service environment. + - name: framework + type: group + fields: + - name: name + type: keyword + description: | + Name of the framework used. + - name: version + type: keyword + description: | + Version of the framework used. + - name: language + type: group + fields: + - name: name + type: keyword + description: | + Name of the programming language used. + - name: version + type: keyword + description: | + Version of the programming language used.
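One more note on the apm_metrics_dynamic_template pipeline included earlier: it turns the agent-supplied _metric_descriptions helper field into dynamic_templates entries for histogram-typed application metrics, then drops the helper. In plain Python (a sketch of the Painless, under the same field names):

def apply_metric_descriptions(doc: dict) -> dict:
    descriptions = doc.pop("_metric_descriptions", None)
    if descriptions is None:
        return doc  # pipeline condition: only runs when the field exists
    doc["_dynamic_templates"] = {
        name: "histogram"
        for name, description in descriptions.items()
        if description.get("type") == "histogram"
    }
    return doc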
+ - name: runtime + type: group + fields: + - name: name + type: keyword + description: | + Name of the runtime used. + - name: version + type: keyword + description: | + Version of the runtime used. +- name: span + type: group + fields: + - name: destination + type: group + fields: + - name: service + type: group + description: Destination service context + fields: + - name: resource + type: keyword + description: | + Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') + - name: destination.service + type: group + fields: + - name: response_time.count + type: long + description: Number of aggregated outgoing requests. + - name: response_time.sum.us + type: long + description: Aggregated duration of outgoing requests, in microseconds. + unit: micros + - name: self_time + type: group + description: | + Portion of the span's duration where no direct child was running + fields: + - name: count + type: long + description: Number of aggregated spans. + - name: sum + type: group + fields: + - name: us + type: long + description: | + Aggregated span duration, excluding the time periods where a direct child was running, in microseconds. + unit: micros + - name: subtype + type: keyword + description: | + A further sub-division of the type (e.g. postgresql, elasticsearch) + - name: type + type: keyword + description: | + Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). +- name: system + type: group + description: | + `system` contains local system metrics. + fields: + - name: cpu + type: group + description: | + `cpu` contains local CPU stats. + fields: + - name: total.norm.pct + type: scaled_float + format: percent + description: | + The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. + metric_type: gauge + unit: percent + - name: memory + type: group + description: | + `memory` contains local memory stats. + fields: + - name: actual + type: group + description: | + Actual memory used and free. + fields: + - name: free + type: long + format: bytes + description: | + Actual free memory in bytes. It is calculated based on the OS. On Linux it consists of the free memory plus caches and buffers. On OSX it is a sum of free memory and the inactive memory. On Windows, it is equal to `system.memory.free`. + metric_type: gauge + unit: byte + - name: total + type: long + format: bytes + description: | + Total memory. + metric_type: gauge + unit: byte + - name: process + type: group + description: | + `process` contains process metadata, CPU metrics, and memory metrics. + fields: + - name: cgroup + type: group + description: Metrics and limits for the cgroup, collected by APM agents on Linux. + fields: + - name: cpu + type: group + description: CPU-specific cgroup metrics and limits. + fields: + - name: cfs + type: group + description: Completely Fair Scheduler (CFS) cgroup metrics. + fields: + - name: period.us + type: long + description: CFS period in microseconds. + metric_type: gauge + unit: micros + - name: quota.us + type: long + description: CFS quota in microseconds. + metric_type: gauge + unit: micros + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: stats.periods + type: long + description: Number of periods seen by the CPU. 
+ metric_type: counter + - name: stats.throttled.ns + type: long + description: Nanoseconds spent throttled seen by the CPU. + metric_type: counter + unit: nanos + - name: stats.throttled.periods + type: long + description: Number of throttled periods seen by the CPU. + metric_type: counter + - name: cpuacct + type: group + description: CPU Accounting-specific cgroup metrics and limits. + fields: + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: total.ns + type: long + description: Total CPU time for the current cgroup CPU in nanoseconds. + metric_type: counter + unit: nanos + - name: memory + type: group + description: Memory-specific cgroup metrics and limits. + fields: + - name: mem.limit.bytes + type: long + format: bytes + description: Memory limit for the current cgroup slice. + metric_type: gauge + unit: byte + - name: mem.usage.bytes + type: long + format: bytes + description: Memory usage by the current cgroup slice. + metric_type: gauge + unit: byte + - name: cpu + type: group + description: | + `cpu` contains local CPU stats. + fields: + - name: total.norm.pct + type: scaled_float + format: percent + description: | + The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%. + metric_type: gauge + unit: percent + - name: memory + type: group + description: Memory-specific statistics per process. + fields: + - name: rss.bytes + type: long + format: bytes + description: | + The Resident Set Size. The amount of memory the process occupied in main memory (RAM). + metric_type: gauge + unit: byte + - name: size + type: long + format: bytes + description: | + The total virtual memory the process has. + metric_type: gauge + unit: byte +- name: timeseries.instance + type: keyword + description: Time series instance ID +- name: timestamp + type: group + fields: + - name: us + type: long + description: | + Timestamp of the event in microseconds since Unix epoch. +- name: transaction + type: group + fields: + - name: breakdown + type: group + fields: + - name: count + type: long + description: | + Counter for collected breakdowns for the transaction + - name: duration + type: group + fields: + - name: count + type: long + description: Number of aggregated transactions. + - name: histogram + type: histogram + description: | + Pre-aggregated histogram of transaction durations. + - name: sum + type: group + fields: + - name: us + type: long + description: Aggregated transaction duration, in microseconds. + unit: micros + - name: name + type: keyword + description: | + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + multi_fields: + - name: text + type: text + - name: result + type: keyword + description: | + The result of the transaction. HTTP status code for HTTP-related transactions. + - name: root + type: boolean + description: | + Identifies metrics for root transactions. This can be used for calculating metrics for traces. + - name: sampled + type: boolean + description: | + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + - name: self_time + type: group + description: | + Portion of the transaction's duration where no direct child was running + fields: + - name: count + type: long + description: Number of aggregated transactions. 
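As a worked example of the aggregated transaction fields above: transaction.duration.histogram holds the pre-aggregated distribution, while duration.count and duration.sum.us allow a per-document mean to be derived. A small sketch using exactly the field names defined above:

from typing import Optional

def mean_duration_us(doc: dict) -> Optional[float]:
    duration = (doc.get("transaction") or {}).get("duration") or {}
    count = duration.get("count")
    total_us = (duration.get("sum") or {}).get("us")
    if not count or total_us is None:
        return None
    return total_us / count

assert mean_duration_us({"transaction": {"duration": {"count": 4, "sum": {"us": 1000}}}}) == 250.0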
+ - name: sum + type: group + fields: + - name: us + type: long + description: | + Aggregated transaction duration, excluding the time periods where a direct child was running, in microseconds. + unit: micros + - name: type + type: keyword + description: | + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) diff --git a/apmpackage/apm/data_stream/internal_metrics/manifest.yml b/apmpackage/apm/data_stream/internal_metrics/manifest.yml new file mode 100644 index 00000000000..2f3492c77d3 --- /dev/null +++ b/apmpackage/apm/data_stream/internal_metrics/manifest.yml @@ -0,0 +1,14 @@ +title: APM internal metrics +type: metrics +dataset: apm.internal +ilm_policy: metrics-apm.internal_metrics-default_policy +elasticsearch: + index_template: + mappings: + # Internal metrics should have all fields strictly mapped; + # we are in full control of the field names. + dynamic: strict + settings: + index: + sort.field: "@timestamp" + sort.order: desc diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..97262a5ede9 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ilm/default_policy.json @@ -0,0 +1,26 @@ +{ + "policy": { + "phases": { + "warm": { + "min_age": "30d", + "actions": { + "readonly": {}, + "set_priority": { + "priority": 50 + } + } + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_size": "50gb" + }, + "set_priority": { + "priority": 100 + } + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json new file mode 100644 index 00000000000..24218404aaf --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_data_stream_migration.json @@ -0,0 +1,30 @@ +{ + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json new file mode 100644 index 00000000000..0969e60d739 --- /dev/null +++ 
b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_error_grouping_name.json @@ -0,0 +1,18 @@ +{ + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json new file mode 100644 index 00000000000..a4df9734715 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json @@ -0,0 +1,12 @@ +{ + "description": "Add an ingest timestamp for APM events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json new file mode 100644 index 00000000000..ec15aecf007 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json @@ -0,0 +1,11 @@ +{ + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json new file mode 100644 index 00000000000..52e108a3472 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json @@ -0,0 +1,25 @@ +{ + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json new file mode 100644 index 00000000000..bdab7d4bbac --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_agent.json @@ -0,0 +1,13 @@ +{ + "description": "Add user agent information for APM 
events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json new file mode 100644 index 00000000000..4f6ae66105b --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/apm_user_geo.json @@ -0,0 +1,22 @@ +{ + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { + "field": "client.ip", + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/default.json b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..c119dd040cb --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,37 @@ +{ + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_ingest_timestamp" + } + }, + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_user_agent" + } + }, + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_user_geo" + } + }, + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "metrics-apm.profiling-0.5.0-apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/profile_metrics/fields/base-fields.yml b/apmpackage/apm/data_stream/profile_metrics/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/profile_metrics/fields/ecs.yml b/apmpackage/apm/data_stream/profile_metrics/fields/ecs.yml new file mode 100644 index 00000000000..5a95c5dc42b --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/fields/ecs.yml @@ -0,0 +1,329 @@ +- name: agent + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + The Ephemeral ID identifies a running process. + - name: name + type: keyword + description: | + Name of the agent used. + - name: version + type: keyword + description: | + Version of the agent used. +- name: client + type: group + fields: + - name: domain + type: keyword + description: | + Client domain. 
+ ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the client. +- name: cloud + title: Cloud + type: group + description: | + Cloud metadata reported by agents + fields: + - name: account + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud account ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud account name + ignore_above: 1024 + - name: availability_zone + level: extended + type: keyword + description: Cloud availability zone name + ignore_above: 1024 + - name: instance + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud instance/machine ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud instance/machine name + ignore_above: 1024 + - name: machine + type: group + fields: + - name: type + level: extended + type: keyword + description: Cloud instance/machine type + ignore_above: 1024 + - name: project + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud project ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud project name + ignore_above: 1024 + - name: provider + level: extended + type: keyword + description: Cloud provider name + ignore_above: 1024 + - name: region + level: extended + type: keyword + description: Cloud region name + ignore_above: 1024 + - name: service + type: group + fields: + - name: name + level: extended + type: keyword + description: | + Cloud service name, intended to distinguish services running on different platforms within a provider. + ignore_above: 1024 +- name: container + title: Container + type: group + description: | + Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based on containers from any runtime. + fields: + - name: id + type: keyword + description: | + Unique container id. +- name: destination + title: Destination + type: group + description: |- + Destination fields describe details about the destination of a packet/event. + Destination fields are usually populated in conjunction with source fields. + fields: + - name: address + level: extended + type: keyword + description: Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + ignore_above: 1024 + - name: ip + level: core + type: ip + description: IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses. + - name: port + level: core + type: long + format: string + description: Port of the destination. +- name: host + type: group + description: | + Optional host fields. + fields: + - name: architecture + type: keyword + description: | + The architecture of the host the event was recorded on. + - name: hostname + type: keyword + description: | + The hostname of the host the event was recorded on. + - name: ip + type: ip + description: | + IP of the host that records the event. + - name: name + type: keyword + description: | + Name of the host the event was recorded on.
It can contain the same information as host.hostname or a name specified by the user. + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: platform + type: keyword + description: | + The platform of the host the event was recorded on. +- name: labels + type: object + description: | + A flat mapping of user-defined labels with string, boolean or number values. + dynamic: true + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 +- name: observer + type: group + fields: + - name: hostname + type: keyword + description: | + Hostname of the APM Server. + - name: type + type: keyword + description: | + The type will be set to `apm-server`. + - name: version + type: keyword + description: | + APM Server version. +- name: process + type: group + description: | + Information pertaining to the running process where the data was collected. + fields: + - name: args + level: extended + type: keyword + description: | + Process arguments. May be filtered to protect sensitive information. + - name: pid + type: long + description: | + Numeric process ID of the service process. + - name: ppid + type: long + description: | + Numeric ID of the service's parent process. + - name: title + type: keyword + description: | + Service process title. +- name: service + type: group + description: | + Service fields. + fields: + - name: name + type: keyword + description: | + Immutable name of the service emitting this event. + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Unique meaningful name of the service node. + - name: version + type: keyword + description: | + Version of the service emitting this event. +- name: source + type: group + fields: + - name: domain + type: keyword + description: | + Source domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the source. +- name: user + type: group + fields: + - name: email + type: keyword + description: | + Email of the logged in user. + - name: id + type: keyword + description: | + Identifier of the logged in user. + - name: name + type: keyword + description: | + The username of the logged in user. +- name: user_agent + title: User agent + type: group + description: | + The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + fields: + - name: device + title: Device + type: group + description: | + Information concerning the device. + fields: + - name: name + type: keyword + description: | + Name of the device. + - name: name + type: keyword + description: | + Name of the user agent. + - name: original + type: keyword + description: | + Unparsed version of the user_agent. + multi_fields: + - name: text + type: text + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: family + type: keyword + description: | + OS family (such as redhat, debian, freebsd, windows). + - name: full + type: keyword + description: | + Operating system name, including the version or code name.
+ - name: kernel + type: keyword + description: | + Operating system kernel version as a raw string. + - name: name + type: keyword + description: | + Operating system name, without the version. + - name: platform + type: keyword + description: | + Operating system platform (such centos, ubuntu, windows). + - name: version + type: keyword + description: | + Operating system version as a raw string. + - name: version + type: keyword + description: | + Version of the user agent. diff --git a/apmpackage/apm/data_stream/profile_metrics/fields/fields.yml b/apmpackage/apm/data_stream/profile_metrics/fields/fields.yml new file mode 100644 index 00000000000..fd308cfa855 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/fields/fields.yml @@ -0,0 +1,242 @@ +- name: kubernetes + title: Kubernetes + type: group + description: | + Kubernetes metadata reported by agents + fields: + - name: namespace + type: keyword + description: | + Kubernetes namespace + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes node name + - name: pod + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes pod name + - name: uid + type: keyword + description: | + Kubernetes Pod UID +- name: network + type: group + description: | + Optional network fields + fields: + - name: carrier + type: group + description: | + Network operator + fields: + - name: icc + type: keyword + description: | + ISO country code, eg. US + - name: mcc + type: keyword + description: | + Mobile country code + - name: mnc + type: keyword + description: | + Mobile network code + - name: name + type: keyword + description: | + Carrier name, eg. Vodafone, T-Mobile, etc. + - name: connection + type: group + description: | + Network connection details + fields: + - name: subtype + type: keyword + description: | + Detailed network connection sub-type, e.g. "LTE", "CDMA" + - name: type + type: keyword + description: | + Network connection type, eg. "wifi", "cell" +- name: observer + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + Ephemeral identifier of the APM Server. + - name: id + type: keyword + description: | + Unique identifier of the APM Server. + - name: listening + type: keyword + description: | + Address the server is listening on. + - name: version_major + type: byte + description: | + Major version number of the observer +- name: processor.event + type: keyword + description: Processor event. +- name: processor.name + type: keyword + description: Processor name. +- name: profile + type: group + fields: + - name: alloc_objects + type: group + fields: + - name: count + type: long + description: | + Number of objects allocated since the process started. + - name: alloc_space + type: group + fields: + - name: bytes + type: long + description: | + Amount of memory allocated, in bytes, since the process started. + - name: cpu + type: group + fields: + - name: ns + type: long + description: | + Amount of CPU time profiled, in nanoseconds. + unit: nanos + - name: duration + type: long + description: | + Duration of the profile, in nanoseconds. All samples within a profile will have the same duration. To aggregate durations, you should first group by the profile ID. + unit: nanos + - name: id + type: keyword + description: | + Unique ID for the profile. All samples within a profile will have the same profile ID. 
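The note on `profile.duration` above matters when totalling durations across documents: every sample in a profile repeats the same duration, so a naive sum over-counts. A minimal sketch of the intended aggregation, assuming profile documents land in a `metrics-apm.profiling-*` data stream (the dataset configured in this data stream's manifest):

```json
{
  "size": 0,
  "aggs": {
    "profiles": {
      "terms": { "field": "profile.id" },
      "aggs": {
        "duration_ns": { "max": { "field": "profile.duration" } }
      }
    },
    "total_duration_ns": {
      "sum_bucket": { "buckets_path": "profiles>duration_ns" }
    }
  }
}
```

Grouping by `profile.id` first collapses each profile to a single duration (`max` over identical values), and the `sum_bucket` pipeline aggregation then totals across profiles.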
+ - name: inuse_objects + type: group + fields: + - name: count + type: long + description: | + Number of objects allocated and currently in use. + - name: inuse_space + type: group + fields: + - name: bytes + type: long + description: | + Amount of memory allocated, in bytes, and currently in use. + - name: samples + type: group + fields: + - name: count + type: long + description: | + Number of profile samples for the profiling period. + - name: stack + type: group + fields: + - name: filename + type: keyword + description: | + Source code filename for a stack frame. + - name: function + type: keyword + description: | + Function name for a stack frame. + - name: id + type: keyword + description: | + Unique ID for a stack frame in the context of its callers. + - name: line + type: long + description: | + Source code line number for a stack frame. + - name: top + type: group + fields: + - name: filename + type: keyword + description: | + Source code filename for the top stack frame. + - name: function + type: keyword + description: | + Function name for the top stack frame. + - name: id + type: keyword + description: | + Unique ID for the top stack frame in the context of its callers. + - name: line + type: long + description: | + Source code line number for the top stack frame. + - name: wall + type: group + fields: + - name: us + type: long + description: | + Amount of wall time profiled, in microseconds. + unit: micros +- name: service + type: group + description: | + Service fields. + fields: + - name: environment + type: keyword + description: | + Service environment. + - name: framework + type: group + fields: + - name: name + type: keyword + description: | + Name of the framework used. + - name: version + type: keyword + description: | + Version of the framework used. + - name: language + type: group + fields: + - name: name + type: keyword + description: | + Name of the programming language used. + - name: version + type: keyword + description: | + Version of the programming language used. + - name: runtime + type: group + fields: + - name: name + type: keyword + description: | + Name of the runtime used. + - name: version + type: keyword + description: | + Version of the runtime used. +- name: timestamp + type: group + fields: + - name: us + type: long + description: | + Timestamp of the event in microseconds since Unix epoch. diff --git a/apmpackage/apm/data_stream/profile_metrics/manifest.yml b/apmpackage/apm/data_stream/profile_metrics/manifest.yml new file mode 100644 index 00000000000..121e328ca14 --- /dev/null +++ b/apmpackage/apm/data_stream/profile_metrics/manifest.yml @@ -0,0 +1,10 @@ +title: APM profiles +type: metrics +dataset: apm.profiling +ilm_policy: metrics-apm.profile_metrics-default_policy +elasticsearch: + index_template: + mappings: + # Profile metrics currently must be dynamically + # mapped, as pprof metric names may be customised. 
+ dynamic: true diff --git a/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..2272bb61436 --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ilm/default_policy.json @@ -0,0 +1,19 @@ +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_age": "1h" + } + } + }, + "delete": { + "min_age": "1h", + "actions": { + "delete": {} + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ingest_pipeline/default.json b/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..ac07d8ee66e --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,11 @@ +{ + "description": "Ingest pipeline for sampled trace documents", + "processors": [ + { + "set": { + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} diff --git a/apmpackage/apm/data_stream/sampled_traces/fields/base-fields.yml b/apmpackage/apm/data_stream/sampled_traces/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/sampled_traces/fields/ecs.yml b/apmpackage/apm/data_stream/sampled_traces/fields/ecs.yml new file mode 100644 index 00000000000..690ea7daf4a --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/fields/ecs.yml @@ -0,0 +1,8 @@ +- name: event.ingested + type: date + description: | + Timestamp when an event arrived in the central data store. +- name: trace.id + type: keyword + description: | + The ID of the sampled trace. diff --git a/apmpackage/apm/data_stream/sampled_traces/fields/fields.yml b/apmpackage/apm/data_stream/sampled_traces/fields/fields.yml new file mode 100644 index 00000000000..370b8752662 --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/fields/fields.yml @@ -0,0 +1,6 @@ +# When changing fields or ILM policy, make sure to update +# x-pack/apm-server/sampling/pubsub/datastream.go. +- name: observer.id + type: keyword + description: | + The ID of the APM Server that indexed the sampled trace ID. diff --git a/apmpackage/apm/data_stream/sampled_traces/manifest.yml b/apmpackage/apm/data_stream/sampled_traces/manifest.yml new file mode 100644 index 00000000000..27d9752c8a5 --- /dev/null +++ b/apmpackage/apm/data_stream/sampled_traces/manifest.yml @@ -0,0 +1,15 @@ +title: APM tail-sampled traces +type: traces +dataset: apm.sampled +ilm_policy: traces-apm.sampled-default_policy +elasticsearch: + index_template: + settings: + # Create a single shard per index, so we can use + # global checkpoints as a way of limiting search + # results. + number_of_shards: 1 + mappings: + # Sampled traces should have all fields strictly mapped; + # we are in full control of the field names. 
+ dynamic: strict diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ilm/default_policy.json b/apmpackage/apm/data_stream/traces/elasticsearch/ilm/default_policy.json new file mode 100644 index 00000000000..97262a5ede9 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ilm/default_policy.json @@ -0,0 +1,26 @@ +{ + "policy": { + "phases": { + "warm": { + "min_age": "30d", + "actions": { + "readonly": {}, + "set_priority": { + "priority": 50 + } + } + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_size": "50gb" + }, + "set_priority": { + "priority": 100 + } + } + } + } + } +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_data_stream_migration.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_data_stream_migration.json new file mode 100644 index 00000000000..24218404aaf --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_data_stream_migration.json @@ -0,0 +1,30 @@ +{ + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_error_grouping_name.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_error_grouping_name.json new file mode 100644 index 00000000000..0969e60d739 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_error_grouping_name.json @@ -0,0 +1,18 @@ +{ + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json new file mode 100644 index 00000000000..a4df9734715 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_ingest_timestamp.json @@ -0,0 +1,12 @@ +{ + "description": "Add an ingest timestamp for APM events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": 
"event.ingested", + "value": "{{_ingest.timestamp}}" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json new file mode 100644 index 00000000000..ec15aecf007 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_metrics_dynamic_template.json @@ -0,0 +1,11 @@ +{ + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json new file mode 100644 index 00000000000..52e108a3472 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_remove_span_metadata.json @@ -0,0 +1,25 @@ +{ + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_agent.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_agent.json new file mode 100644 index 00000000000..bdab7d4bbac --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_agent.json @@ -0,0 +1,13 @@ +{ + "description": "Add user agent information for APM events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_geo.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_geo.json new file mode 100644 index 00000000000..4f6ae66105b --- /dev/null +++ b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/apm_user_geo.json @@ -0,0 +1,22 @@ +{ + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { + "field": "client.ip", + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/default.json b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/default.json new file mode 100644 index 00000000000..ba4514be78c --- /dev/null +++ 
b/apmpackage/apm/data_stream/traces/elasticsearch/ingest_pipeline/default.json @@ -0,0 +1,37 @@ +{ + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_ingest_timestamp" + } + }, + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_user_agent" + } + }, + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_user_geo" + } + }, + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "traces-apm-0.5.0-apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } + } + ] +} \ No newline at end of file diff --git a/apmpackage/apm/data_stream/traces/fields/base-fields.yml b/apmpackage/apm/data_stream/traces/fields/base-fields.yml new file mode 100644 index 00000000000..bf9b70e13de --- /dev/null +++ b/apmpackage/apm/data_stream/traces/fields/base-fields.yml @@ -0,0 +1,15 @@ +- name: '@timestamp' + type: date + description: Event timestamp. +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: ecs.version + type: keyword + description: ECS version the event conforms to. diff --git a/apmpackage/apm/data_stream/traces/fields/ecs.yml b/apmpackage/apm/data_stream/traces/fields/ecs.yml new file mode 100644 index 00000000000..efec19378b5 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/fields/ecs.yml @@ -0,0 +1,421 @@ +- name: agent + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + The Ephemeral ID identifies a running process. + - name: name + type: keyword + description: | + Name of the agent used. + - name: version + type: keyword + description: | + Version of the agent used. +- name: client + type: group + fields: + - name: domain + type: keyword + description: | + Client domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the client. 
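The `default.json` pipeline above only chains the versioned sub-pipelines, so each enrichment step can be exercised in isolation. As a minimal sketch, assuming the integration has installed the pipelines under the `traces-apm-0.5.0-*` names shown, Elasticsearch's simulate API should show `apm_error_grouping_name` copying a log message into `error.grouping_name`:

```json
POST /_ingest/pipeline/traces-apm-0.5.0-apm_error_grouping_name/_simulate
{
  "docs": [
    {
      "_source": {
        "processor": { "event": "error" },
        "error": { "log": { "message": "connection refused" } }
      }
    }
  ]
}
```

Because the document carries no `error.exception` array, the first script processor is skipped and the `set` processor fires, so the simulated document should come back with `error.grouping_name` set to `"connection refused"`.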
+- name: cloud + title: Cloud + type: group + description: | + Cloud metadata reported by agents + fields: + - name: account + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud account ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud account name + ignore_above: 1024 + - name: availability_zone + level: extended + type: keyword + description: Cloud availability zone name + ignore_above: 1024 + - name: instance + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud instance/machine ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud instance/machine name + ignore_above: 1024 + - name: machine + type: group + fields: + - name: type + level: extended + type: keyword + description: Cloud instance/machine type + ignore_above: 1024 + - name: project + type: group + fields: + - name: id + level: extended + type: keyword + description: Cloud project ID + ignore_above: 1024 + - name: name + level: extended + type: keyword + description: Cloud project name + ignore_above: 1024 + - name: provider + level: extended + type: keyword + description: Cloud provider name + ignore_above: 1024 + - name: region + level: extended + type: keyword + description: Cloud region name + ignore_above: 1024 + - name: service + type: group + fields: + - name: name + level: extended + type: keyword + description: | + Cloud service name, intended to distinguish services running on different platforms within a provider. + ignore_above: 1024 +- name: container + title: Container + type: group + description: | + Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based on containers from any runtime. + fields: + - name: id + type: keyword + description: | + Unique container id. +- name: destination + title: Destination + type: group + description: |- + Destination fields describe details about the destination of a packet/event. + Destination fields are usually populated in conjunction with source fields. + fields: + - name: address + level: extended + type: keyword + description: Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + ignore_above: 1024 + - name: ip + level: core + type: ip + description: IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses. + - name: port + level: core + type: long + format: string + description: Port of the destination. +- name: event + type: group + fields: + - name: outcome + level: core + type: keyword + description: | + `event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. + ignore_above: 1024 +- name: host + type: group + description: | + Optional host fields. + fields: + - name: architecture + type: keyword + description: | + The architecture of the host the event was recorded on. + - name: hostname + type: keyword + description: | + The hostname of the host the event was recorded on. + - name: ip + type: ip + description: | + IP of the host that records the event. + - name: name + type: keyword + description: | + Name of the host the event was recorded on. It can contain the same information as host.hostname or a name specified by the user. + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: platform + type: keyword + description: | + The platform of the host the event was recorded on. +- name: http + type: group + fields: + - name: request + type: group + fields: + - name: method + type: keyword + description: | + The http method of the request leading to this event. + - name: referrer + type: keyword + description: Referrer for this HTTP request. + ignore_above: 1024 + - name: response + type: group + fields: + - name: status_code + type: long + description: | + The status code of the HTTP response. + - name: version + type: keyword + description: | + The http version of the request leading to this event. +- name: labels + type: object + description: | + A flat mapping of user-defined labels with string, boolean or number values. + dynamic: true + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 +- name: observer + type: group + fields: + - name: hostname + type: keyword + description: | + Hostname of the APM Server. + - name: type + type: keyword + description: | + The type will be set to `apm-server`. + - name: version + type: keyword + description: | + APM Server version. +- name: process + type: group + description: | + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: | + Process arguments. May be filtered to protect sensitive information. + - name: pid + type: long + description: | + Numeric process ID of the service process. + - name: ppid + type: long + description: | + Numeric ID of the service's parent process. + - name: title + type: keyword + description: | + Service process title. +- name: service + type: group + description: | + Service fields. + fields: + - name: name + type: keyword + description: | + Immutable name of the service emitting this event. + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Unique meaningful name of the service node. + - name: version + type: keyword + description: | + Version of the service emitting this event. +- name: source + type: group + fields: + - name: domain + type: keyword + description: | + Source domain. + ignore_above: 1024 + - name: ip + type: ip + description: | + IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + - name: port + type: long + description: | + Port of the source. +- name: span + type: group + fields: + - name: id + type: keyword + description: | + The ID of the span stored as hex encoded string. +- name: trace + type: group + fields: + - name: id + type: keyword + description: | + The ID of the trace to which the event belongs. +- name: transaction + type: group + fields: + - name: id + type: keyword + description: | + The transaction ID. +- name: url + type: group + description: | + A complete URL, with scheme, host and path. + fields: + - name: domain + type: keyword + description: | + The hostname of the request, e.g. "example.com". + - name: fragment + type: keyword + description: | + A fragment specifying a location in a web page, e.g. "top". + - name: full + type: keyword + description: | + The full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top. + - name: path + type: keyword + description: | + The path of the request, e.g. "/search". + - name: port + type: long + description: | + The port of the request, e.g. 443. + - name: query + type: keyword + description: | + The query string of the request, e.g. "q=elasticsearch". + - name: scheme + type: keyword + description: | + The protocol of the request, e.g. "https:". +- name: user + type: group + fields: + - name: domain + type: keyword + description: | + Domain of the logged in user. + - name: email + type: keyword + description: | + Email of the logged in user. + - name: id + type: keyword + description: | + Identifier of the logged in user. + - name: name + type: keyword + description: | + The username of the logged in user. +- name: user_agent + title: User agent + type: group + description: | + The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + fields: + - name: device + title: Device + type: group + description: | + Information concerning the device. + fields: + - name: name + type: keyword + description: | + Name of the device. + - name: name + type: keyword + description: | + Name of the user agent. + - name: original + type: keyword + description: | + Unparsed version of the user_agent. + multi_fields: + - name: text + type: text + - name: os + title: Operating System + type: group + description: | + The OS fields contain information about the operating system. + fields: + - name: family + type: keyword + description: | + OS family (such as redhat, debian, freebsd, windows). + - name: full + type: keyword + description: | + Operating system name, including the version or code name. + - name: kernel + type: keyword + description: | + Operating system kernel version as a raw string. + - name: name + type: keyword + description: | + Operating system name, without the version. + - name: platform + type: keyword + description: | + Operating system platform (such as centos, ubuntu, windows). + - name: version + type: keyword + description: | + Operating system version as a raw string. + - name: version + type: keyword + description: | + Version of the user agent. diff --git a/apmpackage/apm/data_stream/traces/fields/fields.yml b/apmpackage/apm/data_stream/traces/fields/fields.yml new file mode 100644 index 00000000000..d981bc1d4fb --- /dev/null +++ b/apmpackage/apm/data_stream/traces/fields/fields.yml @@ -0,0 +1,377 @@ +- name: child + type: group + fields: + - name: id + type: keyword + description: | + The ID(s) of the child event(s). +- name: http + type: group + fields: + - name: request + type: group + fields: + - name: headers + type: object + description: | + The canonical headers of the monitored HTTP request. + - name: response + type: group + fields: + - name: finished + type: boolean + description: | + Used by the Node agent to indicate when in the response life cycle an error has occurred. + - name: headers + type: object + description: | + The canonical headers of the monitored HTTP response.
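Stepping back to the `url.*` fields in the ECS mappings above: taken together they decompose one request URL into searchable parts. Using the examples embedded in the field descriptions themselves, a `url.full` of `https://example.com:443/search?q=elasticsearch#top` would be indexed roughly as:

```json
{
  "url": {
    "full": "https://example.com:443/search?q=elasticsearch#top",
    "scheme": "https:",
    "domain": "example.com",
    "port": 443,
    "path": "/search",
    "query": "q=elasticsearch",
    "fragment": "top"
  }
}
```

Every value here is lifted from the `e.g.` notes on the corresponding fields; only the grouping into a single document is illustrative.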
+- name: kubernetes + title: Kubernetes + type: group + description: | + Kubernetes metadata reported by agents + fields: + - name: namespace + type: keyword + description: | + Kubernetes namespace + - name: node + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes node name + - name: pod + type: group + fields: + - name: name + type: keyword + description: | + Kubernetes pod name + - name: uid + type: keyword + description: | + Kubernetes Pod UID +- name: network + type: group + description: | + Optional network fields + fields: + - name: carrier + type: group + description: | + Network operator + fields: + - name: icc + type: keyword + description: | + ISO country code, eg. US + - name: mcc + type: keyword + description: | + Mobile country code + - name: mnc + type: keyword + description: | + Mobile network code + - name: name + type: keyword + description: | + Carrier name, eg. Vodafone, T-Mobile, etc. + - name: connection + type: group + description: | + Network connection details + fields: + - name: subtype + type: keyword + description: | + Detailed network connection sub-type, e.g. "LTE", "CDMA" + - name: type + type: keyword + description: | + Network connection type, eg. "wifi", "cell" +- name: observer + type: group + fields: + - name: ephemeral_id + type: keyword + description: | + Ephemeral identifier of the APM Server. + - name: id + type: keyword + description: | + Unique identifier of the APM Server. + - name: listening + type: keyword + description: | + Address the server is listening on. + - name: version_major + type: byte + description: | + Major version number of the observer +- name: parent + type: group + fields: + - name: id + type: keyword + description: | + The ID of the parent event. +- name: processor.event + type: keyword + description: Processor event. +- name: processor.name + type: keyword + description: Processor name. +- name: service + type: group + description: | + Service fields. + fields: + - name: environment + type: keyword + description: | + Service environment. + - name: framework + type: group + fields: + - name: name + type: keyword + description: | + Name of the framework used. + - name: version + type: keyword + description: | + Version of the framework used. + - name: language + type: group + fields: + - name: name + type: keyword + description: | + Name of the programming language used. + - name: version + type: keyword + description: | + Version of the programming language used. + - name: runtime + type: group + fields: + - name: name + type: keyword + description: | + Name of the runtime used. + - name: version + type: keyword + description: | + Version of the runtime used. +- name: session + type: group + fields: + - name: id + type: keyword + description: | + The ID of the session to which the event belongs. + ignore_above: 1024 + - name: sequence + type: long + description: | + The sequence number of the event within the session to which the event belongs. +- name: span + type: group + fields: + - name: action + type: keyword + description: | + The specific kind of event within the sub-type represented by the span (e.g. query, connect) + - name: composite + type: group + fields: + - name: compression_strategy + type: keyword + description: | + The compression strategy that was used. + - name: count + type: long + description: | + Number of compressed spans the composite span represents. 
+ - name: sum + type: group + fields: + - name: us + type: long + description: | + Sum of the durations of the compressed spans, in microseconds. + - name: db + type: group + fields: + - name: link + type: keyword + description: | + Database link. + - name: rows_affected + type: long + description: | + Number of rows affected by the database statement. + - name: destination + type: group + fields: + - name: service + type: group + description: Destination service context + fields: + - name: name + type: keyword + description: | + Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq') DEPRECATED: this field will be removed in a future release + - name: resource + type: keyword + description: | + Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') + - name: type + type: keyword + description: | + Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release + - name: duration + type: group + fields: + - name: us + type: long + description: | + Duration of the span, in microseconds. + - name: message + type: group + fields: + - name: age + type: group + fields: + - name: ms + type: long + description: | + Age of a message in milliseconds. + - name: queue + type: group + fields: + - name: name + type: keyword + description: | + Name of the message queue or topic where the message is published or received. + - name: name + type: keyword + description: | + Generic designation of a span in the scope of a transaction. + - name: start + type: group + fields: + - name: us + type: long + description: | + Offset relative to the transaction's timestamp identifying the start of the span, in microseconds. + - name: subtype + type: keyword + description: | + A further sub-division of the type (e.g. postgresql, elasticsearch) + - name: sync + type: boolean + description: | + Indicates whether the span was executed synchronously or asynchronously. + - name: type + type: keyword + description: | + Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). +- name: timestamp + type: group + fields: + - name: us + type: long + description: | + Timestamp of the event in microseconds since Unix epoch. +- name: transaction + type: group + fields: + - name: duration + type: group + fields: + - name: us + type: long + description: | + Total duration of this transaction, in microseconds. + - name: experience + type: group + fields: + - name: cls + type: scaled_float + description: The Cumulative Layout Shift metric + - name: fid + type: scaled_float + description: The First Input Delay metric + - name: longtask + type: group + description: Longtask duration/count metrics + fields: + - name: count + type: long + description: The total number of longtasks + - name: max + type: scaled_float + description: The max longtask duration + - name: sum + type: scaled_float + description: The sum of longtask durations + - name: tbt + type: scaled_float + description: The Total Blocking Time metric + - name: marks + type: object + description: | + A user-defined mapping of groups of marks in milliseconds. + dynamic: true + - name: marks.*.* + type: object + description: | + A user-defined mapping of groups of marks in milliseconds. + dynamic: true + - name: message + type: group + fields: + - name: age + type: group + fields: + - name: ms + type: long + description: | + Age of a message in milliseconds. + - name: queue + type: group + fields: + - name: name + type: keyword + description: | + Name of the message queue or topic where the message is published or received. + - name: name + type: keyword + description: | + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + multi_fields: + - name: text + type: text + - name: result + type: keyword + description: | + The result of the transaction. HTTP status code for HTTP-related transactions. + - name: sampled + type: boolean + description: | + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + - name: span_count + type: group + fields: + - name: dropped + type: long + description: The total amount of dropped spans for this transaction. + - name: type + type: keyword + description: | + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) diff --git a/apmpackage/apm/data_stream/traces/manifest.yml b/apmpackage/apm/data_stream/traces/manifest.yml new file mode 100644 index 00000000000..958db83fd00 --- /dev/null +++ b/apmpackage/apm/data_stream/traces/manifest.yml @@ -0,0 +1,12 @@ +title: APM traces +type: traces +dataset: apm +ilm_policy: traces-apm.traces-default_policy +elasticsearch: + index_template: + mappings: + # TODO(axw) investigate setting `dynamic: runtime`, so that fields are + # runtime searchable by default. That way users can, for example, perform + # ad-hoc searches on HTTP request headers without incurring storage cost + # for users who do not need this capability. + dynamic: false diff --git a/apmpackage/apm/docs/README.md b/apmpackage/apm/docs/README.md new file mode 100644 index 00000000000..9c1bd6cd8ce --- /dev/null +++ b/apmpackage/apm/docs/README.md @@ -0,0 +1,827 @@ +# APM Integration + +The APM integration installs Elasticsearch templates and ingest node pipelines for APM data. + +### Quick start + +Ready to jump in? Read the [APM quick start](https://ela.st/quick-start-apm). + +### How to use this integration + +Add the APM integration to an Elastic Agent policy to create an `apm` input. +Any Elastic Agents set up with this policy will run an APM Server binary locally. +Don't forget to configure the APM Server `host` if it needs to be accessed from outside, such as when running in Docker. +Then, configure your APM agents to communicate with APM Server. + +If you have Real User Monitoring (RUM) enabled, you must run Elastic Agent centrally. +Otherwise, you can run it on edge machines by downloading and installing Elastic Agent +on the same machines that your instrumented services run on. + +#### Data Streams + +When using the APM integration, APM events are indexed into data streams. Data stream names contain the event type, +service name, and a user-configurable namespace. For example, application metrics for a service named `checkout` in the `production` namespace would be written to a data stream like `metrics-apm.app.checkout-production`. + +There is no specific recommendation for what to use as a namespace; it is intentionally flexible. +You might use the environment, like `production`, `testing`, or `development`, +or you could namespace data by business unit. It is your choice. + +See [APM data streams](https://ela.st/apm-data-streams) for more information. + +## Compatibility and limitations + +The APM integration requires Kibana v7.12 and Elasticsearch with at least the basic license. +This version is experimental and has some limitations, listed below: + +- Sourcemaps need to be uploaded to Elasticsearch directly. +- You need to create specific API keys for sourcemaps and central configuration. +- You can't use an Elastic Agent enrolled before 7.12. +- Not all settings are supported. +- The `apm` templates, pipelines, and ILM settings that ship with this integration cannot be configured or changed with Fleet; +changes must be made with Elasticsearch APIs or Kibana's Stack Management. + +See [APM integration limitations](https://ela.st/apm-integration-limitations) for more information. + +IMPORTANT: If you run APM Server with Elastic Agent manually in standalone mode, you must install the APM integration before ingestion starts. + +## Traces + +Traces are composed of [spans and transactions](https://www.elastic.co/guide/en/apm/get-started/current/apm-data-model.html). +Traces are written to `traces-apm.*` indices. + +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +|@timestamp|Event timestamp.|date| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.ephemeral\_id|The Ephemeral ID identifies a running process.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.name|Name of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.version|Version of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|child.id|The ID(s) of the child event(s).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|client.domain|Client domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.ip|IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.port|Port of the client.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.id|Cloud account ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.name|Cloud account name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.availability\_zone|Cloud availability zone name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.id|Cloud instance/machine ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.name|Cloud instance/machine name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.machine.type|Cloud instance/machine type|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.id|Cloud project ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.name|Cloud project name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.provider|Cloud provider name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.region|Cloud region name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.service.name|Cloud service name, intended to distinguish services running on different platforms within a provider.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|container.id|Unique container id.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.dataset|Data stream dataset.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.namespace|Data stream namespace.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.type|Data stream type.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.address|Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the \`.address\` field. Then it should be duplicated to \`.ip\` or \`.domain\`, depending on which one it is.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.ip|IP address of the destination.
Can be one of multiple IPv4 or IPv6 addresses.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.port|Port of the destination.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|ecs.version|ECS version the event conforms to.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|event.outcome|\`event.outcome\` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.architecture|The architecture of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.hostname|The hostname of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.ip|IP of the host that records the event.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.name|Name of the host the event was recorded on. It can contain same information as host.hostname or a name specified by the user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.os.platform|The platform of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.request.headers|The canonical headers of the monitored HTTP request.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.request.method|The http method of the request leading to this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.request.referrer|Referrer for this HTTP request.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.response.finished|Used by the Node agent to indicate when in the response life cycle an error has occurred.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.response.headers|The canonical headers of the monitored HTTP response.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.response.status\_code|The status code of the HTTP response.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.version|The http version of the request leading to this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|kubernetes.namespace|Kubernetes namespace|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.node.name|Kubernetes node name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.name|Kubernetes pod name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.uid|Kubernetes Pod UID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|labels|A flat mapping of user-defined labels with string, boolean or number values.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|network.carrier.icc|ISO country code, eg. US|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mcc|Mobile country code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mnc|Mobile network code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.name|Carrier name, eg. 
Vodafone, T-Mobile, etc.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.subtype|Detailed network connection sub-type, e.g. "LTE", "CDMA"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.type|Network connection type, eg. "wifi", "cell"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.ephemeral\_id|Ephemeral identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.hostname|Hostname of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.id|Unique identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.listening|Address the server is listening on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.type|The type will be set to \`apm-server\`.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version|APM Server version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version\_major|Major version number of the observer|byte| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|parent.id|The ID of the parent event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|process.args|Process arguments. May be filtered to protect sensitive information.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.pid|Numeric process ID of the service process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.ppid|Numeric ID of the service's parent process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.title|Service process title.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|processor.event|Processor event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|processor.name|Processor name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.environment|Service environment.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.name|Name of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.version|Version of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.name|Name of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.version|Version of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.name|Immutable name of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.node.name|Unique meaningful name of the service node.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.runtime.name|Name of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.runtime.version|Version of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.version|Version of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|session.id|The ID of the session to which the event belongs.|keyword| 
![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|session.sequence|The sequence number of the event within the session to which the event belongs.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|source.domain|Source domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.ip|IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.port|Port of the source.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|span.action|The specific kind of event within the sub-type represented by the span (e.g. query, connect)|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.composite.compression\_strategy|The compression strategy that was used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.composite.count|Number of compressed spans the composite span represents.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.composite.sum.us|Sum of the durations of the compressed spans, in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.db.link|Database link.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.db.rows\_affected|Number of rows affected by the database statement.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.destination.service.name|Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq') DEPRECATED: this field will be removed in a future release|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.destination.service.resource|Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue\_name')|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.destination.service.type|Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.duration.us|Duration of the span, in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.id|The ID of the span stored as hex encoded string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|span.message.age.ms|Age of a message in milliseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.message.queue.name|Name of the message queue or topic where the message is published or received.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.name|Generic designation of a span in the scope of a transaction.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.start.us|Offset relative to the transaction's timestamp identifying the start of the span, in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.subtype|A further sub-division of the type (e.g. 
postgresql, elasticsearch)|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.sync|Indicates whether the span was executed synchronously or asynchronously.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|span.type|Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|timestamp.us|Timestamp of the event in microseconds since Unix epoch.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|trace.id|The ID of the trace to which the event belongs to.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|transaction.duration.us|Total duration of this transaction, in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.cls|The Cumulative Layout Shift metric|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.fid|The First Input Delay metric|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.longtask.count|The total number of of longtasks|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.longtask.max|The max longtask duration|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.longtask.sum|The sum of longtask durations|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.experience.tbt|The Total Blocking Time metric|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.id|The transaction ID.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|transaction.marks|A user-defined mapping of groups of marks in milliseconds.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.marks.\*.\*|A user-defined mapping of groups of marks in milliseconds.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.message.age.ms|Age of a message in milliseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.message.queue.name|Name of the message queue or topic where the message is published or received.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.name|Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id').|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.result|The result of the transaction. HTTP status code for HTTP-related transactions.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.sampled|Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.span\_count.dropped|The total amount of dropped spans for this transaction.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.type|Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc)|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|url.domain|The hostname of the request, e.g. 
"example.com".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.fragment|A fragment specifying a location in a web page , e.g. "top".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.full|The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.path|The path of the request, e.g. "/search".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.port|The port of the request, e.g. 443.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.query|The query string of the request, e.g. "q=elasticsearch".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.scheme|The protocol of the request, e.g. "https:".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.domain|Domain of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.email|Email of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.id|Identifier of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.name|The username of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.device.name|Name of the device.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.name|Name of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.original|Unparsed version of the user\_agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.family|OS family (such as redhat, debian, freebsd, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.full|Operating system name, including the version or code name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.kernel|Operating system kernel version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.name|Operating system name, without the version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.platform|Operating system platform (such centos, ubuntu, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.version|Operating system version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.version|Version of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | + + +#### Examples + +```json +{ + "@timestamp": "2017-05-30T18:53:42.281Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "container": { + "id": "container-id" + }, + "ecs": { + "version": "1.11.0" + }, + "event": { + "ingested": "2020-08-11T09:55:04.391451Z", + "outcome": "unknown" + }, + "host": { + "architecture": "x64", + "ip": "127.0.0.1", + "os": { + "platform": "darwin" + } + }, + "kubernetes": { + "namespace": "namespace1", + "pod": { + "name": "pod-name", + "uid": "pod-uid" + } + }, + "observer": { + "ephemeral_id": "f78f6762-2157-4322-95aa-aecd2f486c1a", + "hostname": "ix.lan", + "id": "80b79979-4a7d-450d-b2ce-75c589f7fffd", + "type": "apm-server", + "version": "8.0.0", + "version_major": 8 + }, + 
"process": { + "args": [ + "node", + "server.js" + ], + "pid": 1234, + "ppid": 6789, + "title": "node" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "environment": "staging", + "framework": { + "name": "Express", + "version": "1.2.3" + }, + "language": { + "name": "ecmascript", + "version": "8" + }, + "name": "1234_service-12a3", + "node": { + "name": "container-id" + }, + "runtime": { + "name": "node", + "version": "8.0.0" + }, + "version": "5.1.3" + }, + "timestamp": { + "us": 1496170422281000 + }, + "trace": { + "id": "85925e55b43f4340aaaaaaaaaaaaaaaa" + }, + "transaction": { + "duration": { + "us": 13980 + }, + "id": "85925e55b43f4340", + "name": "GET /api/types", + "result": "failure", + "sampled": true, + "span_count": { + "started": 0 + }, + "type": "request" + }, + "user": { + "email": "foo@bar.com", + "id": "123user", + "name": "foo" + } +} +``` + +```json +{ + "@timestamp": "2017-05-30T18:53:27.154Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "1.11.0" + }, + "event": { + "outcome": "unknown" + }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "status_code": 200 + } + }, + "labels": { + "span_tag": "something" + }, + "observer": { + "ephemeral_id": "c0cea3b6-97d7-4e15-9e35-c868e7a3c869", + "hostname": "ix.lan", + "id": "a49b4a08-689a-4724-8050-8bd0ae043281", + "type": "apm-server", + "version": "8.0.0", + "version_major": 8 + }, + "parent": { + "id": "945254c567a5417e" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "environment": "staging", + "name": "1234_service-12a3" + }, + "span": { + "action": "query", + "db": { + "instance": "customers", + "statement": "SELECT * FROM product_types WHERE user_id=?", + "type": "sql", + "user": { + "name": "readonly_user" + } + }, + "duration": { + "us": 3781 + }, + "http": { + "method": "GET", + "response": { + "status_code": 200 + } + }, + "http.url.original": "http://localhost:8000", + "id": "0aaaaaaaaaaaaaaa", + "name": "SELECT FROM product_types", + "stacktrace": [ + { + "abs_path": "net.js", + "context": { + "post": [ + " ins.currentTransaction = prev", + " return result", + "}" + ], + "pre": [ + " var trans = this.currentTransaction", + "" + ] + }, + "exclude_from_grouping": false, + "filename": "net.js", + "function": "onread", + "library_frame": true, + "line": { + "column": 4, + "context": "line3", + "number": 547 + }, + "module": "some module", + "vars": { + "key": "value" + } + }, + { + "exclude_from_grouping": false, + "filename": "my2file.js", + "line": { + "number": 10 + } + } + ], + "start": { + "us": 2830 + }, + "subtype": "postgresql", + "sync": false, + "type": "db" + }, + "timestamp": { + "us": 1496170407154000 + }, + "trace": { + "id": "945254c567a5417eaaaaaaaaaaaaaaaa" + }, + "transaction": { + "id": "945254c567a5417e" + }, + "url": { + "original": "http://localhost:8000" + } +} +``` + + +## Metrics + +Metrics include application-based metrics and some basic system metrics. +Metrics are written to `metrics-apm.app.*`, `metrics-apm.internal.*`, and `metrics-apm.profiling.*` indices. 
+ +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +|@timestamp|Event timestamp.|date| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.ephemeral\_id|The Ephemeral ID identifies a running process.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.name|Name of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.version|Version of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.domain|Client domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.ip|IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.port|Port of the client.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.id|Cloud account ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.name|Cloud account name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.availability\_zone|Cloud availability zone name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.id|Cloud instance/machine ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.name|Cloud instance/machine name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.machine.type|Cloud instance/machine type|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.id|Cloud project ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.name|Cloud project name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.provider|Cloud provider name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.region|Cloud region name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.service.name|Cloud service name, intended to distinguish services running on different platforms within a provider.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|container.id|Unique container id.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.dataset|Data stream dataset.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.namespace|Data stream namespace.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.type|Data stream type.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.address|Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the \`.address\` field. Then it should be duplicated to \`.ip\` or \`.domain\`, depending on which one it is.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.ip|IP address of the destination. 
Can be one of multiple IPv4 or IPv6 addresses.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.port|Port of the destination.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|ecs.version|ECS version the event conforms to.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.architecture|The architecture of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.hostname|The hostname of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.ip|IP of the host that records the event.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.name|Name of the host the event was recorded on. It can contain the same information as host.hostname or a name specified by the user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.os.platform|The platform of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|kubernetes.namespace|Kubernetes namespace|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.node.name|Kubernetes node name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.name|Kubernetes pod name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.uid|Kubernetes Pod UID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|labels|A flat mapping of user-defined labels with string, boolean or number values.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|metricset.name|Name of the set of metrics.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|metricset.period|Current data collection period for this event in milliseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.icc|ISO country code, e.g. US|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mcc|Mobile country code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mnc|Mobile network code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.name|Carrier name, e.g. Vodafone, T-Mobile, etc.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.subtype|Detailed network connection sub-type, e.g. "LTE", "CDMA"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.type|Network connection type, e.g. 
"wifi", "cell"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.ephemeral\_id|Ephemeral identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.hostname|Hostname of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.id|Unique identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.listening|Address the server is listening on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.type|The type will be set to \`apm-server\`.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version|APM Server version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version\_major|Major version number of the observer|byte| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|process.args|Process arguments. May be filtered to protect sensitive information.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.pid|Numeric process ID of the service process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.ppid|Numeric ID of the service's parent process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.title|Service process title.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|processor.event|Processor event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|processor.name|Processor name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.environment|Service environment.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.name|Name of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.version|Version of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.name|Name of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.version|Version of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.name|Immutable name of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.node.name|Unique meaningful name of the service node.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.runtime.name|Name of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.runtime.version|Version of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.version|Version of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.domain|Source domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.ip|IP address of the source of a recorded event. 
This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.port|Port of the source.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|system.cpu.total.norm.pct|The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%.|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.memory.actual.free|Actual free memory in bytes. It is calculated based on the OS. On Linux it consists of the free memory plus caches and buffers. On OSX it is a sum of free memory and the inactive memory. On Windows, it is equal to \`system.memory.free\`.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.memory.total|Total memory.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.cfs.period.us|CFS period in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.cfs.quota.us|CFS quota in microseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.id|ID for the current cgroup CPU.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.stats.periods|Number of periods seen by the CPU.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.stats.throttled.ns|Nanoseconds spent throttled seen by the CPU.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpu.stats.throttled.periods|Number of throttled periods seen by the CPU.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpuacct.id|ID for the current cgroup CPU.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.cpuacct.total.ns|Total CPU time for the current cgroup CPU in nanoseconds.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.memory.mem.limit.bytes|Memory limit for the current cgroup slice.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cgroup.memory.mem.usage.bytes|Memory usage by the current cgroup slice.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.cpu.total.norm.pct|The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges from 0 to 100%.|scaled\_float| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.memory.rss.bytes|The Resident Set Size. 
The amount of memory the process occupied in main memory (RAM).|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|system.process.memory.size|The total virtual memory the process has.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|timeseries.instance|Time series instance ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|timestamp.us|Timestamp of the event in microseconds since Unix epoch.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|user.email|Email of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.id|Identifier of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.name|The username of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.device.name|Name of the device.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.name|Name of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.original|Unparsed version of the user\_agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.family|OS family (such as redhat, debian, freebsd, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.full|Operating system name, including the version or code name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.kernel|Operating system kernel version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.name|Operating system name, without the version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.platform|Operating system platform (such as centos, ubuntu, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.version|Operating system version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.version|Version of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | + + +### Example + +```json +{ + "@timestamp": "2021-09-14T09:52:49.454Z", + "agent": { + "ephemeral_id": "29a27947-ed3a-4d87-b2e6-28f7a940ec2d", + "name": "java", + "version": "1.25.1-SNAPSHOT.UNKNOWN" + }, + "container": { + "id": "a47ed147c6ee269400f7ea4e296b3d01ec7398471bb2951907e4ea12f028bc69" + }, + "ecs": { + "version": "1.11.0" + }, + "event": { + "ingested": "2021-09-14T09:53:00.834276431Z" + }, + "host": { + "architecture": "amd64", + "ip": "35.240.52.17", + "os": { + "platform": "Linux" + } + }, + "jvm.gc.count": 2224, + "jvm.gc.time": 11511, + "kubernetes": { + "pod": { + "name": "opbeans-java-7c68f48dc6-n6mzc", + "uid": "b0cb3baa-4619-4b82-bef5-84cc87b5f853" + } + }, + "labels": { + "name": "Copy" + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "b7f21735-d283-4945-ab80-ce8df494a207", + "hostname": "3c5ac040e8f9", + "id": "6657d6e6-f3e8-4ce4-aa22-e7fe2ad77b5e", + "name": "instance-0000000002", + "type": "apm-server", + "version": "7.15.0", + "version_major": 7 + }, + "process": { + "pid": 8, + "ppid": 1, + "title": "/opt/java/openjdk/bin/java" + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "environment": "production", + "language": { + "name": "Java", + "version": "11.0.11" + }, + "name": 
"opbeans-java", + "node": { + "name": "a47ed147c6ee269400f7ea4e296b3d01ec7398471bb2951907e4ea12f028bc69" + }, + "runtime": { + "name": "Java", + "version": "11.0.11" + }, + "version": "2021-09-08 03:55:06" + } +} +``` + +## Logs + +Logs are application error events. +Logs are written to `logs-apm.error.*` indices. + +**Exported Fields** + +| Field | Description | Type | ECS | +|---|---|---|:---:| +|@timestamp|Event timestamp.|date| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.ephemeral\_id|The Ephemeral ID identifies a running process.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.name|Name of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|agent.version|Version of the agent used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.domain|Client domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.ip|IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|client.port|Port of the client.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.id|Cloud account ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.account.name|Cloud account name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.availability\_zone|Cloud availability zone name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.id|Cloud instance/machine ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.instance.name|Cloud instance/machine name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.machine.type|Cloud instance/machine type|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.id|Cloud project ID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.project.name|Cloud project name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.provider|Cloud provider name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.region|Cloud region name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|cloud.service.name|Cloud service name, intended to distinguish services running on different platforms within a provider.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|container.id|Unique container id.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.dataset|Data stream dataset.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.namespace|Data stream namespace.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|data\_stream.type|Data stream type.|constant\_keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.address|Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the \`.address\` field. 
Then it should be duplicated to \`.ip\` or \`.domain\`, depending on which one it is.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.ip|IP address of the destination. Can be one of multiple IPv4 or IPv6 addresses.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|destination.port|Port of the destination.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|ecs.version|ECS version the event conforms to.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|error.culprit|Function call which was the primary perpetrator of this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.exception.code|The error code set when the error happened, e.g. database error code.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.exception.handled|Indicates whether the error was caught somewhere in the code or not.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.exception.message|The original error message.|text| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.exception.module|The module namespace of the original error.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.exception.type|The type of the original error, e.g. the Java exception class name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.grouping\_key|Hash of select properties of the logged error for grouping purposes.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.grouping\_name|Name to associate with an error group. Errors belonging to the same group (same grouping\_key) may have differing values for grouping\_name. Consumers may choose one arbitrarily.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.id|The ID of the error.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|error.log.level|The severity of the record.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.log.logger\_name|The name of the logger instance used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.log.message|The additionally logged error message.|text| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|error.log.param\_message|A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param\_message, but with placeholders replaced. In some situations the param\_message is used to group errors together.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|host.architecture|The architecture of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.hostname|The hostname of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.ip|IP of the host that records the event.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.name|Name of the host the event was recorded on. 
It can contain the same information as host.hostname or a name specified by the user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|host.os.platform|The platform of the host the event was recorded on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.request.headers|The canonical headers of the monitored HTTP request.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.request.method|The HTTP method of the request leading to this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.request.referrer|Referrer for this HTTP request.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.response.finished|Used by the Node agent to indicate when in the response life cycle an error has occurred.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.response.headers|The canonical headers of the monitored HTTP response.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|http.response.status\_code|The status code of the HTTP response.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|http.version|The HTTP version of the request leading to this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|kubernetes.namespace|Kubernetes namespace|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.node.name|Kubernetes node name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.name|Kubernetes pod name|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|kubernetes.pod.uid|Kubernetes Pod UID|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|labels|A flat mapping of user-defined labels with string, boolean or number values.|object| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|message|The original error message.|text| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|network.carrier.icc|ISO country code, e.g. US|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mcc|Mobile country code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.mnc|Mobile network code|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.carrier.name|Carrier name, e.g. Vodafone, T-Mobile, etc.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.subtype|Detailed network connection sub-type, e.g. "LTE", "CDMA"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|network.connection.type|Network connection type, e.g. 
"wifi", "cell"|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.ephemeral\_id|Ephemeral identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.hostname|Hostname of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.id|Unique identifier of the APM Server.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.listening|Address the server is listening on.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|observer.type|The type will be set to \`apm-server\`.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version|APM Server version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|observer.version\_major|Major version number of the observer|byte| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|parent.id|The ID of the parent event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|process.args|Process arguments. May be filtered to protect sensitive information.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.pid|Numeric process ID of the service process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.ppid|Numeric ID of the service's parent process.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|process.title|Service process title.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|processor.event|Processor event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|processor.name|Processor name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.environment|Service environment.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.name|Name of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.framework.version|Version of the framework used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.name|Name of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.language.version|Version of the programming language used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.name|Immutable name of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.node.name|Unique meaningful name of the service node.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|service.runtime.name|Name of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.runtime.version|Version of the runtime used.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|service.version|Version of the service emitting this event.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.domain|Source domain.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.ip|IP address of the source of a recorded event. 
This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.|ip| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|source.port|Port of the source.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|timestamp.us|Timestamp of the event in microseconds since Unix epoch.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|trace.id|The ID of the trace to which the event belongs.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|transaction.id|The transaction ID.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|transaction.name|Generic designation of a transaction in the scope of a single service (e.g. 'GET /users/:id').|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.sampled|Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context.|boolean| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|transaction.type|Keyword of specific relevance in the service's domain (e.g. 'request', 'backgroundjob', etc.)|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png) | +|url.domain|The hostname of the request, e.g. "example.com".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.fragment|A fragment specifying a location in a web page, e.g. "top".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.full|The full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.path|The path of the request, e.g. "/search".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.port|The port of the request, e.g. 443.|long| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.query|The query string of the request, e.g. "q=elasticsearch".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|url.scheme|The protocol of the request, e.g. 
"https:".|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.domain|Domain of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.email|Email of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.id|Identifier of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user.name|The username of the logged in user.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.device.name|Name of the device.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.name|Name of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.original|Unparsed version of the user\_agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.family|OS family (such as redhat, debian, freebsd, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.full|Operating system name, including the version or code name.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.kernel|Operating system kernel version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.name|Operating system name, without the version.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.platform|Operating system platform (such centos, ubuntu, windows).|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.os.version|Operating system version as a raw string.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | +|user\_agent.version|Version of the user agent.|keyword| ![](https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png) | + + +### Example + +```json +{ + "@timestamp": "2017-05-09T15:04:05.999Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "container": { + "id": "container-id" + }, + "ecs": { + "version": "1.11.0" + }, + "error": { + "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", + "grouping_name": "Cannot read property 'baz' of undefined", + "id": "0f0e9d67c1854d21a6f44673ed561ec8", + "log": { + "level": "custom log level", + "message": "Cannot read property 'baz' of undefined" + } + }, + "event": { + "ingested": "2020-04-22T14:52:08.436124Z" + }, + "host": { + "architecture": "x64", + "ip": "127.0.0.1", + "os": { + "platform": "darwin" + } + }, + "kubernetes": { + "namespace": "namespace1", + "pod": { + "name": "pod-name", + "uid": "pod-uid" + } + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "message": "Cannot read property 'baz' of undefined", + "observer": { + "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", + "hostname": "ix.lan", + "id": "5d4dc8fe-cb14-47ee-b720-d6bf49f87ef0", + "type": "apm-server", + "version": "8.0.0", + "version_major": 8 + }, + "process": { + "args": [ + "node", + "server.js" + ], + "pid": 1234, + "ppid": 7788, + "title": "node" + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "environment": "staging", + "framework": { + "name": "Express", + "version": "1.2.3" + }, + "language": { + "name": "ecmascript", + "version": "8" + }, + "name": "1234_service-12a3", + "node": { + "name": "myservice-node" + }, + "runtime": { + "name": "node", + "version": "8.0.0" + }, + "version": "5.1.3" + }, + "timestamp": 
{ + "us": 1494342245999000 + } +} +``` diff --git a/apmpackage/apm/img/apm-01-service-inventory.png b/apmpackage/apm/img/apm-01-service-inventory.png new file mode 100644 index 00000000000..18ba2c422ef Binary files /dev/null and b/apmpackage/apm/img/apm-01-service-inventory.png differ diff --git a/apmpackage/apm/img/apm-02-service-overview.png b/apmpackage/apm/img/apm-02-service-overview.png new file mode 100644 index 00000000000..4f9f5cf0c0d Binary files /dev/null and b/apmpackage/apm/img/apm-02-service-overview.png differ diff --git a/apmpackage/apm/img/apm-03-trace.png b/apmpackage/apm/img/apm-03-trace.png new file mode 100644 index 00000000000..dd3d680b925 Binary files /dev/null and b/apmpackage/apm/img/apm-03-trace.png differ diff --git a/apmpackage/apm/img/apm-04-service-map.png b/apmpackage/apm/img/apm-04-service-map.png new file mode 100644 index 00000000000..98bd8929cf1 Binary files /dev/null and b/apmpackage/apm/img/apm-04-service-map.png differ diff --git a/apmpackage/apm/img/logo_apm.svg b/apmpackage/apm/img/logo_apm.svg new file mode 100644 index 00000000000..d0b9786431b --- /dev/null +++ b/apmpackage/apm/img/logo_apm.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/apmpackage/apm/manifest.yml b/apmpackage/apm/manifest.yml new file mode 100644 index 00000000000..9c97d7c1294 --- /dev/null +++ b/apmpackage/apm/manifest.yml @@ -0,0 +1,142 @@ +format_version: 1.0.0 +name: apm +title: Elastic APM +version: 0.5.0 +license: basic +description: Ingest APM data +type: integration +categories: ["elastic_stack", "monitoring"] +release: ga +conditions: + kibana.version: "^7.16.0" +icons: + - src: /img/logo_apm.svg + title: APM Logo + size: 32x32 + type: image/svg+xml +screenshots: + - src: /img/apm-01-service-inventory.png + title: apm app service inventory + size: 1440x1025 + type: image/png + - src: /img/apm-02-service-overview.png + title: apm app service overview + size: 1440x2032 + type: image/png + - src: /img/apm-03-trace.png + title: apm app trace + size: 1440x1382 + type: image/png + - src: /img/apm-04-service-map.png + title: apm app service map + size: 1440x1025 + type: image/png +policy_templates: + - name: apmserver + title: Elastic APM Integration + description: Elastic APM Integration + multiple: false + inputs: + - type: apm + title: Collect application traces + description: Collect application traces + vars: + - name: host + type: text + default: localhost:8200 + - name: url + type: text + default: http://localhost:8200 + - name: secret_token + type: text + - name: api_key_enabled + type: bool + default: false + - name: enable_rum + type: bool + default: true + - name: anonymous_enabled + type: bool + default: true + - name: anonymous_allow_agent + type: text + multi: true + default: ['rum-js', 'js-base', 'iOS/swift'] + - name: anonymous_allow_service + type: text + multi: true + - name: anonymous_rate_limit_event_limit + type: integer + default: 10 + - name: anonymous_rate_limit_ip_limit + type: integer + default: 10000 + - name: default_service_environment + type: text + - name: rum_allow_origins + type: text + multi: true + default: ['"*"'] + - name: rum_allow_headers + type: text + multi: true + - name: rum_response_headers + type: yaml + - name: rum_library_pattern + type: text + default: '"node_modules|bower_components|~"' + - name: rum_exclude_from_grouping + type: text + default: '"^/webpack"' + - name: api_key_limit + type: integer + default: 100 + - name: max_event_bytes + type: integer + default: 307200 + - name: capture_personal_data + type: bool + default: true + - 
name: max_header_bytes + type: integer + default: 1048576 + - name: idle_timeout + type: text + default: "45s" + - name: read_timeout + type: text + default: "3600s" + - name: shutdown_timeout + type: text + default: "30s" + - name: write_timeout + type: text + default: "30s" + - name: max_connections + type: integer + default: 0 + - name: response_headers + type: yaml + - name: expvar_enabled + type: bool + default: false + - name: tls_enabled + type: bool + default: false + - name: tls_certificate + type: text + - name: tls_key + type: text + - name: tls_supported_protocols + type: text + multi: true + default: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] + - name: tls_cipher_suites + type: text + multi: true + - name: tls_curve_types + type: text + multi: true + template_path: template.yml.hbs +owner: + github: elastic/apm-server diff --git a/apmpackage/cmd/gen-package/field.go b/apmpackage/cmd/gen-package/field.go new file mode 100644 index 00000000000..a32ef4480f3 --- /dev/null +++ b/apmpackage/cmd/gen-package/field.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +type field struct { + Name string `yaml:"name,omitempty"` + Key string `yaml:"key,omitempty"` + Title string `yaml:"title,omitempty"` + Level string `yaml:"level,omitempty"` + Required *bool `yaml:"required,omitempty"` + Type string `yaml:"type,omitempty"` + Format string `yaml:"format,omitempty"` + Description string `yaml:"description,omitempty"` + + // Dynamic controls whether a field is dynamically mapped. + // + // Note: we intentionally omit "dynamic: false", as we set + // a default dynamic mapping value for each data stream and + // opt *in* to dynamically mapping where needed. + Dynamic bool `yaml:"dynamic,omitempty"` + + DynamicTemplate bool `yaml:"dynamic_template,omitempty"` + ObjectTypeParams interface{} `yaml:"object_type_params,omitempty"` + Release string `yaml:"release,omitempty"` + Alias string `yaml:"alias,omitempty"` + Path string `yaml:"path,omitempty"` + Footnote string `yaml:"footnote,omitempty"` + IgnoreAbove *int `yaml:"ignore_above,omitempty"` + MultiFields []multiFieldDefinition `yaml:"multi_fields,omitempty"` + Fields []field `yaml:"fields,omitempty"` + MetricType string `yaml:"metric_type,omitempty"` + Unit string `yaml:"unit,omitempty"` + + IsECS bool `yaml:"-"` +} + +type multiFieldDefinition struct { + Name string `yaml:"name,omitempty"` + Type string `yaml:"type,omitempty"` + Norms *bool `yaml:"norms,omitempty"` + DefaultField *bool `yaml:"default_field,omitempty"` +} diff --git a/apmpackage/cmd/gen-package/gendocs.go b/apmpackage/cmd/gen-package/gendocs.go new file mode 100644 index 00000000000..98a043d2c29 --- /dev/null +++ b/apmpackage/cmd/gen-package/gendocs.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "strings" + "text/template" +) + +func escapeReplacer(s ...string) *strings.Replacer { + pairs := make([]string, len(s)*2) + for i, s := range s { + pairs[2*i] = s + pairs[2*i+1] = "\\" + s + } + return strings.NewReplacer(pairs...) +} + +var markdownReplacer = escapeReplacer("\\", "`", "*", "_") + +func generateDocs(inputFields map[string]fieldMap) { + addBaseFields(inputFields, "traces", "app_metrics", "error_logs") + data := docsData{ + Traces: flattenFields(inputFields["traces"]), + Metrics: flattenFields(inputFields["app_metrics"]), + Logs: flattenFields(inputFields["error_logs"]), + TransactionExample: loadExample("generated/transactions.json"), + SpanExample: loadExample("generated/spans.json"), + MetricsExample: loadExample("metricset.json"), + ErrorExample: loadExample("generated/errors.json"), + } + t := template.New(docsTemplateFilePath()) + tmpl, err := t.Funcs(map[string]interface{}{ + "Trim": strings.TrimSpace, + "EscapeMarkdown": markdownReplacer.Replace, + }).ParseFiles(docsTemplateFilePath()) + if err != nil { + panic(err) + } + path := docsFilePath() + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + panic(err) + } + defer file.Close() + err = tmpl.ExecuteTemplate(file, "README.template.md", data) + if err != nil { + panic(err) + } +} + +type docsData struct { + Traces []field + Metrics []field + Logs []field + TransactionExample string + SpanExample string + MetricsExample string + ErrorExample string +} + +func addBaseFields(streamFields map[string]fieldMap, streams ...string) { + for _, stream := range streams { + fields := streamFields[stream] + for _, f := range loadFieldsFile(baseFieldsFilePath(stream)) { + f.IsECS = true + fields[f.Name] = fieldMapItem{field: f} + } + } +} + +func loadExample(file string) string { + in, err := ioutil.ReadFile(path.Join("docs/data/elasticsearch/", file)) + if err != nil { + panic(err) + } + var aux interface{} + err = json.Unmarshal(in, &aux) + if err != nil { + panic(err) + } + if slice, ok := aux.([]interface{}); ok { + aux = slice[0] + } + out, err := json.MarshalIndent(aux, "", " ") + if err != nil { + panic(err) + } + return string(out) +} diff --git a/apmpackage/cmd/gen-package/genfields.go b/apmpackage/cmd/gen-package/genfields.go new file mode 100644 index 00000000000..0d98971a0db --- /dev/null +++ b/apmpackage/cmd/gen-package/genfields.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "io/ioutil" + "log" + "net/http" + "path/filepath" + "sort" + + "gopkg.in/yaml.v2" + + "github.com/elastic/ecs/code/go/ecs" +) + +func generateFields() map[string]fieldMap { + + ecsFlatFields := loadECSFields() + + inputFieldsFiles := map[string]fieldMap{ + "error_logs": readFields("model/error/_meta/fields.yml"), + "internal_metrics": readFields("model/metricset/_meta/fields.yml", "x-pack/apm-server/fields/_meta/fields.yml"), + "profile_metrics": readFields("model/profile/_meta/fields.yml"), + "traces": readFields("model/transaction/_meta/fields.yml", "model/span/_meta/fields.yml"), + } + + appMetrics := readFields("model/metricset/_meta/fields.yml", "x-pack/apm-server/fields/_meta/fields.yml") + delete(appMetrics, "transaction") + delete(appMetrics, "span") + delete(appMetrics, "event") + inputFieldsFiles["app_metrics"] = appMetrics + + for streamType, fields := range inputFieldsFiles { + log.Printf("%s", streamType) + populateECSInfo(ecsFlatFields, fields) + for _, field := range fields { + log.Printf(" - %s (%s)", field.Name, field.Type) + } + ecsFields, nonECSFields := splitFields(fields) + + var topLevelECSFields, topLevelNonECSFields []field + for _, field := range ecsFields { + topLevelECSFields = append(topLevelECSFields, field.field) + } + for _, field := range nonECSFields { + topLevelNonECSFields = append(topLevelNonECSFields, field.field) + } + sortFields(topLevelECSFields) + sortFields(topLevelNonECSFields) + writeFields(streamType, "ecs.yml", topLevelECSFields) + writeFields(streamType, "fields.yml", topLevelNonECSFields) + } + return inputFieldsFiles +} + +func writeFields(streamType, filename string, data []field) { + if len(data) == 0 { + return + } + bytes, err := yaml.Marshal(data) + if err != nil { + panic(err) + } + err = ioutil.WriteFile(filepath.Join(fieldsPath(streamType), filename), bytes, 0644) + if err != nil { + panic(err) + } +} + +// populateECSInfo sets the IsECS property of each field. Group fields +// will have IsECS set only if all sub-fields have IsECS set. +func populateECSInfo(ecsFlatFields map[string]interface{}, inputFields fieldMap) { + var traverse func(path string, fields fieldMap) (ecsOnly bool) + traverse = func(path string, fields fieldMap) bool { + ecsOnly := true + for name, field := range fields { + fieldName := field.Name + if path != "" { + fieldName = path + "." + fieldName + } + if field.Type != "group" { + _, field.IsECS = ecsFlatFields[fieldName] + } else { + field.IsECS = traverse(fieldName, field.fields) + } + fields[name] = field + ecsOnly = ecsOnly && field.IsECS + } + return ecsOnly + } + traverse("", inputFields) +} + +// splitFields splits fields into ECS and non-ECS fieldMaps.
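+// A group field that is not wholly ECS is split recursively: a copy of
+// the group is added to ecsFields when at least one of its sub-fields
+// is ECS, while the group itself stays in nonECSFields together with
+// its non-ECS sub-fields.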
+func splitFields(fields fieldMap) (ecsFields, nonECSFields fieldMap) { + ecsFields = make(fieldMap) + nonECSFields = make(fieldMap) + for name, field := range fields { + if field.IsECS { + ecsFields[name] = field + continue + } else if field.Type != "group" { + nonECSFields[name] = field + continue + } + subECSFields, subNonECSFields := splitFields(field.fields) + fieldCopy := field.field + fieldCopy.Fields = nil // recreated by update calls + if len(subECSFields) > 0 { + ecsFields[name] = fieldMapItem{field: fieldCopy, fields: subECSFields} + ecsFields.update(fieldCopy) + } + nonECSFields[name] = fieldMapItem{field: fieldCopy, fields: subNonECSFields} + nonECSFields.update(fieldCopy) + } + return ecsFields, nonECSFields +} + +func loadECSFields() map[string]interface{} { + url := "https://raw.githubusercontent.com/elastic/ecs/v" + ecs.Version + "/generated/ecs/ecs_flat.yml" + // TODO cache this to avoid fetching each time + resp, err := http.Get(url) + if err != nil { + panic(err) + } + defer resp.Body.Close() + var ret map[string]interface{} + err = yaml.NewDecoder(resp.Body).Decode(&ret) + if err != nil { + panic(err) + } + return ret +} + +// readFields reads fields from all of the specified files, +// merging them into a fieldMap. +func readFields(fileNames ...string) fieldMap { + fields := make(fieldMap) + for _, fname := range fileNames { + for _, key := range loadFieldsFile(fname) { + for _, field := range key.Fields { + fields.update(field) + } + } + } + return fields +} + +func flattenFields(fields fieldMap) []field { + var flattened []field + var traverse func(path string, fields fieldMap) + traverse = func(path string, fields fieldMap) { + for name, field := range fields { + full := path + if full != "" { + full += "." + } + full += name + field.Name = full + if field.Type == "group" { + traverse(full, field.fields) + } else { + flattened = append(flattened, field.field) + } + } + } + traverse("", fields) + sortFields(flattened) + return flattened +} + +type fieldMap map[string]fieldMapItem + +type fieldMapItem struct { + field + fields fieldMap +} + +func (m fieldMap) update(f field) { + if f.DynamicTemplate { + // We don't add dynamic_template "fields" to the + // integration package; they are manually defined + // in the data stream manifest. + return + } + + item := m[f.Name] + item.field = f + if item.fields == nil { + item.fields = make(fieldMap) + } + for _, f := range f.Fields { + item.fields.update(f) + } + // Update the Fields slice, in case of merges. + item.Fields = item.Fields[:0] + for _, f := range item.fields { + item.Fields = append(item.Fields, f.field) + } + sortFields(item.Fields) + m[f.Name] = item +} + +func loadFieldsFile(path string) []field { + fields, err := ioutil.ReadFile(path) + if err != nil { + panic(err) + } + var fs []field + if err := yaml.Unmarshal(fields, &fs); err != nil { + panic(err) + } + return fs +} + +func sortFields(fields []field) { + sort.Slice(fields, func(i, j int) bool { + return fields[i].Name < fields[j].Name + }) +} diff --git a/apmpackage/cmd/gen-package/genpipelines.go b/apmpackage/cmd/gen-package/genpipelines.go new file mode 100644 index 00000000000..2eed1a9307a --- /dev/null +++ b/apmpackage/cmd/gen-package/genpipelines.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + + "github.com/elastic/apm-server/model" +) + +var streamMappings = map[string]string{ + "error_logs": "logs-" + model.ErrorsDataset, + "traces": "traces-" + model.TracesDataset, + "app_metrics": "metrics-" + model.AppMetricsDataset, + "internal_metrics": "metrics-" + model.InternalMetricsDataset, + "profile_metrics": "metrics-" + model.ProfilesDataset, +} + +type PipelineDef struct { + ID string `json:"id"` + Body json.RawMessage `json:"body"` +} + +func generatePipelines(version, dataStream string) error { + pipelines, err := os.Open("ingest/pipeline/definition.json") + if err != nil { + return err + } + defer pipelines.Close() + + bytes, err := ioutil.ReadAll(pipelines) + if err != nil { + return err + } + var definitions []PipelineDef + if err := json.Unmarshal(bytes, &definitions); err != nil { + return err + } + + if err := os.MkdirAll(pipelinesPath(dataStream), 0755); err != nil { + return err + } + + var apmPipelineBody json.RawMessage + for _, definition := range definitions { + if definition.ID == "apm" { + apmPipelineBody = definition.Body + continue + } + fName := filepath.Join(pipelinesPath(dataStream), definition.ID+".json") + if err := writeFilePipelineJSON(fName, definition.Body); err != nil { + return err + } + } + + pipelineNames := gjson.GetBytes(apmPipelineBody, "processors.#.pipeline.name") + if !pipelineNames.Exists() { + return errors.New("failed to locate pipeline processor names in 'apm' pipeline") + } + for i, pipelineNameResult := range pipelineNames.Array() { + // Update the name to match the one generated by Fleet when it installs the pipelines. + pipelineName := pipelineNameResult.Str + pipelineName = fmt.Sprintf("%s-%s-%s", streamMappings[dataStream], version, pipelineName) + out, err := sjson.SetBytes(apmPipelineBody, fmt.Sprintf("processors.%d.pipeline.name", i), pipelineName) + if err != nil { + return err + } + apmPipelineBody = out + } + fName := filepath.Join(pipelinesPath(dataStream), "default.json") + return writeFilePipelineJSON(fName, apmPipelineBody) +} + +func writeFilePipelineJSON(filename string, v []byte) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + + var indented bytes.Buffer + if err := json.Indent(&indented, v, "", " "); err != nil { + return err + } + _, err = indented.WriteTo(f) + return err +} diff --git a/apmpackage/cmd/gen-package/main.go b/apmpackage/cmd/gen-package/main.go new file mode 100644 index 00000000000..c2378cc748e --- /dev/null +++ b/apmpackage/cmd/gen-package/main.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements.
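An aside on the name rewrite in generatePipelines above: Fleet installs each ingest pipeline under a name derived from the data stream and package version, so the nested pipeline-processor references inside the default pipeline must be rewritten to match. A standalone sketch of the same rewrite with gjson/sjson (the pipeline body and the "traces-apm-0.1.0" prefix are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

func main() {
	// Invented example body; every processor here is a pipeline processor,
	// so the index into the gjson result matches the processor index (the
	// generator relies on the same property of the 'apm' pipeline).
	body := []byte(`{"processors":[{"pipeline":{"name":"apm_user_agent"}},{"pipeline":{"name":"apm_ingest_timestamp"}}]}`)

	names := gjson.GetBytes(body, "processors.#.pipeline.name")
	for i, r := range names.Array() {
		// e.g. "apm_user_agent" -> "traces-apm-0.1.0-apm_user_agent"
		renamed := fmt.Sprintf("traces-apm-0.1.0-%s", r.Str)
		out, err := sjson.SetBytes(body, fmt.Sprintf("processors.%d.pipeline.name", i), renamed)
		if err != nil {
			panic(err)
		}
		body = out
	}
	fmt.Println(string(body))
}
```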
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "io/ioutil" + "log" + "os" + + "gopkg.in/yaml.v2" +) + +// Some data streams may not have a counterpart template +// in standalone apm-server, and so it does not make sense +// to maintain a separate fields.yml. +var handwritten = map[string]bool{ + "sampled_traces": true, +} + +func main() { + manifestData, err := ioutil.ReadFile(manifestFilePath()) + if err != nil { + log.Fatal(err) + } + var manifest struct { + Version string `yaml:"version"` + } + if err := yaml.Unmarshal(manifestData, &manifest); err != nil { + log.Fatal(err) + } + + clear() + inputFields := generateFields() + for dataStream := range inputFields { + if err := generatePipelines(manifest.Version, dataStream); err != nil { + log.Fatal(err) + } + } + // TODO(axw) rely on `elastic-package build` to build docs from a template, like in integrations. + generateDocs(inputFields) + log.Printf("Package fields and docs generated for version %s", manifest.Version) +} + +func clear() { + fileInfo, err := ioutil.ReadDir(dataStreamPath()) + if err != nil { + panic(err) + } + for _, f := range fileInfo { + if !f.IsDir() { + continue + } + name := f.Name() + if handwritten[name] { + continue + } + removeFile(ecsFilePath(name)) + removeFile(fieldsFilePath(name)) + removeDir(pipelinesPath(name)) + } + ioutil.WriteFile(docsFilePath(), nil, 0644) +} + +func removeFile(path string) { + if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + log.Fatal(err) + } +} + +func removeDir(path string) { + if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { + log.Fatal(err) + } +} diff --git a/apmpackage/cmd/gen-package/paths.go b/apmpackage/cmd/gen-package/paths.go new file mode 100644 index 00000000000..70b7b210102 --- /dev/null +++ b/apmpackage/cmd/gen-package/paths.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
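main above ties the generator together: read the package version from the manifest, wipe previously generated artifacts (skipping handwritten data streams such as sampled_traces), then regenerate fields, pipelines, and docs. All of the path helpers that follow are relative, so the generator presumably runs from the repository root; a hypothetical wrapper making that assumption explicit (not part of this change):

```go
package main

import (
	"os"
	"os/exec"
)

// Run the generator from the repository root so the relative paths in
// paths.go resolve. The root path here is a placeholder.
func main() {
	cmd := exec.Command("go", "run", "./apmpackage/cmd/gen-package")
	cmd.Dir = "/path/to/apm-server" // adjust to the checkout location
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```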
+ +package main + +import "path/filepath" + +func manifestFilePath() string { + return filepath.Join("apmpackage", "apm", "manifest.yml") +} + +func docsTemplateFilePath() string { + return filepath.Join("apmpackage", "apm", "README.template.md") +} + +func docsFilePath() string { + return filepath.Join("apmpackage", "apm", "docs/README.md") +} + +func pipelinesPath(dataStream string) string { + return filepath.Join("apmpackage", "apm", "data_stream", dataStream, "elasticsearch", "ingest_pipeline") +} + +func dataStreamPath() string { + return filepath.Join("apmpackage", "apm", "data_stream") +} + +func fieldsPath(dataStream string) string { + return filepath.Join(dataStreamPath(), dataStream, "fields") +} + +func ecsFilePath(dataStream string) string { + return filepath.Join(fieldsPath(dataStream), "ecs.yml") +} + +func fieldsFilePath(dataStream string) string { + return filepath.Join(fieldsPath(dataStream), "fields.yml") +} + +func baseFieldsFilePath(dataStream string) string { + return filepath.Join(fieldsPath(dataStream), "base-fields.yml") +} diff --git a/approvaltest/approvals.go b/approvaltest/approvals.go index 2333c58085a..485a87bd73b 100644 --- a/approvaltest/approvals.go +++ b/approvaltest/approvals.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -128,6 +129,9 @@ func removeReceived(name string) { } func writeReceived(name string, received interface{}) { + if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil { + panic(err) + } f, err := os.Create(name + ReceivedSuffix) if err != nil { panic(err) diff --git a/approvaltest/go.mod b/approvaltest/go.mod index daa218712f9..20ec1589e92 100644 --- a/approvaltest/go.mod +++ b/approvaltest/go.mod @@ -3,7 +3,8 @@ module github.com/elastic/apm-server/approvaltest go 1.15 require ( + github.com/fatih/color v1.12.0 github.com/google/go-cmp v0.5.2 - github.com/tidwall/gjson v1.6.0 + github.com/tidwall/gjson v1.6.5 github.com/tidwall/sjson v1.1.1 ) diff --git a/approvaltest/go.sum b/approvaltest/go.sum index 167870f8a82..3a22dd41789 100644 --- a/approvaltest/go.sum +++ b/approvaltest/go.sum @@ -1,12 +1,25 @@ +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/gjson v1.6.5 h1:P/K9r+1pt9AK54uap7HcoIp6T3a7AoMg3v18tUis+Cg= +github.com/tidwall/gjson v1.6.5/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= 
+github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8= github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U= github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/beater/acker.go b/beater/acker.go new file mode 100644 index 00000000000..b06035a851c --- /dev/null +++ b/beater/acker.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/elastic/beats/v7/libbeat/beat" +) + +// waitPublishedAcker is a beat.ACKer which keeps track of the number of +// events published. waitPublishedAcker provides an interruptible Wait method +// that blocks until all clients are closed, and all published events at the +// time the clients are closed are acknowledged. +type waitPublishedAcker struct { + active int64 // atomic + + mu sync.Mutex + empty *sync.Cond +} + +// newWaitPublishedAcker returns a new waitPublishedAcker. +func newWaitPublishedAcker() *waitPublishedAcker { + acker := &waitPublishedAcker{} + acker.empty = sync.NewCond(&acker.mu) + return acker +} + +// AddEvent is called when an event has been published or dropped by the client, +// and increments a counter for published events. +func (w *waitPublishedAcker) AddEvent(event beat.Event, published bool) { + if published { + w.incref(1) + } +} + +// ACKEvents is called when published events have been acknowledged. 
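Before the remaining methods, a hedged sketch of the acker's intended lifecycle, using only its own API (the wiring that attaches it to a libbeat pipeline client is not shown in this change and is elided here):

```go
package beater

import (
	"context"
	"log"
	"time"
)

// Illustrative only: the Open/Close/Wait contract described above.
func exampleAckerShutdown() {
	acker := newWaitPublishedAcker()
	acker.Open() // once, before the pipeline client is opened

	// ... the pipeline calls AddEvent / ACKEvents as events are
	// published and acknowledged ...

	acker.Close() // once, when the pipeline client is closed

	// On shutdown, wait (bounded) for outstanding events to be acked.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := acker.Wait(ctx); err != nil {
		log.Printf("gave up waiting for acks: %v", err)
	}
}
```

ACKEvents and the rest of the implementation follow.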
+func (w *waitPublishedAcker) ACKEvents(n int) { + w.decref(int64(n)) +} + +// Open must be called exactly once before any new pipeline client is opened, +// incrementing the acker's reference count. +func (w *waitPublishedAcker) Open() { + w.incref(1) +} + +// Close is called when a pipeline client is closed, and decrements the +// acker's reference count. +// +// This must be called at most once for each call to Open. +func (w *waitPublishedAcker) Close() { + w.decref(1) +} + +func (w *waitPublishedAcker) incref(n int64) { + atomic.AddInt64(&w.active, n) +} + +func (w *waitPublishedAcker) decref(n int64) { + if atomic.AddInt64(&w.active, int64(-n)) == 0 { + w.empty.Broadcast() + } +} + +// Wait waits for w to be closed and all previously published events to be +// acknowledged. +func (w *waitPublishedAcker) Wait(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { + <-ctx.Done() + w.empty.Broadcast() + }() + + w.mu.Lock() + defer w.mu.Unlock() + for atomic.LoadInt64(&w.active) != 0 && ctx.Err() == nil { + w.empty.Wait() + } + return ctx.Err() +} diff --git a/beater/api/asset/sourcemap/handler.go b/beater/api/asset/sourcemap/handler.go index e175d687a9d..0d627a9372d 100644 --- a/beater/api/asset/sourcemap/handler.go +++ b/beater/api/asset/sourcemap/handler.go @@ -18,6 +18,8 @@ package sourcemap import ( + "context" + "errors" "fmt" "io/ioutil" "net/http" @@ -27,8 +29,8 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/processor/asset" "github.com/elastic/apm-server/publish" "github.com/elastic/apm-server/utility" ) @@ -37,13 +39,21 @@ var ( // MonitoringMap holds a mapping for request.IDs to monitoring counters MonitoringMap = request.DefaultMonitoringMapForRegistry(registry) registry = monitoring.Default.NewRegistry("apm-server.sourcemap") + + decodingCount = monitoring.NewInt(registry, "decoding.count") + decodingError = monitoring.NewInt(registry, "decoding.errors") + validateCount = monitoring.NewInt(registry, "validation.count") + validateError = monitoring.NewInt(registry, "validation.errors") ) -// RequestDecoder is the type for a function that decodes sourcemap data from an http.Request. -type RequestDecoder func(req *http.Request) (map[string]interface{}, error) +// AddedNotifier is an interface for notifying of sourcemap additions. +// This is implemented by sourcemap.Store. +type AddedNotifier interface { + NotifyAdded(ctx context.Context, serviceName, serviceVersion, bundleFilepath string) +} // Handler returns a request.Handler for managing asset requests.
-func Handler(dec RequestDecoder, processor asset.Processor, report publish.Reporter) request.Handler { +func Handler(report publish.Reporter, notifier AddedNotifier) request.Handler { return func(c *request.Context) { if c.Request.Method != "POST" { c.Result.SetDefault(request.IDResponseErrorsMethodNotAllowed) @@ -51,71 +61,85 @@ func Handler(dec RequestDecoder, processor asset.Processor, report publish.Repor return } - data, err := dec(c.Request) - if err != nil { - if strings.Contains(err.Error(), request.MapResultIDToStatus[request.IDResponseErrorsRequestTooLarge].Keyword) { - c.Result.SetWithError(request.IDResponseErrorsRequestTooLarge, err) + if err := auth.Authorize(c.Request.Context(), auth.ActionSourcemapUpload, auth.Resource{}); err != nil { + if errors.Is(err, auth.ErrUnauthorized) { + id := request.IDResponseErrorsForbidden + status := request.MapResultIDToStatus[id] + c.Result.Set(id, status.Code, err.Error(), nil, nil) } else { - c.Result.SetWithError(request.IDResponseErrorsDecode, err) + c.Result.SetDefault(request.IDResponseErrorsServiceUnavailable) + c.Result.Err = err } c.Write() return } - if err = processor.Validate(data); err != nil { - c.Result.SetWithError(request.IDResponseErrorsValidate, err) + var smap sourcemapDoc + decodingCount.Inc() + if err := decode(c.Request, &smap); err != nil { + decodingError.Inc() + if strings.Contains(err.Error(), request.MapResultIDToStatus[request.IDResponseErrorsRequestTooLarge].Keyword) { + c.Result.SetWithError(request.IDResponseErrorsRequestTooLarge, err) + } else { + c.Result.SetWithError(request.IDResponseErrorsDecode, err) + } c.Write() return } - - transformables, err := processor.Decode(data) - if err != nil { - c.Result.SetWithError(request.IDResponseErrorsDecode, err) + validateCount.Inc() + if err := validate(&smap); err != nil { + validateError.Inc() + c.Result.SetWithError(request.IDResponseErrorsValidate, err) c.Write() return } - req := publish.PendingReq{Transformables: transformables} + req := publish.PendingReq{Transformable: &smap} span, ctx := apm.StartSpan(c.Request.Context(), "Send", "Reporter") defer span.End() req.Trace = !span.Dropped() - - if err = report(ctx, req); err != nil { + if err := report(ctx, req); err != nil { if err == publish.ErrChannelClosed { c.Result.SetWithError(request.IDResponseErrorsShuttingDown, err) } else { c.Result.SetWithError(request.IDResponseErrorsFullQueue, err) } c.Write() + return } - + notifier.NotifyAdded(c.Request.Context(), smap.ServiceName, smap.ServiceVersion, smap.BundleFilepath) c.Result.SetDefault(request.IDResponseValidAccepted) c.Write() } } -func DecodeSourcemapFormData(req *http.Request) (map[string]interface{}, error) { - contentType := req.Header.Get("Content-Type") - if !strings.Contains(contentType, "multipart/form-data") { - return nil, fmt.Errorf("invalid content type: %s", req.Header.Get("Content-Type")) +func decode(req *http.Request, smap *sourcemapDoc) error { + if !strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") { + return fmt.Errorf("invalid content type: %s", req.Header.Get("Content-Type")) } - file, _, err := req.FormFile("sourcemap") if err != nil { - return nil, err + return err } defer file.Close() - - sourcemapBytes, err := ioutil.ReadAll(file) + bytes, err := ioutil.ReadAll(file) if err != nil { - return nil, err - } - payload := map[string]interface{}{ - "sourcemap": string(sourcemapBytes), - "service_name": req.FormValue("service_name"), - "service_version": req.FormValue("service_version"), - "bundle_filepath": 
utility.CleanUrlPath(req.FormValue("bundle_filepath")), + return err } + smap.Sourcemap = string(bytes) + smap.BundleFilepath = utility.CleanUrlPath(req.FormValue("bundle_filepath")) + smap.ServiceName = req.FormValue("service_name") + smap.ServiceVersion = req.FormValue("service_version") + return nil +} - return payload, nil +func validate(smap *sourcemapDoc) error { + // ensure all information is given + if smap.BundleFilepath == "" || smap.ServiceName == "" || smap.ServiceVersion == "" { + return errors.New("error validating sourcemap: bundle_filepath, service_name and service_version must be sent") + } + if smap.Sourcemap == "" { + return errors.New(`error validating sourcemap: expected sourcemap to be sent as string, but got null`) + } + return nil } diff --git a/beater/api/asset/sourcemap/handler_test.go b/beater/api/asset/sourcemap/handler_test.go index 23425cd6367..d2776982160 100644 --- a/beater/api/asset/sourcemap/handler_test.go +++ b/beater/api/asset/sourcemap/handler_test.go @@ -20,59 +20,52 @@ package sourcemap import ( "bytes" "context" + "errors" "fmt" "io" + "io/ioutil" "mime/multipart" "net/http" "net/http/httptest" + "path/filepath" + "strings" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/elastic/apm-server/approvaltest" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/processor/asset" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/apm-server/transform" ) -func TestAssetHandler(t *testing.T) { +type notifier struct { + notified bool +} + +func (n *notifier) NotifyAdded(ctx context.Context, serviceName, serviceVersion, bundleFilepath string) { + n.notified = true +} +func TestAssetHandler(t *testing.T) { testcases := map[string]testcaseT{ "method": { r: httptest.NewRequest(http.MethodGet, "/", nil), code: http.StatusMethodNotAllowed, body: beatertest.ResultErrWrap(request.MapResultIDToStatus[request.IDResponseErrorsMethodNotAllowed].Keyword), }, - "large": { - dec: func(r *http.Request) (map[string]interface{}, error) { - return nil, errors.New("error decoding request body too large") - }, - code: http.StatusRequestEntityTooLarge, - body: beatertest.ResultErrWrap(fmt.Sprintf("%s: error decoding request body too large", - request.MapResultIDToStatus[request.IDResponseErrorsRequestTooLarge].Keyword)), - }, "decode": { - dec: func(r *http.Request) (map[string]interface{}, error) { - return nil, errors.New("foo") - }, - code: http.StatusBadRequest, - body: beatertest.ResultErrWrap(fmt.Sprintf("%s: foo", + contentType: "invalid", + code: http.StatusBadRequest, + body: beatertest.ResultErrWrap(fmt.Sprintf("%s: invalid content type: invalid", request.MapResultIDToStatus[request.IDResponseErrorsDecode].Keyword)), }, "validate": { - dec: func(req *http.Request) (map[string]interface{}, error) { return nil, nil }, - code: http.StatusBadRequest, - body: beatertest.ResultErrWrap(fmt.Sprintf("%s: no input", request.MapResultIDToStatus[request.IDResponseErrorsValidate].Keyword)), - }, - "processorDecode": { - dec: func(*http.Request) (map[string]interface{}, error) { - return map[string]interface{}{"mockProcessor": "xyz"}, nil - }, - code: http.StatusBadRequest, - body: beatertest.ResultErrWrap(fmt.Sprintf("%s: processor decode 
error", request.MapResultIDToStatus[request.IDResponseErrorsDecode].Keyword)), + missingServiceName: true, + code: http.StatusBadRequest, + body: beatertest.ResultErrWrap(fmt.Sprintf("%s: error validating sourcemap: bundle_filepath, service_name and service_version must be sent", request.MapResultIDToStatus[request.IDResponseErrorsValidate].Keyword)), }, "shuttingDown": { reporter: func(ctx context.Context, p publish.PendingReq) error { @@ -89,107 +82,118 @@ func TestAssetHandler(t *testing.T) { code: http.StatusServiceUnavailable, body: beatertest.ResultErrWrap(fmt.Sprintf("%s: 500", request.MapResultIDToStatus[request.IDResponseErrorsFullQueue].Keyword)), }, - "valid": { + "valid-full-payload": { + sourcemapInput: func() string { + b, err := ioutil.ReadFile("../../../../testdata/sourcemap/bundle.js.map") + require.NoError(t, err) + return string(b) + }(), + reporter: func(ctx context.Context, p publish.PendingReq) error { + events := p.Transformable.Transform(ctx) + docs := beatertest.EncodeEventDocs(events...) + name := filepath.Join("test_approved", "TestProcessSourcemap") + approvaltest.ApproveEventDocs(t, name, docs, "@timestamp") + return nil + }, code: http.StatusAccepted, }, + "unauthorized": { + authorizer: func(context.Context, auth.Action, auth.Resource) error { + return auth.ErrUnauthorized + }, + code: http.StatusForbidden, + body: beatertest.ResultErrWrap("unauthorized"), + }, + "auth_unavailable": { + authorizer: func(context.Context, auth.Action, auth.Resource) error { + return errors.New("boom") + }, + code: http.StatusServiceUnavailable, + body: beatertest.ResultErrWrap("service unavailable"), + }, } - for name, tc := range testcases { t.Run(name, func(t *testing.T) { - tc.setup() - + require.NoError(t, tc.setup()) // test assertion assert.Equal(t, tc.code, tc.w.Code) assert.Equal(t, tc.body, tc.w.Body.String()) - + assert.Equal(t, tc.code == http.StatusAccepted, tc.notifier.notified) }) } } type testcaseT struct { - w *httptest.ResponseRecorder - r *http.Request - dec RequestDecoder - processor asset.Processor - reporter func(ctx context.Context, p publish.PendingReq) error + w *httptest.ResponseRecorder + r *http.Request + sourcemapInput string + contentType string + reporter func(ctx context.Context, p publish.PendingReq) error + authorizer authorizerFunc + notifier notifier + + missingSourcemap, missingServiceName, missingServiceVersion, missingBundleFilepath bool code int body string } -func (tc *testcaseT) setup() { +func (tc *testcaseT) setup() error { if tc.w == nil { tc.w = httptest.NewRecorder() } - if tc.r == nil { - tc.r = httptest.NewRequest(http.MethodPost, "/", nil) - } - if tc.dec == nil { - tc.dec = func(*http.Request) (map[string]interface{}, error) { - return map[string]interface{}{"foo": "bar"}, nil + if tc.authorizer == nil { + tc.authorizer = func(ctx context.Context, action auth.Action, resource auth.Resource) error { + return nil } } - if tc.processor == nil { - tc.processor = &mockProcessor{} + if tc.r == nil { + buf := bytes.Buffer{} + w := multipart.NewWriter(&buf) + if !tc.missingSourcemap { + part, err := w.CreateFormFile("sourcemap", "bundle_no_mapping.js.map") + if err != nil { + return err + } + if tc.sourcemapInput == "" { + tc.sourcemapInput = "sourcemap dummy string" + } + if _, err = io.Copy(part, strings.NewReader(tc.sourcemapInput)); err != nil { + return err + } + } + if !tc.missingBundleFilepath { + w.WriteField("bundle_filepath", "js/./test/../bundle_no_mapping.js.map") + } + if !tc.missingServiceName { + 
w.WriteField("service_name", "My service") + } + if !tc.missingServiceVersion { + w.WriteField("service_version", "0.1") + } + if err := w.Close(); err != nil { + return err + } + tc.r = httptest.NewRequest(http.MethodPost, "/", &buf) + if tc.contentType == "" { + tc.contentType = w.FormDataContentType() + } + tc.r.Header.Set("Content-Type", tc.contentType) + tc.r = tc.r.WithContext(auth.ContextWithAuthorizer(tc.r.Context(), tc.authorizer)) } + if tc.reporter == nil { - tc.reporter = beatertest.NilReporter + tc.reporter = func(context.Context, publish.PendingReq) error { return nil } } c := request.NewContext() c.Reset(tc.w, tc.r) - h := Handler(tc.dec, tc.processor, tc.reporter) + h := Handler(tc.reporter, &tc.notifier) h(c) -} - -type mockProcessor struct{} - -func (p *mockProcessor) Validate(m map[string]interface{}) error { - if m == nil { - return errors.New("no input") - } return nil } -func (p *mockProcessor) Decode(m map[string]interface{}) ([]transform.Transformable, error) { - if _, ok := m["mockProcessor"]; ok { - return nil, errors.New("processor decode error") - } - return nil, nil -} -func (p *mockProcessor) Name() string { - return "mockProcessor" -} - -func TestDecodeSourcemapFormData(t *testing.T) { - - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - - fileBytes, err := loader.LoadDataAsBytes("../testdata/sourcemap/bundle.js.map") - assert.NoError(t, err) - part, err := writer.CreateFormFile("sourcemap", "bundle_no_mapping.js.map") - assert.NoError(t, err) - _, err = io.Copy(part, bytes.NewReader(fileBytes)) - assert.NoError(t, err) - - writer.WriteField("bundle_filepath", "js/./test/../bundle_no_mapping.js.map") - writer.WriteField("service_name", "My service") - writer.WriteField("service_version", "0.1") - - err = writer.Close() - assert.NoError(t, err) - - req, err := http.NewRequest("POST", "_", body) - req.Header.Set("Content-Type", writer.FormDataContentType()) - assert.NoError(t, err) - assert.NoError(t, err) - data, err := DecodeSourcemapFormData(req) - assert.NoError(t, err) +type authorizerFunc func(context.Context, auth.Action, auth.Resource) error - assert.Len(t, data, 4) - assert.Equal(t, "js/bundle_no_mapping.js.map", data["bundle_filepath"]) - assert.Equal(t, "My service", data["service_name"]) - assert.Equal(t, "0.1", data["service_version"]) - assert.NotNil(t, data["sourcemap"].(string)) - assert.Equal(t, len(fileBytes), len(data["sourcemap"].(string))) +func (f authorizerFunc) Authorize(ctx context.Context, action auth.Action, resource auth.Resource) error { + return f(ctx, action, resource) } diff --git a/beater/api/asset/sourcemap/sourcemap.go b/beater/api/asset/sourcemap/sourcemap.go new file mode 100644 index 00000000000..bcf0b6fd4bd --- /dev/null +++ b/beater/api/asset/sourcemap/sourcemap.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package sourcemap + +import ( + "context" + "time" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/monitoring" + + "github.com/elastic/apm-server/utility" +) + +const ( + sourcemapProcessorName = "sourcemap" + sourcemapDocType = "sourcemap" +) + +var ( + processorRegistry = monitoring.Default.NewRegistry("apm-server.processor.sourcemap") + sourcemapCounter = monitoring.NewInt(processorRegistry, "counter") + sourcemapProcessorEntry = common.MapStr{"name": sourcemapProcessorName, "event": sourcemapDocType} +) + +type sourcemapDoc struct { + ServiceName string + ServiceVersion string + Sourcemap string + BundleFilepath string +} + +func (pa *sourcemapDoc) Transform(ctx context.Context) []beat.Event { + sourcemapCounter.Inc() + if pa == nil { + return nil + } + ev := beat.Event{ + Fields: common.MapStr{ + "processor": sourcemapProcessorEntry, + sourcemapDocType: common.MapStr{ + "bundle_filepath": utility.UrlPath(pa.BundleFilepath), + "service": common.MapStr{"name": pa.ServiceName, "version": pa.ServiceVersion}, + "sourcemap": pa.Sourcemap, + }, + }, + Timestamp: time.Now(), + } + return []beat.Event{ev} +} diff --git a/beater/api/asset/sourcemap/sourcemap_test.go b/beater/api/asset/sourcemap/sourcemap_test.go new file mode 100644 index 00000000000..8d2a2f3f16e --- /dev/null +++ b/beater/api/asset/sourcemap/sourcemap_test.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package sourcemap + +import ( + "context" + "io/ioutil" + "testing" + "time" + + s "github.com/go-sourcemap/sourcemap" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func getStr(data common.MapStr, key string) string { + rs, _ := data.GetValue(key) + return rs.(string) +} + +func TestTransform(t *testing.T) { + p := sourcemapDoc{ + ServiceName: "myService", + ServiceVersion: "1.0", + BundleFilepath: "/my/path", + Sourcemap: "mysmap", + } + + events := p.Transform(context.Background()) + assert.Len(t, events, 1) + event := events[0] + + assert.WithinDuration(t, time.Now(), event.Timestamp, time.Second) + output := event.Fields["sourcemap"].(common.MapStr) + + assert.Equal(t, "/my/path", getStr(output, "bundle_filepath")) + assert.Equal(t, "myService", getStr(output, "service.name")) + assert.Equal(t, "1.0", getStr(output, "service.version")) + assert.Equal(t, "mysmap", getStr(output, "sourcemap")) +} + +func TestParseSourcemaps(t *testing.T) { + fileBytes, err := ioutil.ReadFile("../../../../testdata/sourcemap/bundle.js.map") + assert.NoError(t, err) + parser, err := s.Parse("", fileBytes) + assert.NoError(t, err) + + source, _, _, _, ok := parser.Source(1, 9) + assert.True(t, ok) + assert.Equal(t, "webpack:///bundle.js", source) +} diff --git a/beater/api/asset/sourcemap/test_approved/TestProcessSourcemap.approved.json b/beater/api/asset/sourcemap/test_approved/TestProcessSourcemap.approved.json new file mode 100644 index 00000000000..c86d47dbd70 --- /dev/null +++ b/beater/api/asset/sourcemap/test_approved/TestProcessSourcemap.approved.json @@ -0,0 +1,19 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "processor": { + "event": "sourcemap", + "name": "sourcemap" + }, + "sourcemap": { + "bundle_filepath": "js/bundle_no_mapping.js.map", + "service": { + "name": "My service", + "version": "0.1" + }, + "sourcemap": "{\n \"version\": 3,\n \"sources\": [\n \"webpack:///bundle.js\",\n \"webpack:///webpack/bootstrap 6002740481c9666b0d38\",\n \"webpack:///./scripts/index.js\",\n \"webpack:///./index.html\",\n \"webpack:///./scripts/app.js\"\n ],\n \"names\": [\n \"modules\",\n \"__webpack_require__\",\n \"moduleId\",\n \"installedModules\",\n \"exports\",\n \"module\",\n \"id\",\n \"loaded\",\n \"call\",\n \"m\",\n \"c\",\n \"p\",\n \"foo\",\n \"console\",\n \"log\",\n \"foobar\"\n ],\n \"mappings\": \"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\n \"file\": \"bundle.js\",\n \"sourcesContent\": [\n \"/******/ (function(modules) { // webpackBootstrap\\n/******/ \\t// The module cache\\n/******/ \\tvar installedModules = {};\\n/******/\\n/******/ \\t// The require function\\n/******/ \\tfunction __webpack_require__(moduleId) {\\n/******/\\n/******/ \\t\\t// Check if module is in cache\\n/******/ \\t\\tif(installedModules[moduleId])\\n/******/ \\t\\t\\treturn installedModules[moduleId].exports;\\n/******/\\n/******/ \\t\\t// Create a new module (and put it into the cache)\\n/******/ \\t\\tvar module = installedModules[moduleId] = {\\n/******/ \\t\\t\\texports: {},\\n/******/ \\t\\t\\tid: moduleId,\\n/******/ \\t\\t\\tloaded: 
false\\n/******/ \\t\\t};\\n/******/\\n/******/ \\t\\t// Execute the module function\\n/******/ \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n/******/\\n/******/ \\t\\t// Flag the module as loaded\\n/******/ \\t\\tmodule.loaded = true;\\n/******/\\n/******/ \\t\\t// Return the exports of the module\\n/******/ \\t\\treturn module.exports;\\n/******/ \\t}\\n/******/\\n/******/\\n/******/ \\t// expose the modules object (__webpack_modules__)\\n/******/ \\t__webpack_require__.m = modules;\\n/******/\\n/******/ \\t// expose the module cache\\n/******/ \\t__webpack_require__.c = installedModules;\\n/******/\\n/******/ \\t// __webpack_public_path__\\n/******/ \\t__webpack_require__.p = \\\"\\\";\\n/******/\\n/******/ \\t// Load entry module and return exports\\n/******/ \\treturn __webpack_require__(0);\\n/******/ })\\n/************************************************************************/\\n/******/ ([\\n/* 0 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\t// Webpack\\n\\t__webpack_require__(1)\\n\\t\\n\\t__webpack_require__(2)\\n\\t\\n\\tfoo()\\n\\n\\n/***/ },\\n/* 1 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\tmodule.exports = __webpack_require__.p + \\\"index.html\\\"\\n\\n/***/ },\\n/* 2 */\\n/***/ function(module, exports) {\\n\\n\\tfunction foo() {\\n\\t console.log(foobar)\\n\\t}\\n\\t\\n\\tfoo()\\n\\n\\n/***/ }\\n/******/ ]);\\n\\n\\n/** WEBPACK FOOTER **\\n ** bundle.js\\n **/\",\n \" \\t// The module cache\\n \\tvar installedModules = {};\\n\\n \\t// The require function\\n \\tfunction __webpack_require__(moduleId) {\\n\\n \\t\\t// Check if module is in cache\\n \\t\\tif(installedModules[moduleId])\\n \\t\\t\\treturn installedModules[moduleId].exports;\\n\\n \\t\\t// Create a new module (and put it into the cache)\\n \\t\\tvar module = installedModules[moduleId] = {\\n \\t\\t\\texports: {},\\n \\t\\t\\tid: moduleId,\\n \\t\\t\\tloaded: false\\n \\t\\t};\\n\\n \\t\\t// Execute the module function\\n \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n\\n \\t\\t// Flag the module as loaded\\n \\t\\tmodule.loaded = true;\\n\\n \\t\\t// Return the exports of the module\\n \\t\\treturn module.exports;\\n \\t}\\n\\n\\n \\t// expose the modules object (__webpack_modules__)\\n \\t__webpack_require__.m = modules;\\n\\n \\t// expose the module cache\\n \\t__webpack_require__.c = installedModules;\\n\\n \\t// __webpack_public_path__\\n \\t__webpack_require__.p = \\\"\\\";\\n\\n \\t// Load entry module and return exports\\n \\treturn __webpack_require__(0);\\n\\n\\n\\n/** WEBPACK FOOTER **\\n ** webpack/bootstrap 6002740481c9666b0d38\\n **/\",\n \"// Webpack\\nrequire('../index.html')\\n\\nrequire('./app')\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/index.js\\n ** module id = 0\\n ** module chunks = 0\\n **/\",\n \"module.exports = __webpack_public_path__ + \\\"index.html\\\"\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./index.html\\n ** module id = 1\\n ** module chunks = 0\\n **/\",\n \"function foo() {\\n console.log(foobar)\\n}\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/app.js\\n ** module id = 2\\n ** module chunks = 0\\n **/\"\n ],\n \"sourceRoot\": \"\"\n}\n" + } + } + ] +} diff --git a/beater/api/asset/sourcemap/test_approved/integration/TestRootHandler_PanicMiddleware.approved.json 
b/beater/api/asset/sourcemap/test_approved/integration/TestRootHandler_PanicMiddleware.approved.json deleted file mode 100644 index f4ec3b4f28e..00000000000 --- a/beater/api/asset/sourcemap/test_approved/integration/TestRootHandler_PanicMiddleware.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "error": "panic handling request" -} diff --git a/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Forbidden.approved.json b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Forbidden.approved.json new file mode 100644 index 00000000000..c95f7ef93cb --- /dev/null +++ b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Forbidden.approved.json @@ -0,0 +1,3 @@ +{ + "error": "unauthorized: anonymous access not permitted for sourcemap uploads" +} diff --git a/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Unauthorized.approved.json b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Unauthorized.approved.json index d7cdee759b9..ca3ae3ce16d 100644 --- a/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Unauthorized.approved.json +++ b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_AuthorizationMiddleware/Unauthorized.approved.json @@ -1,3 +1,3 @@ { - "error": "unauthorized" + "error": "authentication failed: missing or improperly formatted Authorization header: expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'" } diff --git a/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_KillSwitchMiddleware/DataStreams.approved.json b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_KillSwitchMiddleware/DataStreams.approved.json new file mode 100644 index 00000000000..bd5de196284 --- /dev/null +++ b/beater/api/asset/sourcemap/test_approved/integration/TestSourcemapHandler_KillSwitchMiddleware/DataStreams.approved.json @@ -0,0 +1,3 @@ +{ + "error": "forbidden request: When APM Server is managed by Fleet, Sourcemaps must be uploaded directly to Elasticsearch." 
+} diff --git a/beater/api/config/agent/handler.go b/beater/api/config/agent/handler.go index 8d49407aefa..96b453227b0 100644 --- a/beater/api/config/agent/handler.go +++ b/beater/api/config/agent/handler.go @@ -18,6 +18,7 @@ package agent import ( + "encoding/json" "fmt" "net/http" "strings" @@ -30,22 +31,19 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/apm-server/agentcfg" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/convert" - "github.com/elastic/apm-server/kibana" ) const ( errMaxAgeDuration = 5 * time.Minute - msgInvalidQuery = "invalid query" - msgKibanaDisabled = "disabled Kibana configuration" - msgKibanaVersionNotCompatible = "not a compatible Kibana version" - msgMethodUnsupported = "method not supported" - msgNoKibanaConnection = "unable to retrieve connection to Kibana" - msgServiceUnavailable = "service unavailable" + msgInvalidQuery = "invalid query" + msgMethodUnsupported = "method not supported" + msgNoKibanaConnection = "unable to retrieve connection to Kibana" + msgServiceUnavailable = "service unavailable" ) var ( @@ -53,105 +51,118 @@ var ( MonitoringMap = request.DefaultMonitoringMapForRegistry(registry) registry = monitoring.Default.NewRegistry("apm-server.acm") - errMsgKibanaDisabled = errors.New(msgKibanaDisabled) - errMsgNoKibanaConnection = errors.New(msgNoKibanaConnection) - errCacheControl = fmt.Sprintf("max-age=%v, must-revalidate", errMaxAgeDuration.Seconds()) - - // rumAgents keywords (new and old) - rumAgents = []string{"rum-js", "js-base"} + errCacheControl = fmt.Sprintf("max-age=%v, must-revalidate", errMaxAgeDuration.Seconds()) ) -// Handler returns a request.Handler for managing agent central configuration requests. 
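Both the old handler removed below and its replacement implement the same HTTP caching contract: responses carry Etag and Cache-Control headers, and a request whose If-None-Match matches the current Etag is answered with 304 Not Modified. A hedged agent-side sketch of that contract (the endpoint path is an assumption):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetchConfig polls agent central config, reusing the last seen Etag.
func fetchConfig(etag string) (newEtag string, body []byte, changed bool, err error) {
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:8200/config/v1/agents?service.name=opbeans", nil)
	if err != nil {
		return "", nil, false, err
	}
	if etag != "" {
		req.Header.Set("If-None-Match", etag)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", nil, false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return etag, nil, false, nil // unchanged; respect Cache-Control max-age
	}
	body, err = ioutil.ReadAll(resp.Body)
	return resp.Header.Get("Etag"), body, true, err
}

func main() {
	etag, body, changed, err := fetchConfig("")
	fmt.Println(etag, string(body), changed, err)
}
```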
-func Handler(client kibana.Client, config *config.AgentConfig) request.Handler { - cacheControl := fmt.Sprintf("max-age=%v, must-revalidate", config.Cache.Expiration.Seconds()) - fetcher := agentcfg.NewFetcher(client, config.Cache.Expiration) - - return func(c *request.Context) { - // error handling - c.Header().Set(headers.CacheControl, errCacheControl) +type handler struct { + f agentcfg.Fetcher - ok := c.RateLimiter == nil || c.RateLimiter.Allow() - if !ok { - c.Result.SetDefault(request.IDResponseErrorsRateLimit) - c.Write() - return - } + allowAnonymousAgents []string + cacheControl, defaultServiceEnvironment string +} - if valid := validateClient(c, client, c.Authorization.IsAuthorizationConfigured()); !valid { - c.Write() - return - } +func NewHandler( + f agentcfg.Fetcher, + config config.KibanaAgentConfig, + defaultServiceEnvironment string, + allowAnonymousAgents []string, +) request.Handler { + if f == nil { + panic("fetcher must not be nil") + } + cacheControl := fmt.Sprintf("max-age=%v, must-revalidate", config.Cache.Expiration.Seconds()) + h := &handler{ + f: f, + cacheControl: cacheControl, + defaultServiceEnvironment: defaultServiceEnvironment, + allowAnonymousAgents: allowAnonymousAgents, + } - query, queryErr := buildQuery(c) - if queryErr != nil { - extractQueryError(c, queryErr, c.Authorization.IsAuthorizationConfigured()) - c.Write() - return - } + return h.Handle +} - result, err := fetcher.Fetch(c.Request.Context(), query) - if err != nil { - apm.CaptureError(c.Request.Context(), err).Send() - extractInternalError(c, err, c.Authorization.IsAuthorizationConfigured()) - c.Write() - return - } +// Handler implements request.Handler for managing agent central configuration +// requests. +func (h *handler) Handle(c *request.Context) { + // error handling + c.Header().Set(headers.CacheControl, errCacheControl) - // configuration successfully fetched - c.Header().Set(headers.CacheControl, cacheControl) - c.Header().Set(headers.Etag, fmt.Sprintf("\"%s\"", result.Source.Etag)) - c.Header().Set(headers.AccessControlExposeHeaders, headers.Etag) + query, queryErr := buildQuery(c) + if queryErr != nil { + extractQueryError(c, queryErr) + c.Write() + return + } + if query.Service.Environment == "" { + query.Service.Environment = h.defaultServiceEnvironment + } - if result.Source.Etag == ifNoneMatch(c) { - c.Result.SetDefault(request.IDResponseValidNotModified) + // Only service, and not agent, is known for config queries. + // For anonymous/untrusted agents, we filter the results using + // query.InsecureAgents below. + authResource := auth.Resource{ServiceName: query.Service.Name} + if err := auth.Authorize(c.Request.Context(), auth.ActionAgentConfig, authResource); err != nil { + if errors.Is(err, auth.ErrUnauthorized) { + id := request.IDResponseErrorsForbidden + status := request.MapResultIDToStatus[id] + c.Result.Set(id, status.Code, err.Error(), nil, nil) } else { - c.Result.SetWithBody(request.IDResponseValidOK, result.Source.Settings) + c.Result.SetDefault(request.IDResponseErrorsServiceUnavailable) + c.Result.Err = err } c.Write() + return } -} - -func validateClient(c *request.Context, client kibana.Client, withAuth bool) bool { - if client == nil { - c.Result.Set(request.IDResponseErrorsServiceUnavailable, - http.StatusServiceUnavailable, - msgKibanaDisabled, - msgKibanaDisabled, - errMsgKibanaDisabled) - return false + if c.Authentication.Method == auth.MethodAnonymous { + // Unauthenticated client, restrict results. 
+ query.InsecureAgents = h.allowAnonymousAgents + } - if supported, err := client.SupportsVersion(c.Request.Context(), agentcfg.KibanaMinVersion, true); !supported { - if err != nil { - c.Result.Set(request.IDResponseErrorsServiceUnavailable, + result, err := h.f.Fetch(c.Request.Context(), query) + if err != nil { + var verr *agentcfg.ValidationError + if errors.As(err, &verr) { + body := verr.Body() + if strings.HasPrefix(body, agentcfg.ErrMsgKibanaVersionNotCompatible) { + body = authErrMsg(c, body, agentcfg.ErrMsgKibanaVersionNotCompatible) + } + c.Result.Set( + request.IDResponseErrorsServiceUnavailable, http.StatusServiceUnavailable, - msgNoKibanaConnection, - msgNoKibanaConnection, - errMsgNoKibanaConnection) - return false + verr.Keyword(), + body, + verr, + ) + } else { + apm.CaptureError(c.Request.Context(), err).Send() + extractInternalError(c, err) } + c.Write() + return + } + + // configuration successfully fetched + c.Header().Set(headers.CacheControl, h.cacheControl) + c.Header().Set(headers.Etag, fmt.Sprintf("\"%s\"", result.Source.Etag)) + c.Header().Set(headers.AccessControlExposeHeaders, headers.Etag) - version, _ := client.GetVersion(c.Request.Context()) - - errMsg := fmt.Sprintf("%s: min version %+v, configured version %+v", - msgKibanaVersionNotCompatible, agentcfg.KibanaMinVersion, version.String()) - body := authErrMsg(errMsg, msgKibanaVersionNotCompatible, withAuth) - c.Result.Set(request.IDResponseErrorsServiceUnavailable, - http.StatusServiceUnavailable, - msgKibanaVersionNotCompatible, - body, - errors.New(errMsg)) - return false + if result.Source.Etag == ifNoneMatch(c) { + c.Result.SetDefault(request.IDResponseValidNotModified) + } else { + c.Result.SetWithBody(request.IDResponseValidOK, result.Source.Settings) } - return true + c.Write() } -func buildQuery(c *request.Context) (query agentcfg.Query, err error) { +func buildQuery(c *request.Context) (agentcfg.Query, error) { r := c.Request + var query agentcfg.Query switch r.Method { case http.MethodPost: - err = convert.FromReader(r.Body, &query) + if err := json.NewDecoder(r.Body).Decode(&query); err != nil { + return query, err + } case http.MethodGet: params := r.URL.Query() query = agentcfg.Query{ @@ -161,41 +172,40 @@ func buildQuery(c *request.Context) (query agentcfg.Query, err error) { }, } default: - err = errors.Errorf("%s: %s", msgMethodUnsupported, r.Method) - } - - if err == nil && query.Service.Name == "" { - err = errors.New(agentcfg.ServiceName + " is required") + return query, errors.Errorf("%s: %s", msgMethodUnsupported, r.Method) } - if c.IsRum { - query.InsecureAgents = rumAgents + if query.Service.Name == "" { + return query, errors.New(agentcfg.ServiceName + " is required") } + query.Etag = ifNoneMatch(c) - return + return query, nil } -func extractInternalError(c *request.Context, err error, withAuth bool) { +func extractInternalError(c *request.Context, err error) { msg := err.Error() var body interface{} var keyword string switch { case strings.Contains(msg, agentcfg.ErrMsgSendToKibanaFailed): - body = authErrMsg(msg, agentcfg.ErrMsgSendToKibanaFailed, withAuth) + body = authErrMsg(c, msg, agentcfg.ErrMsgSendToKibanaFailed) keyword = agentcfg.ErrMsgSendToKibanaFailed case strings.Contains(msg, agentcfg.ErrMsgReadKibanaResponse): - body = authErrMsg(msg, agentcfg.ErrMsgReadKibanaResponse, withAuth) + body = authErrMsg(c, msg, agentcfg.ErrMsgReadKibanaResponse) keyword = agentcfg.ErrMsgReadKibanaResponse case strings.Contains(msg, agentcfg.ErrUnauthorized):
fullMsg := "APM Server is not authorized to query Kibana. " + "Please configure apm-server.kibana.username and apm-server.kibana.password, " + "and ensure the user has the necessary privileges." - body = authErrMsg(fullMsg, agentcfg.ErrUnauthorized, withAuth) + body = authErrMsg(c, fullMsg, agentcfg.ErrUnauthorized) keyword = agentcfg.ErrUnauthorized default: - body = authErrMsg(msg, msgServiceUnavailable, withAuth) + body = authErrMsg(c, msg, msgServiceUnavailable) keyword = msgServiceUnavailable } @@ -206,25 +216,25 @@ func extractInternalError(c *request.Context, err error, withAuth bool) { err) } -func extractQueryError(c *request.Context, err error, withAuth bool) { +func extractQueryError(c *request.Context, err error) { msg := err.Error() if strings.Contains(msg, msgMethodUnsupported) { c.Result.Set(request.IDResponseErrorsMethodNotAllowed, http.StatusMethodNotAllowed, msgMethodUnsupported, - authErrMsg(msg, msgMethodUnsupported, withAuth), + authErrMsg(c, msg, msgMethodUnsupported), err) return } c.Result.Set(request.IDResponseErrorsInvalidQuery, http.StatusBadRequest, msgInvalidQuery, - authErrMsg(msg, msgInvalidQuery, withAuth), + authErrMsg(c, msg, msgInvalidQuery), err) } -func authErrMsg(fullMsg, shortMsg string, withAuth bool) string { - if withAuth { +func authErrMsg(c *request.Context, fullMsg, shortMsg string) string { + if c.Authentication.Method != auth.MethodAnonymous { return fullMsg } return shortMsg diff --git a/beater/api/config/agent/handler_test.go b/beater/api/config/agent/handler_test.go index cbfe03613f1..134b5a421bb 100644 --- a/beater/api/config/agent/handler_test.go +++ b/beater/api/config/agent/handler_test.go @@ -18,9 +18,11 @@ package agent import ( + "bytes" "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/http/httptest" @@ -31,19 +33,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.elastic.co/apm/apmtest" - "golang.org/x/time/rate" "github.com/elastic/beats/v7/libbeat/common" libkibana "github.com/elastic/beats/v7/libbeat/kibana" "github.com/elastic/apm-server/agentcfg" - "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/convert" "github.com/elastic/apm-server/kibana" - "github.com/elastic/apm-server/tests" + "github.com/elastic/apm-server/kibana/kibanatest" ) type m map[string]interface{} @@ -60,12 +60,11 @@ var ( queryParams map[string]string method string respStatus int - respBodyToken map[string]string respBody map[string]string respEtagHeader, respCacheControlHeader string }{ "NotModified": { - kbClient: tests.MockKibana(http.StatusOK, m{ + kbClient: kibanatest.MockKibana(http.StatusOK, m{ "_id": "1", "_source": m{ "settings": m{ @@ -83,7 +82,7 @@ var ( }, "ModifiedWithEtag": { - kbClient: tests.MockKibana(http.StatusOK, m{ + kbClient: kibanatest.MockKibana(http.StatusOK, m{ "_id": "1", "_source": m{ "settings": m{ @@ -99,76 +98,70 @@ var ( respEtagHeader: `"` + mockEtag + `"`, respCacheControlHeader: "max-age=4, must-revalidate", respBody: successBody, - respBodyToken: successBody, }, "NoConfigFound": { - kbClient: tests.MockKibana(http.StatusNotFound, m{}, mockVersion, true), + kbClient: kibanatest.MockKibana(http.StatusNotFound, m{}, mockVersion, true), 
method: http.MethodGet, queryParams: map[string]string{"service.name": "opbeans-python"}, respStatus: http.StatusOK, respCacheControlHeader: "max-age=4, must-revalidate", respEtagHeader: fmt.Sprintf("\"%s\"", agentcfg.EtagSentinel), respBody: emptyBody, - respBodyToken: emptyBody, }, "SendToKibanaFailed": { - kbClient: tests.MockKibana(http.StatusBadGateway, m{}, mockVersion, true), + kbClient: kibanatest.MockKibana(http.StatusBadGateway, m{}, mockVersion, true), method: http.MethodGet, queryParams: map[string]string{"service.name": "opbeans-ruby"}, respStatus: http.StatusServiceUnavailable, respCacheControlHeader: "max-age=300, must-revalidate", - respBody: map[string]string{"error": agentcfg.ErrMsgSendToKibanaFailed}, - respBodyToken: map[string]string{"error": fmt.Sprintf("%s: testerror", agentcfg.ErrMsgSendToKibanaFailed)}, + respBody: map[string]string{"error": fmt.Sprintf("%s: testerror", agentcfg.ErrMsgSendToKibanaFailed)}, }, "NoConnection": { - kbClient: tests.MockKibana(http.StatusServiceUnavailable, m{}, mockVersion, false), + kbClient: kibanatest.MockKibana(http.StatusServiceUnavailable, m{}, mockVersion, false), method: http.MethodGet, + queryParams: map[string]string{"service.name": "opbeans-node"}, respStatus: http.StatusServiceUnavailable, respCacheControlHeader: "max-age=300, must-revalidate", - respBody: map[string]string{"error": msgNoKibanaConnection}, - respBodyToken: map[string]string{"error": msgNoKibanaConnection}, + respBody: map[string]string{"error": agentcfg.ErrMsgNoKibanaConnection}, }, "InvalidVersion": { - kbClient: tests.MockKibana(http.StatusServiceUnavailable, m{}, + kbClient: kibanatest.MockKibana(http.StatusServiceUnavailable, m{}, *common.MustNewVersion("7.2.0"), true), method: http.MethodGet, + queryParams: map[string]string{"service.name": "opbeans-node"}, respStatus: http.StatusServiceUnavailable, respCacheControlHeader: "max-age=300, must-revalidate", - respBody: map[string]string{"error": msgKibanaVersionNotCompatible}, - respBodyToken: map[string]string{"error": fmt.Sprintf("%s: min version 7.5.0, "+ - "configured version 7.2.0", msgKibanaVersionNotCompatible)}, + respBody: map[string]string{"error": fmt.Sprintf("%s: min version 7.5.0, "+ + "configured version 7.2.0", agentcfg.ErrMsgKibanaVersionNotCompatible)}, }, "NoService": { - kbClient: tests.MockKibana(http.StatusOK, m{}, mockVersion, true), + kbClient: kibanatest.MockKibana(http.StatusOK, m{}, mockVersion, true), method: http.MethodGet, respStatus: http.StatusBadRequest, - respBody: map[string]string{"error": msgInvalidQuery}, - respBodyToken: map[string]string{"error": "service.name is required"}, + respBody: map[string]string{"error": "service.name is required"}, respCacheControlHeader: "max-age=300, must-revalidate", }, "MethodNotAllowed": { - kbClient: tests.MockKibana(http.StatusOK, m{}, mockVersion, true), + kbClient: kibanatest.MockKibana(http.StatusOK, m{}, mockVersion, true), method: http.MethodPut, respStatus: http.StatusMethodNotAllowed, respCacheControlHeader: "max-age=300, must-revalidate", - respBody: map[string]string{"error": msgMethodUnsupported}, - respBodyToken: map[string]string{"error": fmt.Sprintf("%s: PUT", msgMethodUnsupported)}, + respBody: map[string]string{"error": fmt.Sprintf("%s: PUT", msgMethodUnsupported)}, }, "Unauthorized": { - kbClient: tests.MockKibana(http.StatusUnauthorized, m{"error": "Unauthorized"}, mockVersion, true), + kbClient: kibanatest.MockKibana(http.StatusUnauthorized, m{"error": "Unauthorized"}, mockVersion, true), method: http.MethodGet, 
queryParams: map[string]string{"service.name": "opbeans-node"}, respStatus: http.StatusServiceUnavailable, respCacheControlHeader: "max-age=300, must-revalidate", - respBody: map[string]string{"error": agentcfg.ErrUnauthorized}, - respBodyToken: map[string]string{"error": "APM Server is not authorized to query Kibana. " + + respBody: map[string]string{"error": "APM Server is not authorized to query Kibana. " + "Please configure apm-server.kibana.username and apm-server.kibana.password, " + "and ensure the user has the necessary privileges."}, }, @@ -176,57 +169,104 @@ var ( ) func TestAgentConfigHandler(t *testing.T) { - var cfg = config.AgentConfig{Cache: &config.Cache{Expiration: 4 * time.Second}} - - for name, tc := range testcases { - - runTest := func(t *testing.T, expectedBody map[string]string, auth authorization.Authorization) { - h := Handler(tc.kbClient, &cfg) - w := httptest.NewRecorder() - r := httptest.NewRequest(tc.method, target(tc.queryParams), nil) - for k, v := range tc.requestHeader { - r.Header.Set(k, v) - } - ctx := request.NewContext() - ctx.Reset(w, r) - ctx.Authorization = auth - h(ctx) - - require.Equal(t, tc.respStatus, w.Code) - require.Equal(t, tc.respCacheControlHeader, w.Header().Get(headers.CacheControl)) - require.Equal(t, tc.respEtagHeader, w.Header().Get(headers.Etag)) - b, err := ioutil.ReadAll(w.Body) - require.NoError(t, err) - var actualBody map[string]string - json.Unmarshal(b, &actualBody) - assert.Equal(t, expectedBody, actualBody) + var cfg = config.KibanaAgentConfig{Cache: config.Cache{Expiration: 4 * time.Second}} + for _, tc := range testcases { + f := agentcfg.NewKibanaFetcher(tc.kbClient, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "", nil) + r := httptest.NewRequest(tc.method, target(tc.queryParams), nil) + for k, v := range tc.requestHeader { + r.Header.Set(k, v) } + ctx, w := newRequestContext(r) + h(ctx) + + require.Equal(t, tc.respStatus, w.Code) + require.Equal(t, tc.respCacheControlHeader, w.Header().Get(headers.CacheControl)) + require.Equal(t, tc.respEtagHeader, w.Header().Get(headers.Etag)) + b, err := ioutil.ReadAll(w.Body) + require.NoError(t, err) + var actualBody map[string]string + json.Unmarshal(b, &actualBody) + assert.Equal(t, tc.respBody, actualBody) + } +} - t.Run(name+"NoSecretToken", func(t *testing.T) { - runTest(t, tc.respBody, authorization.AllowAuth{}) - }) +func TestAgentConfigHandlerAnonymousAccess(t *testing.T) { + kbClient := kibanatest.MockKibana(http.StatusUnauthorized, m{"error": "Unauthorized"}, mockVersion, true) + cfg := config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(kbClient, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "", nil) + + for _, tc := range []struct { + anonymous bool + response string + authResource *auth.Resource + }{{ + anonymous: false, + response: `{"error":"APM Server is not authorized to query Kibana. 
Please configure apm-server.kibana.username and apm-server.kibana.password, and ensure the user has the necessary privileges."}`, + authResource: &auth.Resource{ServiceName: "opbeans"}, + }, { + anonymous: true, + response: `{"error":"Unauthorized"}`, + authResource: &auth.Resource{ServiceName: "opbeans"}, + }} { + r := httptest.NewRequest(http.MethodGet, target(map[string]string{"service.name": "opbeans"}), nil) + c, w := newRequestContext(r) + + c.Authentication.Method = "none" + if tc.anonymous { + c.Authentication.Method = "" + } - t.Run(name+"WithSecretToken", func(t *testing.T) { - runTest(t, tc.respBodyToken, authorization.DenyAuth{}) - }) + var requestedResource *auth.Resource + c.Request = withAuthorizer(c.Request, + authorizerFunc(func(ctx context.Context, action auth.Action, resource auth.Resource) error { + if requestedResource != nil { + panic("expected only one Authorize request") + } + requestedResource = &resource + return nil + }), + ) + h(c) + assert.Equal(t, tc.response+"\n", w.Body.String()) + assert.Equal(t, tc.authResource, requestedResource) } } -func TestAgentConfigHandler_NoKibanaClient(t *testing.T) { - cfg := config.AgentConfig{Cache: &config.Cache{Expiration: time.Nanosecond}} - h := Handler(nil, &cfg) - - w := httptest.NewRecorder() - ctx := request.NewContext() - ctx.Reset(w, httptest.NewRequest(http.MethodGet, "/config", nil)) +func TestAgentConfigHandlerAuthorizedForService(t *testing.T) { + cfg := config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(nil, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "", nil) + + r := httptest.NewRequest(http.MethodGet, target(map[string]string{"service.name": "opbeans"}), nil) + ctx, w := newRequestContext(r) + + var queriedResource auth.Resource + ctx.Request = withAuthorizer(ctx.Request, + authorizerFunc(func(ctx context.Context, action auth.Action, resource auth.Resource) error { + queriedResource = resource + return auth.ErrUnauthorized + }), + ) h(ctx) + assert.Equal(t, http.StatusForbidden, w.Code, w.Body.String()) + assert.Equal(t, auth.Resource{ServiceName: "opbeans"}, queriedResource) +} + +func TestAgentConfigHandler_NoKibanaClient(t *testing.T) { + cfg := config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(nil, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "", nil) + + w := sendRequest(h, httptest.NewRequest(http.MethodPost, "/config", jsonReader(m{ + "service": m{"name": "opbeans-node"}}))) assert.Equal(t, http.StatusServiceUnavailable, w.Code, w.Body.String()) } func TestAgentConfigHandler_PostOk(t *testing.T) { - - kb := tests.MockKibana(http.StatusOK, m{ + kb := kibanatest.MockKibana(http.StatusOK, m{ "_id": "1", "_source": m{ "settings": m{ @@ -235,27 +275,47 @@ func TestAgentConfigHandler_PostOk(t *testing.T) { }, }, mockVersion, true) - var cfg = config.AgentConfig{Cache: &config.Cache{Expiration: time.Nanosecond}} - h := Handler(kb, &cfg) - - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/config", convert.ToReader(m{ - "service": m{"name": "opbeans-node"}})) - ctx := request.NewContext() - ctx.Reset(w, r) - h(ctx) + var cfg = config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(kb, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "", nil) + w := sendRequest(h, httptest.NewRequest(http.MethodPost, "/config", jsonReader(m{ + "service": m{"name": "opbeans-node"}}))) assert.Equal(t, http.StatusOK, w.Code, 
w.Body.String()) } +func TestAgentConfigHandler_DefaultServiceEnvironment(t *testing.T) { + kb := &recordingKibanaClient{ + Client: kibanatest.MockKibana(http.StatusOK, m{ + "_id": "1", + "_source": m{ + "settings": m{ + "sampling_rate": 0.5, + }, + }, + }, mockVersion, true), + } + + var cfg = config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(kb, cfg.Cache.Expiration) + h := NewHandler(f, cfg, "default", nil) + + sendRequest(h, httptest.NewRequest(http.MethodPost, "/config", jsonReader(m{"service": m{"name": "opbeans-node", "environment": "specified"}}))) + sendRequest(h, httptest.NewRequest(http.MethodPost, "/config", jsonReader(m{"service": m{"name": "opbeans-node"}}))) + require.Len(t, kb.requests, 2) + + body0, _ := ioutil.ReadAll(kb.requests[0].Body) + body1, _ := ioutil.ReadAll(kb.requests[1].Body) + assert.Equal(t, `{"service":{"name":"opbeans-node","environment":"specified"},"etag":""}`+"\n", string(body0)) + assert.Equal(t, `{"service":{"name":"opbeans-node","environment":"default"},"etag":""}`+"\n", string(body1)) +} + func TestAgentConfigRum(t *testing.T) { h := getHandler("rum-js") - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/rum", convert.ToReader(m{ + r := httptest.NewRequest(http.MethodPost, "/rum", jsonReader(m{ "service": m{"name": "opbeans"}})) - ctx := request.NewContext() - ctx.Reset(w, r) - ctx.IsRum = true + ctx, w := newRequestContext(r) + ctx.Authentication.Method = "" // unauthenticated h(ctx) var actual map[string]string json.Unmarshal(w.Body.Bytes(), &actual) @@ -266,22 +326,22 @@ func TestAgentConfigRum(t *testing.T) { func TestAgentConfigRumEtag(t *testing.T) { h := getHandler("rum-js") - w := httptest.NewRecorder() r := httptest.NewRequest(http.MethodGet, "/rum?ifnonematch=123&service.name=opbeans", nil) - ctx := request.NewContext() - ctx.Reset(w, r) - ctx.IsRum = true + ctx, w := newRequestContext(r) h(ctx) assert.Equal(t, http.StatusNotModified, w.Code, w.Body.String()) } func TestAgentConfigNotRum(t *testing.T) { h := getHandler("node-js") - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/backend", convert.ToReader(m{ + r := httptest.NewRequest(http.MethodPost, "/backend", jsonReader(m{ "service": m{"name": "opbeans"}})) - ctx := request.NewContext() - ctx.Reset(w, r) + ctx, w := newRequestContext(r) + ctx.Request = withAuthorizer(ctx.Request, + authorizerFunc(func(context.Context, auth.Action, auth.Resource) error { + return nil + }), + ) h(ctx) var actual map[string]string json.Unmarshal(w.Body.Bytes(), &actual) @@ -291,12 +351,10 @@ func TestAgentConfigNotRum(t *testing.T) { func TestAgentConfigNoLeak(t *testing.T) { h := getHandler("node-js") - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/rum", convert.ToReader(m{ + r := httptest.NewRequest(http.MethodPost, "/rum", jsonReader(m{ "service": m{"name": "opbeans"}})) - ctx := request.NewContext() - ctx.Reset(w, r) - ctx.IsRum = true + ctx, w := newRequestContext(r) + ctx.Authentication.Method = "" // unauthenticated h(ctx) var actual map[string]string json.Unmarshal(w.Body.Bytes(), &actual) @@ -304,24 +362,8 @@ func TestAgentConfigNoLeak(t *testing.T) { assert.Equal(t, map[string]string{}, actual) } -func TestAgentConfigRateLimit(t *testing.T) { - h := getHandler("rum-js") - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/rum", convert.ToReader(m{ - "service": m{"name": "opbeans"}})) - ctx := request.NewContext() - ctx.Reset(w, r) - 
ctx.IsRum = true - ctx.RateLimiter = rate.NewLimiter(rate.Limit(0), 0) - h(ctx) - var actual map[string]string - json.Unmarshal(w.Body.Bytes(), &actual) - assert.Equal(t, http.StatusTooManyRequests, w.Code, w.Body.String()) - assert.Equal(t, map[string]string{"error": "too many requests"}, actual) -} - func getHandler(agent string) request.Handler { - kb := tests.MockKibana(http.StatusOK, m{ + kb := kibanatest.MockKibana(http.StatusOK, m{ "_id": "1", "_source": m{ "settings": m{ @@ -332,9 +374,9 @@ func getHandler(agent string) request.Handler { "agent_name": agent, }, }, mockVersion, true) - - var cfg = config.AgentConfig{Cache: &config.Cache{Expiration: time.Nanosecond}} - return Handler(kb, &cfg) + cfg := config.KibanaAgentConfig{Cache: config.Cache{Expiration: time.Nanosecond}} + f := agentcfg.NewKibanaFetcher(kb, cfg.Cache.Expiration) + return NewHandler(f, cfg, "", []string{"rum-js"}) } func TestIfNoneMatch(t *testing.T) { @@ -355,25 +397,43 @@ func TestIfNoneMatch(t *testing.T) { } func TestAgentConfigTraceContext(t *testing.T) { - kibanaCfg := libkibana.DefaultClientConfig() + kibanaCfg := config.KibanaConfig{Enabled: true, ClientConfig: libkibana.DefaultClientConfig()} kibanaCfg.Host = "testKibana:12345" client := kibana.NewConnectingClient(&kibanaCfg) - handler := Handler(client, &config.AgentConfig{Cache: &config.Cache{Expiration: 5 * time.Minute}}) + cfg := config.KibanaAgentConfig{Cache: config.Cache{Expiration: 5 * time.Minute}} + f := agentcfg.NewKibanaFetcher(client, cfg.Cache.Expiration) + handler := NewHandler(f, cfg, "default", nil) _, spans, _ := apmtest.WithTransaction(func(ctx context.Context) { // When the handler is called with a context containing // a transaction, the underlying Kibana query should create a span - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/backend", convert.ToReader(m{ + r := httptest.NewRequest(http.MethodPost, "/backend", jsonReader(m{ "service": m{"name": "opbeans"}})) - r = r.WithContext(ctx) - c := request.NewContext() - c.Reset(w, r) - handler(c) + sendRequest(handler, r.WithContext(ctx)) }) require.Len(t, spans, 1) assert.Equal(t, "app", spans[0].Type) } +func sendRequest(h request.Handler, r *http.Request) *httptest.ResponseRecorder { + ctx, recorder := newRequestContext(r) + ctx.Request = withAuthorizer(ctx.Request, + authorizerFunc(func(context.Context, auth.Action, auth.Resource) error { + return nil + }), + ) + h(ctx) + return recorder +} + +func newRequestContext(r *http.Request) (*request.Context, *httptest.ResponseRecorder) { + w := httptest.NewRecorder() + ctx := request.NewContext() + ctx.Reset(w, r) + ctx.Request = withAnonymousAuthorizer(ctx.Request) + ctx.Authentication.Method = auth.MethodNone + return ctx, w +} + func target(params map[string]string) string { t := "/config" if len(params) == 0 { @@ -385,3 +445,44 @@ func target(params map[string]string) string { } return t } + +type recordingKibanaClient struct { + kibana.Client + requests []*http.Request +} + +func (c *recordingKibanaClient) Send(ctx context.Context, method string, path string, params url.Values, header http.Header, body io.Reader) (*http.Response, error) { + req := httptest.NewRequest(method, path, body) + req.URL.RawQuery = params.Encode() + for k, values := range header { + for _, v := range values { + req.Header.Add(k, v) + } + } + c.requests = append(c.requests, req.WithContext(ctx)) + return c.Client.Send(ctx, method, path, params, header, body) +} + +func withAnonymousAuthorizer(req *http.Request) *http.Request { + 
return withAuthorizer(req, authorizerFunc(func(context.Context, auth.Action, auth.Resource) error { + return nil + })) +} + +func withAuthorizer(req *http.Request, authz auth.Authorizer) *http.Request { + return req.WithContext(auth.ContextWithAuthorizer(req.Context(), authz)) +} + +type authorizerFunc func(context.Context, auth.Action, auth.Resource) error + +func (f authorizerFunc) Authorize(ctx context.Context, action auth.Action, resource auth.Resource) error { + return f(ctx, action, resource) +} + +func jsonReader(v interface{}) io.Reader { + data, err := json.Marshal(v) + if err != nil { + panic(err) + } + return bytes.NewReader(data) +} diff --git a/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_AuthorizationMiddleware/Unauthorized.approved.json b/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_AuthorizationMiddleware/Unauthorized.approved.json index d7cdee759b9..ca3ae3ce16d 100644 --- a/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_AuthorizationMiddleware/Unauthorized.approved.json +++ b/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_AuthorizationMiddleware/Unauthorized.approved.json @@ -1,3 +1,3 @@ { - "error": "unauthorized" + "error": "authentication failed: missing or improperly formatted Authorization header: expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'" } diff --git a/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_DirectConfiguration.approved.json b/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_DirectConfiguration.approved.json new file mode 100644 index 00000000000..5eeddc85243 --- /dev/null +++ b/beater/api/config/agent/test_approved/integration/TestConfigAgentHandler_DirectConfiguration.approved.json @@ -0,0 +1,3 @@ +{ + "key1": "val1" +} diff --git a/beater/api/intake/handler.go b/beater/api/intake/handler.go index 054ba85b65a..99dae19332f 100644 --- a/beater/api/intake/handler.go +++ b/beater/api/intake/handler.go @@ -18,16 +18,18 @@ package intake import ( + "context" + "errors" "fmt" "io" "net/http" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/beater/ratelimit" "github.com/elastic/apm-server/beater/request" "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model" @@ -35,125 +37,181 @@ import ( "github.com/elastic/apm-server/publish" ) +const ( + batchSize = 10 +) + var ( // MonitoringMap holds a mapping for request.IDs to monitoring counters MonitoringMap = request.DefaultMonitoringMapForRegistry(registry) registry = monitoring.Default.NewRegistry("apm-server.server") + + errMethodNotAllowed = errors.New("only POST requests are supported") + errServerShuttingDown = errors.New("server is shutting down") + errInvalidContentType = errors.New("invalid content type") ) +// StreamHandler is an interface for handling an Elastic APM agent ND-JSON event +// stream, implemented by processor/stream. +type StreamHandler interface { + HandleStream( + ctx context.Context, + base model.APMEvent, + stream io.Reader, + batchSize int, + processor model.BatchProcessor, + out *stream.Result, + ) error +} + +// RequestMetadataFunc is a function type supplied to Handler for extracting +// metadata from the request. 
This is used for conditionally injecting the +// source IP address as `client.ip` for RUM. +type RequestMetadataFunc func(*request.Context) model.APMEvent + // Handler returns a request.Handler for managing intake requests for backend and rum events. -func Handler(processor *stream.Processor, report publish.Reporter) request.Handler { +func Handler(handler StreamHandler, requestMetadataFunc RequestMetadataFunc, batchProcessor model.BatchProcessor) request.Handler { return func(c *request.Context) { - - serr := validateRequest(c.Request) - if serr != nil { - sendError(c, serr) + if err := validateRequest(c); err != nil { + writeError(c, err) return } - ok := c.RateLimiter == nil || c.RateLimiter.Allow() - if !ok { - sendError(c, &stream.Error{ - Type: stream.RateLimitErrType, Message: "rate limit exceeded"}) + reader, err := decoder.CompressedRequestReader(c.Request) + if err != nil { + writeError(c, compressedRequestReaderError{err}) return } - reader, serr := bodyReader(c.Request) - if serr != nil { - sendError(c, serr) - return + base := requestMetadataFunc(c) + var result stream.Result + if err := handler.HandleStream( + c.Request.Context(), + base, + reader, + batchSize, + batchProcessor, + &result, + ); err != nil { + result.Add(err) } + writeStreamResult(c, &result) + } +} - metadata := model.Metadata{ - UserAgent: model.UserAgent{Original: c.RequestMetadata.UserAgent}, - Client: model.Client{IP: c.RequestMetadata.ClientIP}, - System: model.System{IP: c.RequestMetadata.SystemIP}} - res := processor.HandleStream(c.Request.Context(), c.RateLimiter, &metadata, reader, report) - sendResponse(c, res) +func validateRequest(c *request.Context) error { + if c.Request.Method != http.MethodPost { + return errMethodNotAllowed + } + if contentType := c.Request.Header.Get(headers.ContentType); !strings.Contains(contentType, "application/x-ndjson") { + return fmt.Errorf("%w: '%s'", errInvalidContentType, contentType) } + return nil } -func sendResponse(c *request.Context, sr *stream.Result) { - code := http.StatusAccepted +func writeError(c *request.Context, err error) { + var result stream.Result + result.Add(err) + writeStreamResult(c, &result) +} + +func writeStreamResult(c *request.Context, sr *stream.Result) { + statusCode := http.StatusAccepted id := request.IDResponseValidAccepted - set := func(c int, i request.ResultID) { - if c > code { - code = c - id = i - } + jsonResult := jsonResult{Accepted: sr.Accepted} + var errorMessages []string + + if n := len(sr.Errors); n > 0 { + jsonResult.Errors = make([]jsonError, n) + errorMessages = make([]string, n) } -L: - for _, err := range sr.Errors { - switch err.Type { - case stream.MethodForbiddenErrType: + for i, err := range sr.Errors { + errID := request.IDResponseErrorsInternal + var invalidInput *stream.InvalidInputError + if errors.As(err, &invalidInput) { + if invalidInput.TooLarge { + errID = request.IDResponseErrorsRequestTooLarge + } else { + errID = request.IDResponseErrorsValidate + } + jsonResult.Errors[i] = jsonError{ + Message: invalidInput.Message, + Document: invalidInput.Document, + } + } else { + if errors.As(err, &compressedRequestReaderError{}) { + errID = request.IDResponseErrorsValidate + } else { + switch { + case errors.Is(err, publish.ErrChannelClosed): + errID = request.IDResponseErrorsShuttingDown + err = errServerShuttingDown + case errors.Is(err, publish.ErrFull): + errID = request.IDResponseErrorsFullQueue + case errors.Is(err, errMethodNotAllowed): + errID = request.IDResponseErrorsMethodNotAllowed + case 
errors.Is(err, errInvalidContentType): + errID = request.IDResponseErrorsValidate + case errors.Is(err, ratelimit.ErrRateLimitExceeded): + errID = request.IDResponseErrorsRateLimit + case errors.Is(err, auth.ErrUnauthorized): + errID = request.IDResponseErrorsForbidden + } + } + jsonResult.Errors[i] = jsonError{Message: err.Error()} + } + errorMessages[i] = jsonResult.Errors[i].Message + + var errStatusCode int + switch errID { + case request.IDResponseErrorsMethodNotAllowed: // TODO: remove exception case and use StatusMethodNotAllowed (breaking bugfix) - set(http.StatusBadRequest, request.IDResponseErrorsMethodNotAllowed) - case stream.InputTooLargeErrType: + errStatusCode = http.StatusBadRequest + case request.IDResponseErrorsRequestTooLarge: // TODO: remove exception case and use StatusRequestEntityTooLarge (breaking bugfix) - set(http.StatusBadRequest, request.IDResponseErrorsRequestTooLarge) - case stream.InvalidInputErrType: - set(request.MapResultIDToStatus[request.IDResponseErrorsValidate].Code, request.IDResponseErrorsValidate) - case stream.RateLimitErrType: - set(request.MapResultIDToStatus[request.IDResponseErrorsRateLimit].Code, request.IDResponseErrorsRateLimit) - case stream.QueueFullErrType: - set(request.MapResultIDToStatus[request.IDResponseErrorsFullQueue].Code, request.IDResponseErrorsFullQueue) - break L - case stream.ShuttingDownErrType: - set(request.MapResultIDToStatus[request.IDResponseErrorsShuttingDown].Code, request.IDResponseErrorsShuttingDown) - break L + errStatusCode = http.StatusBadRequest default: - set(request.MapResultIDToStatus[request.IDResponseErrorsInternal].Code, request.IDResponseErrorsInternal) + errStatusCode = request.MapResultIDToStatus[errID].Code + } + if errStatusCode > statusCode { + statusCode = errStatusCode + id = errID } } + var err error + if len(errorMessages) > 0 { + err = errors.New(strings.Join(errorMessages, ", ")) + } + writeResult(c, id, statusCode, &jsonResult, err) +} + +func writeResult(c *request.Context, id request.ResultID, statusCode int, result *jsonResult, err error) { var body interface{} - if code >= http.StatusBadRequest { + if statusCode >= http.StatusBadRequest { // this signals to the client that we're closing the connection // but also signals to http.Server that it should close it: // https://golang.org/src/net/http/server.go#L1254 c.Header().Add(headers.Connection, "Close") - body = sr + body = result } else if _, ok := c.Request.URL.Query()["verbose"]; ok { - body = sr - } - var err error - if errMsg := sr.Error(); errMsg != "" { - err = errors.New(errMsg) + body = result } - c.Result.Set(id, code, request.MapResultIDToStatus[id].Keyword, body, err) + c.Result.Set(id, statusCode, request.MapResultIDToStatus[id].Keyword, body, err) c.Write() } -func sendError(c *request.Context, err *stream.Error) { - sr := stream.Result{} - sr.Add(err) - sendResponse(c, &sr) -} -func validateRequest(r *http.Request) *stream.Error { - if r.Method != http.MethodPost { - return &stream.Error{ - Type: stream.MethodForbiddenErrType, - Message: "only POST requests are supported", - } - } +type compressedRequestReaderError struct { + error +} - if !strings.Contains(r.Header.Get(headers.ContentType), "application/x-ndjson") { - return &stream.Error{ - Type: stream.InvalidInputErrType, - Message: fmt.Sprintf("invalid content type: '%s'", r.Header.Get(headers.ContentType)), - } - } - return nil +type jsonResult struct { + Accepted int `json:"accepted"` + Errors []jsonError `json:"errors,omitempty"` } -func bodyReader(r *http.Request) 
(io.ReadCloser, *stream.Error) { - reader, err := decoder.CompressedRequestReader(r) - if err != nil { - return nil, &stream.Error{ - Type: stream.InvalidInputErrType, - Message: err.Error(), - } - } - return reader, nil +type jsonError struct { + Message string `json:"message"` + Document string `json:"document,omitempty"` } diff --git a/beater/api/intake/handler_test.go b/beater/api/intake/handler_test.go index 875d5d658aa..d17a63e1ea0 100644 --- a/beater/api/intake/handler_test.go +++ b/beater/api/intake/handler_test.go @@ -22,29 +22,26 @@ import ( "compress/gzip" "compress/zlib" "context" + "io/ioutil" "net/http" "net/http/httptest" "path/filepath" "testing" - "github.com/elastic/apm-server/approvaltest" - "github.com/elastic/apm-server/beater/api/ratelimit" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/apm-server/beater/beatertest" + "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" "github.com/elastic/apm-server/processor/stream" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/tests/loader" ) func TestIntakeHandler(t *testing.T) { - var rateLimit, err = ratelimit.NewStore(1, 0, 0) - require.NoError(t, err) for name, tc := range map[string]testcaseIntakeHandler{ "Method": { path: "errors.ndjson", @@ -60,11 +57,6 @@ func TestIntakeHandler(t *testing.T) { }(), code: http.StatusBadRequest, id: request.IDResponseErrorsValidate, }, - "RateLimit": { - path: "errors.ndjson", - rateLimit: rateLimit, - code: http.StatusTooManyRequests, id: request.IDResponseErrorsRateLimit, - }, "BodyReader": { path: "errors.ndjson", r: func() *http.Request { @@ -95,13 +87,24 @@ func TestIntakeHandler(t *testing.T) { code: http.StatusAccepted, id: request.IDResponseValidAccepted, }, "TooLarge": { - path: "errors.ndjson", processor: &stream.Processor{}, + path: "errors.ndjson", + processor: func() *stream.Processor { + p := stream.BackendProcessor(config.DefaultConfig()) + p.MaxEventSize = 10 + return p + }(), code: http.StatusBadRequest, id: request.IDResponseErrorsRequestTooLarge}, "Closing": { - path: "errors.ndjson", reporter: beatertest.ErrorReporterFn(publish.ErrChannelClosed), + path: "errors.ndjson", + batchProcessor: model.ProcessBatchFunc(func(context.Context, *model.Batch) error { + return publish.ErrChannelClosed + }), code: http.StatusServiceUnavailable, id: request.IDResponseErrorsShuttingDown}, "FullQueue": { - path: "errors.ndjson", reporter: beatertest.ErrorReporterFn(publish.ErrFull), + path: "errors.ndjson", + batchProcessor: model.ProcessBatchFunc(func(context.Context, *model.Batch) error { + return publish.ErrFull + }), code: http.StatusServiceUnavailable, id: request.IDResponseErrorsFullQueue}, "InvalidEvent": { path: "invalid-event.ndjson", @@ -119,7 +122,7 @@ func TestIntakeHandler(t *testing.T) { path: "invalid-metadata-2.ndjson", code: http.StatusBadRequest, id: request.IDResponseErrorsValidate}, "UnrecognizedEvent": { - path: "unrecognized-event.ndjson", + path: "invalid-event-type.ndjson", code: http.StatusBadRequest, id: request.IDResponseErrorsValidate}, "Success": { path: "errors.ndjson", @@ -130,11 +133,8 @@ func TestIntakeHandler(t *testing.T) { // setup tc.setup(t) 
- if tc.rateLimit != nil { - tc.c.RateLimiter = tc.rateLimit.ForIP(&http.Request{}) - } // call handler - h := Handler(tc.processor, tc.reporter) + h := Handler(tc.processor, emptyRequestMetadata, tc.batchProcessor) h(tc.c) require.Equal(t, string(tc.id), string(tc.c.Result.ID)) @@ -154,13 +154,12 @@ func TestIntakeHandler(t *testing.T) { } type testcaseIntakeHandler struct { - c *request.Context - w *httptest.ResponseRecorder - r *http.Request - processor *stream.Processor - rateLimit *ratelimit.Store - reporter func(ctx context.Context, p publish.PendingReq) error - path string + c *request.Context + w *httptest.ResponseRecorder + r *http.Request + processor *stream.Processor + batchProcessor model.BatchProcessor + path string code int id request.ResultID @@ -171,12 +170,12 @@ func (tc *testcaseIntakeHandler) setup(t *testing.T) { cfg := config.DefaultConfig() tc.processor = stream.BackendProcessor(cfg) } - if tc.reporter == nil { - tc.reporter = beatertest.NilReporter + if tc.batchProcessor == nil { + tc.batchProcessor = modelprocessor.Nop{} } if tc.r == nil { - data, err := loader.LoadDataAsBytes(filepath.Join("../testdata/intake-v2/", tc.path)) + data, err := ioutil.ReadFile(filepath.Join("../../../testdata/intake-v2", tc.path)) require.NoError(t, err) tc.r = httptest.NewRequest("POST", "/", bytes.NewBuffer(data)) @@ -193,7 +192,7 @@ func (tc *testcaseIntakeHandler) setup(t *testing.T) { } func compressedRequest(t *testing.T, compressionType string, compressPayload bool) *http.Request { - data, err := loader.LoadDataAsBytes("../testdata/intake-v2/errors.ndjson") + data, err := ioutil.ReadFile("../../../testdata/intake-v2/errors.ndjson") require.NoError(t, err) var buf bytes.Buffer if compressPayload { @@ -216,3 +215,7 @@ func compressedRequest(t *testing.T, compressionType string, compressPayload boo req.Header.Set(headers.ContentEncoding, compressionType) return req } + +func emptyRequestMetadata(*request.Context) model.APMEvent { + return model.APMEvent{} +} diff --git a/beater/api/intake/test_approved/BodyReader.approved.json b/beater/api/intake/test_approved/BodyReader.approved.json index fc636bfdcb3..116bbf02348 100644 --- a/beater/api/intake/test_approved/BodyReader.approved.json +++ b/beater/api/intake/test_approved/BodyReader.approved.json @@ -2,7 +2,7 @@ "accepted": 0, "errors": [ { - "message": "EOF while reading metadata" + "message": "validation error: 'metadata' required" } ] } diff --git a/beater/api/intake/test_approved/Decoder.approved.json b/beater/api/intake/test_approved/Decoder.approved.json deleted file mode 100644 index cadc282feca..00000000000 --- a/beater/api/intake/test_approved/Decoder.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "message": "cannot decode `xyz`" - } - ] -} diff --git a/beater/api/intake/test_approved/InvalidEvent.approved.json b/beater/api/intake/test_approved/InvalidEvent.approved.json index 703797831cb..f0a468a25ff 100644 --- a/beater/api/intake/test_approved/InvalidEvent.approved.json +++ b/beater/api/intake/test_approved/InvalidEvent.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{ \"transaction\": { \"id\": 12345, \"trace_id\": \"0123456789abcdef0123456789abcdef\", \"parent_id\": \"abcdefabcdef01234567\", \"type\": \"request\", \"duration\": 32.592981, \"span_count\": { \"started\": 21 } } } ", - "message": "failed to validate transaction: error validating JSON: I[#] S[#] doesn't validate with \"transaction#\"\n I[#] S[#/allOf/3] allOf failed\n I[#/id] S[#/allOf/3/properties/id/type] 
expected string, but got number" + "message": "decode error: data read error: v2.transactionRoot.Transaction: v2.transaction.ID: ReadString: expects \" or n," } ] } diff --git a/beater/api/intake/test_approved/InvalidJSONEvent.approved.json b/beater/api/intake/test_approved/InvalidJSONEvent.approved.json index ac1e798d6f8..ae54db10f8d 100644 --- a/beater/api/intake/test_approved/InvalidJSONEvent.approved.json +++ b/beater/api/intake/test_approved/InvalidJSONEvent.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{ \"invalid-json\" }", - "message": "data read error: invalid character '}' after object key" + "message": "invalid-json: did not recognize object type" } ] } diff --git a/beater/api/intake/test_approved/InvalidJSONMetadata.approved.json b/beater/api/intake/test_approved/InvalidJSONMetadata.approved.json index b1f27fbb794..13a42198474 100644 --- a/beater/api/intake/test_approved/InvalidJSONMetadata.approved.json +++ b/beater/api/intake/test_approved/InvalidJSONMetadata.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{\"metadata\": {\"invalid-json\"}}", - "message": "data read error: invalid character '}' after object key" + "message": "decode error: data read error: v2.metadataRoot.Metadata: v2.metadata.readFieldHash: expect :," } ] } diff --git a/beater/api/intake/test_approved/InvalidMetadata.approved.json b/beater/api/intake/test_approved/InvalidMetadata.approved.json index da67acd9c67..b36495f24f1 100644 --- a/beater/api/intake/test_approved/InvalidMetadata.approved.json +++ b/beater/api/intake/test_approved/InvalidMetadata.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{\"metadata\": {\"user\": null}}", - "message": "failed to validate metadata: error validating JSON: I[#] S[#] doesn't validate with \"metadata#\"\n I[#] S[#/required] missing properties: \"service\"" + "message": "validation error: 'metadata' required" } ] } diff --git a/beater/api/intake/test_approved/InvalidMetadata2.approved.json b/beater/api/intake/test_approved/InvalidMetadata2.approved.json index 26640294ec7..3b487d4e70f 100644 --- a/beater/api/intake/test_approved/InvalidMetadata2.approved.json +++ b/beater/api/intake/test_approved/InvalidMetadata2.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{\"not\": \"metadata\"}", - "message": "did not recognize object type" + "message": "validation error: 'metadata' required" } ] } diff --git a/beater/api/intake/test_approved/RateLimit.approved.json b/beater/api/intake/test_approved/RateLimit.approved.json deleted file mode 100644 index 507b15773aa..00000000000 --- a/beater/api/intake/test_approved/RateLimit.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "message": "rate limit exceeded" - } - ] -} diff --git a/beater/api/intake/test_approved/RumRateLimit.approved.json b/beater/api/intake/test_approved/RumRateLimit.approved.json deleted file mode 100644 index 748c625209c..00000000000 --- a/beater/api/intake/test_approved/RumRateLimit.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 30, - "errors": [ - { - "message": "rate limit exceeded" - } - ] -} diff --git a/beater/api/intake/test_approved/TooLarge.approved.json b/beater/api/intake/test_approved/TooLarge.approved.json index bb3e5059a90..c18fbfeef24 100644 --- a/beater/api/intake/test_approved/TooLarge.approved.json +++ b/beater/api/intake/test_approved/TooLarge.approved.json @@ -2,6 +2,7 @@ "accepted": 0, "errors": [ { + "document": "{\"metadata", "message": "event exceeded the permitted size." 
} ] diff --git a/beater/api/intake/test_approved/UnrecognizedEvent.approved.json b/beater/api/intake/test_approved/UnrecognizedEvent.approved.json index 0dfff3f52d8..a3231874cb8 100644 --- a/beater/api/intake/test_approved/UnrecognizedEvent.approved.json +++ b/beater/api/intake/test_approved/UnrecognizedEvent.approved.json @@ -3,7 +3,7 @@ "errors": [ { "document": "{\"tennis-court\": {\"name\": \"Centre Court, Wimbledon\"}}", - "message": "did not recognize object type" + "message": "tennis-court: did not recognize object type" } ] } diff --git a/beater/api/intake/test_approved/integration/backend/TestIntakeBackendHandler_AuthorizationMiddleware/Unauthorized.approved.json b/beater/api/intake/test_approved/integration/backend/TestIntakeBackendHandler_AuthorizationMiddleware/Unauthorized.approved.json index d7cdee759b9..ca3ae3ce16d 100644 --- a/beater/api/intake/test_approved/integration/backend/TestIntakeBackendHandler_AuthorizationMiddleware/Unauthorized.approved.json +++ b/beater/api/intake/test_approved/integration/backend/TestIntakeBackendHandler_AuthorizationMiddleware/Unauthorized.approved.json @@ -1,3 +1,3 @@ { - "error": "unauthorized" + "error": "authentication failed: missing or improperly formatted Authorization header: expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'" } diff --git a/beater/api/mux.go b/beater/api/mux.go index b6d50b4d968..98356f2892b 100644 --- a/beater/api/mux.go +++ b/beater/api/mux.go @@ -18,26 +18,34 @@ package api import ( + "net" "net/http" + "net/http/pprof" + "regexp" - "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/pkg/errors" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" - "github.com/elastic/apm-server/beater/api/asset/sourcemap" + "github.com/elastic/apm-server/agentcfg" + apisourcemap "github.com/elastic/apm-server/beater/api/asset/sourcemap" "github.com/elastic/apm-server/beater/api/config/agent" "github.com/elastic/apm-server/beater/api/intake" "github.com/elastic/apm-server/beater/api/profile" "github.com/elastic/apm-server/beater/api/root" - "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/middleware" + "github.com/elastic/apm-server/beater/ratelimit" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/kibana" logs "github.com/elastic/apm-server/log" - psourcemap "github.com/elastic/apm-server/processor/asset/sourcemap" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" "github.com/elastic/apm-server/processor/stream" "github.com/elastic/apm-server/publish" + "github.com/elastic/apm-server/sourcemap" ) const ( @@ -65,155 +73,264 @@ const ( IntakeRUMV3Path = "/intake/v3/rum/events" ) -type route struct { - path string - handlerFn func(*config.Config, *authorization.Builder, publish.Reporter) (request.Handler, error) -} - // NewMux registers apm handlers to paths building up the APM Server API. 
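// (Descriptive note on the signature change below: NewMux now receives its
// collaborators explicitly, namely the authenticator, agent-config fetcher,
// rate-limit store, sourcemap store and publish-readiness probe; the
// routeBuilder further down carries them so each per-route constructor can
// assemble its own handler and middleware chain.)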
-func NewMux(beaterConfig *config.Config, report publish.Reporter) (*http.ServeMux, error) { +func NewMux( + beatInfo beat.Info, + beaterConfig *config.Config, + report publish.Reporter, + batchProcessor model.BatchProcessor, + authenticator *auth.Authenticator, + fetcher agentcfg.Fetcher, + ratelimitStore *ratelimit.Store, + sourcemapStore *sourcemap.Store, + fleetManaged bool, + publishReady func() bool, +) (*http.ServeMux, error) { pool := request.NewContextPool() mux := http.NewServeMux() logger := logp.NewLogger(logs.Handler) - auth, err := authorization.NewBuilder(beaterConfig) - if err != nil { - return nil, err + builder := routeBuilder{ + info: beatInfo, + cfg: beaterConfig, + authenticator: authenticator, + reporter: report, + batchProcessor: batchProcessor, + ratelimitStore: ratelimitStore, + sourcemapStore: sourcemapStore, + fleetManaged: fleetManaged, } + type route struct { + path string + handlerFn func() (request.Handler, error) + } routeMap := []route{ - {RootPath, rootHandler}, - {AssetSourcemapPath, sourcemapHandler}, - {AgentConfigPath, backendAgentConfigHandler}, - {AgentConfigRUMPath, rumAgentConfigHandler}, - {IntakeRUMPath, rumIntakeHandler}, - {IntakeRUMV3Path, rumV3IntakeHandler}, - {IntakePath, backendIntakeHandler}, + {RootPath, builder.rootHandler(publishReady)}, + {AssetSourcemapPath, builder.sourcemapHandler}, + {AgentConfigPath, builder.backendAgentConfigHandler(fetcher)}, + {AgentConfigRUMPath, builder.rumAgentConfigHandler(fetcher)}, + {IntakeRUMPath, builder.rumIntakeHandler(stream.RUMV2Processor)}, + {IntakeRUMV3Path, builder.rumIntakeHandler(stream.RUMV3Processor)}, + {IntakePath, builder.backendIntakeHandler}, // The profile endpoint is in Beta - {ProfilePath, profileHandler}, + {ProfilePath, builder.profileHandler}, } for _, route := range routeMap { - h, err := route.handlerFn(beaterConfig, auth, report) + h, err := route.handlerFn() if err != nil { return nil, err } logger.Infof("Path %s added to request handler", route.path) mux.Handle(route.path, pool.HTTPHandler(h)) - } - if beaterConfig.Expvar.IsEnabled() { + if beaterConfig.Expvar.Enabled { path := beaterConfig.Expvar.URL logger.Infof("Path %s added to request handler", path) mux.Handle(path, http.HandlerFunc(debugVarsHandler)) } + if beaterConfig.Pprof.Enabled { + const path = "/debug/pprof" + logger.Infof("Path %s added to request handler", path) + mux.Handle(path+"/", http.HandlerFunc(pprof.Index)) + mux.Handle(path+"/cmdline", http.HandlerFunc(pprof.Cmdline)) + mux.Handle(path+"/profile", http.HandlerFunc(pprof.Profile)) + mux.Handle(path+"/symbol", http.HandlerFunc(pprof.Symbol)) + mux.Handle(path+"/trace", http.HandlerFunc(pprof.Trace)) + } return mux, nil } -func profileHandler(cfg *config.Config, builder *authorization.Builder, reporter publish.Reporter) (request.Handler, error) { - h := profile.Handler(reporter) - authHandler := builder.ForPrivilege(authorization.PrivilegeEventWrite.Action) - return middleware.Wrap(h, backendMiddleware(cfg, authHandler, profile.MonitoringMap)...) 
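// A hedged aside on how these per-route handlers get their middleware:
// middleware.Wrap folds the chain so the first middleware listed is the
// outermost. A minimal sketch, assuming the package's Middleware type is
// func(request.Handler) (request.Handler, error):
//
//	func wrap(h request.Handler, mws ...middleware.Middleware) (request.Handler, error) {
//		// Walk backwards so mws[0] ends up wrapping everything else.
//		for i := len(mws) - 1; i >= 0; i-- {
//			var err error
//			if h, err = mws[i](h); err != nil {
//				return nil, err
//			}
//		}
//		return h, nil
//	}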
+type routeBuilder struct { + info beat.Info + cfg *config.Config + authenticator *auth.Authenticator + reporter publish.Reporter + batchProcessor model.BatchProcessor + ratelimitStore *ratelimit.Store + sourcemapStore *sourcemap.Store + fleetManaged bool } -func backendIntakeHandler(cfg *config.Config, builder *authorization.Builder, reporter publish.Reporter) (request.Handler, error) { - h := intake.Handler(stream.BackendProcessor(cfg), reporter) - authHandler := builder.ForPrivilege(authorization.PrivilegeEventWrite.Action) - return middleware.Wrap(h, backendMiddleware(cfg, authHandler, intake.MonitoringMap)...) +func (r *routeBuilder) profileHandler() (request.Handler, error) { + requestMetadataFunc := emptyRequestMetadata + if r.cfg.AugmentEnabled { + requestMetadataFunc = backendRequestMetadata + } + h := profile.Handler(requestMetadataFunc, r.batchProcessor) + return middleware.Wrap(h, backendMiddleware(r.cfg, r.authenticator, r.ratelimitStore, profile.MonitoringMap)...) } -func rumIntakeHandler(cfg *config.Config, _ *authorization.Builder, reporter publish.Reporter) (request.Handler, error) { - h := intake.Handler(stream.RUMV2Processor(cfg), reporter) - return middleware.Wrap(h, rumMiddleware(cfg, nil, intake.MonitoringMap)...) +func (r *routeBuilder) backendIntakeHandler() (request.Handler, error) { + requestMetadataFunc := emptyRequestMetadata + if r.cfg.AugmentEnabled { + requestMetadataFunc = backendRequestMetadata + } + h := intake.Handler(stream.BackendProcessor(r.cfg), requestMetadataFunc, r.batchProcessor) + return middleware.Wrap(h, backendMiddleware(r.cfg, r.authenticator, r.ratelimitStore, intake.MonitoringMap)...) } -func rumV3IntakeHandler(cfg *config.Config, _ *authorization.Builder, reporter publish.Reporter) (request.Handler, error) { - h := intake.Handler(stream.RUMV3Processor(cfg), reporter) - return middleware.Wrap(h, rumMiddleware(cfg, nil, intake.MonitoringMap)...) +func (r *routeBuilder) rumIntakeHandler(newProcessor func(*config.Config) *stream.Processor) func() (request.Handler, error) { + requestMetadataFunc := emptyRequestMetadata + if r.cfg.AugmentEnabled { + requestMetadataFunc = rumRequestMetadata + } + return func() (request.Handler, error) { + var batchProcessors modelprocessor.Chained + // The order of these processors is important. Source mapping must happen before identifying library frames, or + // frames to exclude from error grouping; identifying library frames must happen before updating the error culprit. 
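// (Hedged aside on the ordering comment above: it holds because a chained
// batch processor applies its elements in slice order. Presumably
// modelprocessor.Chained is a []model.BatchProcessor whose ProcessBatch
// invokes each element in turn, so source-mapped frames reach the
// library-frame, grouping and culprit processors appended below, with
// r.batchProcessor always running last.)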
+ if r.sourcemapStore != nil { + batchProcessors = append(batchProcessors, sourcemap.BatchProcessor{ + Store: r.sourcemapStore, + Timeout: r.cfg.RumConfig.SourceMapping.Timeout, + }) + } + if r.cfg.RumConfig.LibraryPattern != "" { + re, err := regexp.Compile(r.cfg.RumConfig.LibraryPattern) + if err != nil { + return nil, errors.Wrap(err, "invalid library pattern regex") + } + batchProcessors = append(batchProcessors, modelprocessor.SetLibraryFrame{Pattern: re}) + } + if r.cfg.RumConfig.ExcludeFromGrouping != "" { + re, err := regexp.Compile(r.cfg.RumConfig.ExcludeFromGrouping) + if err != nil { + return nil, errors.Wrap(err, "invalid exclude from grouping regex") + } + batchProcessors = append(batchProcessors, modelprocessor.SetExcludeFromGrouping{Pattern: re}) + } + if r.sourcemapStore != nil { + batchProcessors = append(batchProcessors, modelprocessor.SetCulprit{}) + } + batchProcessors = append(batchProcessors, r.batchProcessor) // r.batchProcessor always goes last + h := intake.Handler(newProcessor(r.cfg), requestMetadataFunc, batchProcessors) + return middleware.Wrap(h, rumMiddleware(r.cfg, r.authenticator, r.ratelimitStore, intake.MonitoringMap)...) + } } -func sourcemapHandler(cfg *config.Config, builder *authorization.Builder, reporter publish.Reporter) (request.Handler, error) { - h := sourcemap.Handler(sourcemap.DecodeSourcemapFormData, psourcemap.Processor, reporter) - authHandler := builder.ForPrivilege(authorization.PrivilegeSourcemapWrite.Action) - return middleware.Wrap(h, sourcemapMiddleware(cfg, authHandler)...) +func (r *routeBuilder) sourcemapHandler() (request.Handler, error) { + h := apisourcemap.Handler(r.reporter, r.sourcemapStore) + return middleware.Wrap(h, sourcemapMiddleware(r.cfg, r.authenticator, r.ratelimitStore)...) } -func backendAgentConfigHandler(cfg *config.Config, builder *authorization.Builder, _ publish.Reporter) (request.Handler, error) { - authHandler := builder.ForPrivilege(authorization.PrivilegeAgentConfigRead.Action) - return agentConfigHandler(cfg, authHandler, backendMiddleware) +func (r *routeBuilder) rootHandler(publishReady func() bool) func() (request.Handler, error) { + return func() (request.Handler, error) { + h := root.Handler(root.HandlerConfig{ + Version: r.info.Version, + PublishReady: publishReady, + }) + return middleware.Wrap(h, rootMiddleware(r.cfg, r.authenticator)...) + } } -func rumAgentConfigHandler(cfg *config.Config, _ *authorization.Builder, _ publish.Reporter) (request.Handler, error) { - return agentConfigHandler(cfg, nil, rumMiddleware) +func (r *routeBuilder) backendAgentConfigHandler(f agentcfg.Fetcher) func() (request.Handler, error) { + return func() (request.Handler, error) { + return agentConfigHandler(r.cfg, r.authenticator, r.ratelimitStore, backendMiddleware, f, r.fleetManaged) + } } -type middlewareFunc func(*config.Config, *authorization.Handler, map[request.ResultID]*monitoring.Int) []middleware.Middleware - -func agentConfigHandler(cfg *config.Config, authHandler *authorization.Handler, middlewareFunc middlewareFunc) (request.Handler, error) { - var client kibana.Client - if cfg.Kibana.Enabled { - client = kibana.NewConnectingClient(&cfg.Kibana.ClientConfig) +func (r *routeBuilder) rumAgentConfigHandler(f agentcfg.Fetcher) func() (request.Handler, error) { + return func() (request.Handler, error) { + return agentConfigHandler(r.cfg, r.authenticator, r.ratelimitStore, rumMiddleware, f, r.fleetManaged) } - h := agent.Handler(client, cfg.AgentConfig) - msg := "Agent remote configuration is disabled. 
" + - "Configure the `apm-server.kibana` section in apm-server.yml to enable it. " + - "If you are using a RUM agent, you also need to configure the `apm-server.rum` section. " + - "If you are not using remote configuration, you can safely ignore this error." - ks := middleware.KillSwitchMiddleware(cfg.Kibana.Enabled, msg) - return middleware.Wrap(h, append(middlewareFunc(cfg, authHandler, agent.MonitoringMap), ks)...) } -func rootHandler(cfg *config.Config, builder *authorization.Builder, _ publish.Reporter) (request.Handler, error) { - return middleware.Wrap(root.Handler(), - rootMiddleware(cfg, builder.ForAnyOfPrivileges(authorization.ActionAny))...) +type middlewareFunc func(*config.Config, *auth.Authenticator, *ratelimit.Store, map[request.ResultID]*monitoring.Int) []middleware.Middleware + +func agentConfigHandler( + cfg *config.Config, + authenticator *auth.Authenticator, + ratelimitStore *ratelimit.Store, + middlewareFunc middlewareFunc, + f agentcfg.Fetcher, + fleetManaged bool, +) (request.Handler, error) { + mw := middlewareFunc(cfg, authenticator, ratelimitStore, agent.MonitoringMap) + h := agent.NewHandler(f, cfg.KibanaAgentConfig, cfg.DefaultServiceEnvironment, cfg.AgentAuth.Anonymous.AllowAgent) + + if !cfg.Kibana.Enabled && !fleetManaged { + msg := "Agent remote configuration is disabled. " + + "Configure the `apm-server.kibana` section in apm-server.yml to enable it. " + + "If you are using a RUM agent, you also need to configure the `apm-server.rum` section. " + + "If you are not using remote configuration, you can safely ignore this error." + mw = append(mw, middleware.KillSwitchMiddleware(cfg.Kibana.Enabled, msg)) + } + + return middleware.Wrap(h, mw...) } func apmMiddleware(m map[request.ResultID]*monitoring.Int) []middleware.Middleware { return []middleware.Middleware{ middleware.LogMiddleware(), + middleware.TimeoutMiddleware(), middleware.RecoverPanicMiddleware(), middleware.MonitoringMiddleware(m), middleware.RequestTimeMiddleware(), } } -func backendMiddleware(cfg *config.Config, auth *authorization.Handler, m map[request.ResultID]*monitoring.Int) []middleware.Middleware { +func backendMiddleware(cfg *config.Config, authenticator *auth.Authenticator, ratelimitStore *ratelimit.Store, m map[request.ResultID]*monitoring.Int) []middleware.Middleware { backendMiddleware := append(apmMiddleware(m), - middleware.AuthorizationMiddleware(auth, true), + middleware.ResponseHeadersMiddleware(cfg.ResponseHeaders), + middleware.AuthMiddleware(authenticator, true), + middleware.AnonymousRateLimitMiddleware(ratelimitStore), ) - if cfg.AugmentEnabled { - backendMiddleware = append(backendMiddleware, middleware.SystemMetadataMiddleware()) - } return backendMiddleware } -func rumMiddleware(cfg *config.Config, _ *authorization.Handler, m map[request.ResultID]*monitoring.Int) []middleware.Middleware { +func rumMiddleware(cfg *config.Config, authenticator *auth.Authenticator, ratelimitStore *ratelimit.Store, m map[request.ResultID]*monitoring.Int) []middleware.Middleware { msg := "RUM endpoint is disabled. " + "Configure the `apm-server.rum` section in apm-server.yml to enable ingestion of RUM events. " + "If you are not using the RUM agent, you can safely ignore this error." 
rumMiddleware := append(apmMiddleware(m), + middleware.ResponseHeadersMiddleware(cfg.ResponseHeaders), middleware.ResponseHeadersMiddleware(cfg.RumConfig.ResponseHeaders), - middleware.SetRumFlagMiddleware(), - middleware.SetIPRateLimitMiddleware(cfg.RumConfig.EventRate), middleware.CORSMiddleware(cfg.RumConfig.AllowOrigins, cfg.RumConfig.AllowHeaders), - middleware.KillSwitchMiddleware(cfg.RumConfig.IsEnabled(), msg), + middleware.AuthMiddleware(authenticator, true), + middleware.AnonymousRateLimitMiddleware(ratelimitStore), ) - if cfg.AugmentEnabled { - rumMiddleware = append(rumMiddleware, middleware.UserMetadataMiddleware()) - } - return rumMiddleware + return append(rumMiddleware, middleware.KillSwitchMiddleware(cfg.RumConfig.Enabled, msg)) } -func sourcemapMiddleware(cfg *config.Config, auth *authorization.Handler) []middleware.Middleware { +func sourcemapMiddleware(cfg *config.Config, auth *auth.Authenticator, ratelimitStore *ratelimit.Store) []middleware.Middleware { msg := "Sourcemap upload endpoint is disabled. " + "Configure the `apm-server.rum` section in apm-server.yml to enable sourcemap uploads. " + "If you are not using the RUM agent, you can safely ignore this error." - enabled := cfg.RumConfig.IsEnabled() && cfg.RumConfig.SourceMapping.IsEnabled() - return append(backendMiddleware(cfg, auth, sourcemap.MonitoringMap), - middleware.KillSwitchMiddleware(enabled, msg)) + if cfg.DataStreams.Enabled { + msg = "When APM Server is managed by Fleet, Sourcemaps must be uploaded directly to Elasticsearch." + } + enabled := cfg.RumConfig.Enabled && cfg.RumConfig.SourceMapping.Enabled && !cfg.DataStreams.Enabled + backendMiddleware := backendMiddleware(cfg, auth, ratelimitStore, apisourcemap.MonitoringMap) + return append(backendMiddleware, middleware.KillSwitchMiddleware(enabled, msg)) } -func rootMiddleware(_ *config.Config, auth *authorization.Handler) []middleware.Middleware { +func rootMiddleware(cfg *config.Config, authenticator *auth.Authenticator) []middleware.Middleware { return append(apmMiddleware(root.MonitoringMap), - middleware.AuthorizationMiddleware(auth, false)) + middleware.ResponseHeadersMiddleware(cfg.ResponseHeaders), + middleware.AuthMiddleware(authenticator, false), + ) +} + +func emptyRequestMetadata(c *request.Context) model.APMEvent { + return model.APMEvent{} +} + +func backendRequestMetadata(c *request.Context) model.APMEvent { + return model.APMEvent{Host: model.Host{ + IP: c.ClientIP, + }} +} + +func rumRequestMetadata(c *request.Context) model.APMEvent { + var source model.Source + if tcpAddr, ok := c.SourceAddr.(*net.TCPAddr); ok { + source.IP = tcpAddr.IP + source.Port = tcpAddr.Port + } + return model.APMEvent{ + Client: model.Client{IP: c.ClientIP}, + Source: source, + UserAgent: model.UserAgent{Original: c.UserAgent}, + } } diff --git a/beater/api/mux_config_agent_test.go b/beater/api/mux_config_agent_test.go index 5ce408722d1..236c9c9f94f 100644 --- a/beater/api/mux_config_agent_test.go +++ b/beater/api/mux_config_agent_test.go @@ -22,12 +22,10 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api/config/agent" - "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" @@ -36,7 +34,7 @@ import ( func 
TestConfigAgentHandler_AuthorizationMiddleware(t *testing.T) { t.Run("Unauthorized", func(t *testing.T) { cfg := configEnabledConfigAgent() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" rec, err := requestToMuxerWithPattern(cfg, AgentConfigPath) require.NoError(t, err) require.Equal(t, http.StatusUnauthorized, rec.Code) @@ -45,9 +43,10 @@ func TestConfigAgentHandler_AuthorizationMiddleware(t *testing.T) { t.Run("Authorized", func(t *testing.T) { cfg := configEnabledConfigAgent() - cfg.SecretToken = "1234" - h := map[string]string{headers.Authorization: "Bearer 1234"} - rec, err := requestToMuxerWithHeader(cfg, AgentConfigPath, http.MethodGet, h) + cfg.AgentAuth.SecretToken = "1234" + header := map[string]string{headers.Authorization: "Bearer 1234"} + queryString := map[string]string{"service.name": "service1"} + rec, err := requestToMuxerWithHeaderAndQueryString(cfg, AgentConfigPath, http.MethodGet, header, queryString) require.NoError(t, err) require.NotEqual(t, http.StatusUnauthorized, rec.Code) approvaltest.ApproveJSON(t, approvalPathConfigAgent(t.Name()), rec.Body.Bytes()) @@ -64,35 +63,48 @@ func TestConfigAgentHandler_KillSwitchMiddleware(t *testing.T) { }) t.Run("On", func(t *testing.T) { - rec, err := requestToMuxerWithPattern(configEnabledConfigAgent(), AgentConfigPath) + queryString := map[string]string{"service.name": "service1"} + rec, err := requestToMuxerWithHeaderAndQueryString(configEnabledConfigAgent(), AgentConfigPath, http.MethodGet, nil, queryString) require.NoError(t, err) require.NotEqual(t, http.StatusForbidden, rec.Code) approvaltest.ApproveJSON(t, approvalPathConfigAgent(t.Name()), rec.Body.Bytes()) }) } +func TestConfigAgentHandler_DirectConfiguration(t *testing.T) { + cfg := config.DefaultConfig() + cfg.AgentConfigs = []config.AgentConfig{ + { + Service: config.Service{Name: "service1", Environment: ""}, + Config: map[string]string{"key1": "val1"}, + Etag: "abc123", + }, + } + + mux, err := muxBuilder{Managed: true}.build(cfg) + require.NoError(t, err) + + r := httptest.NewRequest(http.MethodGet, AgentConfigPath, nil) + r = requestWithQueryString(r, map[string]string{"service.name": "service1"}) + + w := httptest.NewRecorder() + mux.ServeHTTP(w, r) + require.Equal(t, http.StatusOK, w.Code) + approvaltest.ApproveJSON(t, approvalPathConfigAgent(t.Name()), w.Body.Bytes()) + +} + func TestConfigAgentHandler_PanicMiddleware(t *testing.T) { - h := testHandler(t, backendAgentConfigHandler) - rec := &beatertest.WriterPanicOnce{} - c := request.NewContext() - c.Reset(rec, httptest.NewRequest(http.MethodGet, "/", nil)) - h(c) - require.Equal(t, http.StatusInternalServerError, rec.StatusCode) - approvaltest.ApproveJSON(t, approvalPathConfigAgent(t.Name()), rec.Body.Bytes()) + testPanicMiddleware(t, "/config/v1/agents", approvalPathConfigAgent(t.Name())) } func TestConfigAgentHandler_MonitoringMiddleware(t *testing.T) { - h := testHandler(t, backendAgentConfigHandler) - c, _ := beatertest.ContextWithResponseRecorder(http.MethodPost, "/") - - expected := map[request.ResultID]int{ + testMonitoringMiddleware(t, "/config/v1/agents", agent.MonitoringMap, map[request.ResultID]int{ request.IDRequestCount: 1, request.IDResponseCount: 1, request.IDResponseErrorsCount: 1, - request.IDResponseErrorsForbidden: 1} - equal, result := beatertest.CompareMonitoringInt(h, c, expected, agent.MonitoringMap) - assert.True(t, equal, result) - + request.IDResponseErrorsForbidden: 1, + }) } func configEnabledConfigAgent() *config.Config { diff --git a/beater/api/mux_expvar_test.go 
b/beater/api/mux_expvar_test.go new file mode 100644 index 00000000000..f46406d7c14 --- /dev/null +++ b/beater/api/mux_expvar_test.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package api + +import ( + "encoding/json" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/config" +) + +func TestExpvarDefaultDisabled(t *testing.T) { + cfg := config.DefaultConfig() + recorder, err := requestToMuxerWithPattern(cfg, "/debug/vars") + require.NoError(t, err) + assert.Equal(t, http.StatusNotFound, recorder.Code) + assert.Equal(t, `{"error":"404 page not found"}`+"\n", recorder.Body.String()) +} + +func TestExpvarEnabled(t *testing.T) { + cfg := config.DefaultConfig() + cfg.Expvar.Enabled = true + recorder, err := requestToMuxerWithPattern(cfg, "/debug/vars") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + + decoded := make(map[string]interface{}) + err = json.NewDecoder(recorder.Body).Decode(&decoded) + assert.NoError(t, err) + assert.Contains(t, decoded, "memstats") +} diff --git a/beater/api/mux_intake_backend_test.go b/beater/api/mux_intake_backend_test.go index 2e6db8e014c..9a5a03e7451 100644 --- a/beater/api/mux_intake_backend_test.go +++ b/beater/api/mux_intake_backend_test.go @@ -19,7 +19,6 @@ package api import ( "net/http" - "net/http/httptest" "testing" "github.com/stretchr/testify/assert" @@ -27,7 +26,6 @@ import ( "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api/intake" - "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" @@ -36,7 +34,7 @@ import ( func TestIntakeBackendHandler_AuthorizationMiddleware(t *testing.T) { t.Run("Unauthorized", func(t *testing.T) { cfg := config.DefaultConfig() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" rec, err := requestToMuxerWithPattern(cfg, IntakePath) require.NoError(t, err) @@ -46,7 +44,7 @@ func TestIntakeBackendHandler_AuthorizationMiddleware(t *testing.T) { t.Run("Authorized", func(t *testing.T) { cfg := config.DefaultConfig() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" h := map[string]string{headers.Authorization: "Bearer 1234"} rec, err := requestToMuxerWithHeader(cfg, IntakePath, http.MethodGet, h) require.NoError(t, err) @@ -57,27 +55,17 @@ func TestIntakeBackendHandler_AuthorizationMiddleware(t *testing.T) { } func TestIntakeBackendHandler_PanicMiddleware(t *testing.T) { - h := testHandler(t, backendIntakeHandler) - rec := &beatertest.WriterPanicOnce{} - c := 
request.NewContext() - c.Reset(rec, httptest.NewRequest(http.MethodGet, "/", nil)) - h(c) - assert.Equal(t, http.StatusInternalServerError, rec.StatusCode) - approvaltest.ApproveJSON(t, approvalPathIntakeBackend(t.Name()), rec.Body.Bytes()) + testPanicMiddleware(t, "/intake/v2/events", approvalPathIntakeBackend(t.Name())) } func TestIntakeBackendHandler_MonitoringMiddleware(t *testing.T) { - h := testHandler(t, backendIntakeHandler) - c, _ := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") // send GET request resulting in 405 MethodNotAllowed error - expected := map[request.ResultID]int{ + testMonitoringMiddleware(t, "/intake/v2/events", intake.MonitoringMap, map[request.ResultID]int{ request.IDRequestCount: 1, request.IDResponseCount: 1, request.IDResponseErrorsCount: 1, - request.IDResponseErrorsMethodNotAllowed: 1} - - equal, result := beatertest.CompareMonitoringInt(h, c, expected, intake.MonitoringMap) - assert.True(t, equal, result) + request.IDResponseErrorsMethodNotAllowed: 1, + }) } func approvalPathIntakeBackend(f string) string { diff --git a/beater/api/mux_intake_rum_test.go b/beater/api/mux_intake_rum_test.go index 87e747c36f0..d6d386a3558 100644 --- a/beater/api/mux_intake_rum_test.go +++ b/beater/api/mux_intake_rum_test.go @@ -27,25 +27,29 @@ import ( "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api/intake" - "github.com/elastic/apm-server/beater/beatertest" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/middleware" + "github.com/elastic/apm-server/beater/ratelimit" "github.com/elastic/apm-server/beater/request" ) func TestOPTIONS(t *testing.T) { + ratelimitStore, _ := ratelimit.NewStore(1, 1, 1) requestTaken := make(chan struct{}, 1) done := make(chan struct{}, 1) cfg := cfgEnabledRUM() cfg.RumConfig.AllowOrigins = []string{"*"} + authenticator, _ := auth.NewAuthenticator(cfg.AgentAuth) + h, _ := middleware.Wrap( func(c *request.Context) { requestTaken <- struct{}{} <-done }, - rumMiddleware(cfg, nil, intake.MonitoringMap)...) + rumMiddleware(cfg, authenticator, ratelimitStore, intake.MonitoringMap)...) 
// use this to block the single allowed concurrent request go func() { @@ -68,7 +72,7 @@ func TestOPTIONS(t *testing.T) { func TestRUMHandler_NoAuthorizationRequired(t *testing.T) { cfg := cfgEnabledRUM() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" rec, err := requestToMuxerWithPattern(cfg, IntakeRUMPath) require.NoError(t, err) assert.NotEqual(t, http.StatusUnauthorized, rec.Code) @@ -94,45 +98,38 @@ func TestRUMHandler_KillSwitchMiddleware(t *testing.T) { func TestRUMHandler_CORSMiddleware(t *testing.T) { cfg := cfgEnabledRUM() cfg.RumConfig.AllowOrigins = []string{"foo"} - h, err := rumIntakeHandler(cfg, nil, beatertest.NilReporter) - require.NoError(t, err) - c, w := beatertest.ContextWithResponseRecorder(http.MethodPost, "/") - c.Request.Header.Set(headers.Origin, "bar") - h(c) - - assert.Equal(t, http.StatusForbidden, w.Code) + h := newTestMux(t, cfg) + + for _, path := range []string{"/intake/v2/rum/events", "/intake/v3/rum/events"} { + req := httptest.NewRequest(http.MethodPost, path, nil) + req.Header.Set(headers.Origin, "bar") + w := httptest.NewRecorder() + h.ServeHTTP(w, req) + assert.Equal(t, http.StatusForbidden, w.Code) + } } func TestIntakeRUMHandler_PanicMiddleware(t *testing.T) { - h, err := rumIntakeHandler(config.DefaultConfig(), nil, beatertest.NilReporter) - require.NoError(t, err) - rec := &beatertest.WriterPanicOnce{} - c := request.NewContext() - c.Reset(rec, httptest.NewRequest(http.MethodGet, "/", nil)) - h(c) - assert.Equal(t, http.StatusInternalServerError, rec.StatusCode) - approvaltest.ApproveJSON(t, approvalPathIntakeRUM(t.Name()), rec.Body.Bytes()) + testPanicMiddleware(t, "/intake/v2/rum/events", approvalPathIntakeRUM(t.Name())) + testPanicMiddleware(t, "/intake/v3/rum/events", approvalPathIntakeRUM(t.Name())) } func TestRumHandler_MonitoringMiddleware(t *testing.T) { - h, err := rumIntakeHandler(config.DefaultConfig(), nil, beatertest.NilReporter) - require.NoError(t, err) - c, _ := beatertest.ContextWithResponseRecorder(http.MethodPost, "/") // send GET request resulting in 403 Forbidden error - expected := map[request.ResultID]int{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDResponseErrorsForbidden: 1} - - equal, result := beatertest.CompareMonitoringInt(h, c, expected, intake.MonitoringMap) - assert.True(t, equal, result) + for _, path := range []string{"/intake/v2/rum/events", "/intake/v3/rum/events"} { + testMonitoringMiddleware(t, path, intake.MonitoringMap, map[request.ResultID]int{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseErrorsCount: 1, + request.IDResponseErrorsForbidden: 1, + }) + } } func cfgEnabledRUM() *config.Config { cfg := config.DefaultConfig() - t := true - cfg.RumConfig.Enabled = &t + cfg.RumConfig.Enabled = true + cfg.AgentAuth.Anonymous.Enabled = true return cfg } diff --git a/beater/api/mux_root_test.go b/beater/api/mux_root_test.go index 4fdbb8b302f..87aeef3ae7c 100644 --- a/beater/api/mux_root_test.go +++ b/beater/api/mux_root_test.go @@ -19,7 +19,6 @@ package api import ( "net/http" - "net/http/httptest" "testing" "github.com/stretchr/testify/assert" @@ -27,7 +26,6 @@ import ( "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api/root" - "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers"
"github.com/elastic/apm-server/beater/request" @@ -35,7 +33,7 @@ import ( func TestRootHandler_AuthorizationMiddleware(t *testing.T) { cfg := config.DefaultConfig() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" t.Run("No auth", func(t *testing.T) { rec, err := requestToMuxerWithPattern(cfg, RootPath) @@ -54,29 +52,16 @@ func TestRootHandler_AuthorizationMiddleware(t *testing.T) { } func TestRootHandler_PanicMiddleware(t *testing.T) { - h := testHandler(t, rootHandler) - rec := &beatertest.WriterPanicOnce{} - c := request.NewContext() - c.Reset(rec, httptest.NewRequest(http.MethodGet, "/", nil)) - h(c) - - assert.Equal(t, http.StatusInternalServerError, rec.StatusCode) - approvaltest.ApproveJSON(t, approvalPathRoot(t.Name()), rec.Body.Bytes()) + testPanicMiddleware(t, "/", approvalPathRoot(t.Name())) } func TestRootHandler_MonitoringMiddleware(t *testing.T) { - h := testHandler(t, rootHandler) - c, _ := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") - - // send GET request resulting in 403 Forbidden error as RUM is disabled by default - expected := map[request.ResultID]int{ + testMonitoringMiddleware(t, "/", root.MonitoringMap, map[request.ResultID]int{ request.IDRequestCount: 1, request.IDResponseCount: 1, request.IDResponseValidCount: 1, - request.IDResponseValidOK: 1} - - equal, result := beatertest.CompareMonitoringInt(h, c, expected, root.MonitoringMap) - assert.True(t, equal, result) + request.IDResponseValidOK: 1, + }) } func approvalPathRoot(f string) string { return "root/test_approved/integration/" + f } diff --git a/beater/api/mux_sourcemap_handler_test.go b/beater/api/mux_sourcemap_handler_test.go index f17d3eb76de..f2a9f4c562e 100644 --- a/beater/api/mux_sourcemap_handler_test.go +++ b/beater/api/mux_sourcemap_handler_test.go @@ -19,15 +19,12 @@ package api import ( "net/http" - "net/http/httptest" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api/asset/sourcemap" - "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" @@ -36,16 +33,27 @@ import ( func TestSourcemapHandler_AuthorizationMiddleware(t *testing.T) { t.Run("Unauthorized", func(t *testing.T) { cfg := cfgEnabledRUM() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" + cfg.AgentAuth.Anonymous.Enabled = false rec, err := requestToMuxerWithPattern(cfg, AssetSourcemapPath) require.NoError(t, err) require.Equal(t, http.StatusUnauthorized, rec.Code) approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) }) - + t.Run("Forbidden", func(t *testing.T) { + // anonymous access is not allowed for uploading source maps + cfg := cfgEnabledRUM() + cfg.AgentAuth.SecretToken = "1234" + rec, err := requestToMuxerWithPattern(cfg, AssetSourcemapPath) + require.NoError(t, err) + require.Equal(t, http.StatusForbidden, rec.Code) + approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) + }) t.Run("Authorized", func(t *testing.T) { + // anonymous access is not allowed for uploading source maps cfg := cfgEnabledRUM() - cfg.SecretToken = "1234" + cfg.AgentAuth.SecretToken = "1234" + cfg.AgentAuth.Anonymous.Enabled = false h := map[string]string{headers.Authorization: "Bearer 1234"} rec, err := requestToMuxerWithHeader(cfg, 
AssetSourcemapPath, http.MethodPost, h) require.NoError(t, err) @@ -64,17 +72,26 @@ func TestSourcemapHandler_KillSwitchMiddleware(t *testing.T) { t.Run("OffSourcemap", func(t *testing.T) { cfg := config.DefaultConfig() - rum := true - cfg.RumConfig.Enabled = &rum - cfg.RumConfig.SourceMapping.Enabled = new(bool) - rec, err := requestToMuxerWithPattern(config.DefaultConfig(), AssetSourcemapPath) + cfg.RumConfig.SourceMapping.Enabled = true + rec, err := requestToMuxerWithPattern(cfg, AssetSourcemapPath) + require.NoError(t, err) + require.Equal(t, http.StatusForbidden, rec.Code) + approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) + }) + + t.Run("DataStreams", func(t *testing.T) { + cfg := cfgEnabledRUM() + cfg.DataStreams.Enabled = true + rec, err := requestToMuxerWithPattern(cfg, AssetSourcemapPath) require.NoError(t, err) require.Equal(t, http.StatusForbidden, rec.Code) approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) }) t.Run("On", func(t *testing.T) { - rec, err := requestToMuxerWithPattern(cfgEnabledRUM(), AssetSourcemapPath) + cfg := cfgEnabledRUM() + cfg.RumConfig.SourceMapping.Enabled = true + rec, err := requestToMuxerWithPattern(cfg, AssetSourcemapPath) require.NoError(t, err) require.NotEqual(t, http.StatusForbidden, rec.Code) approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) @@ -82,28 +99,17 @@ func TestSourcemapHandler_KillSwitchMiddleware(t *testing.T) { } func TestSourcemapHandler_PanicMiddleware(t *testing.T) { - h := testHandler(t, sourcemapHandler) - rec := &beatertest.WriterPanicOnce{} - c := request.NewContext() - c.Reset(rec, httptest.NewRequest(http.MethodGet, "/", nil)) - h(c) - require.Equal(t, http.StatusInternalServerError, rec.StatusCode) - approvaltest.ApproveJSON(t, approvalPathAsset(t.Name()), rec.Body.Bytes()) + testPanicMiddleware(t, "/assets/v1/sourcemaps", approvalPathAsset(t.Name())) } func TestSourcemapHandler_MonitoringMiddleware(t *testing.T) { - h := testHandler(t, sourcemapHandler) - c, _ := beatertest.ContextWithResponseRecorder(http.MethodPost, "/") - // send GET request resulting in 403 Forbidden error as RUM is disabled by default - expected := map[request.ResultID]int{ + testMonitoringMiddleware(t, "/assets/v1/sourcemaps", sourcemap.MonitoringMap, map[request.ResultID]int{ request.IDRequestCount: 1, request.IDResponseCount: 1, request.IDResponseErrorsCount: 1, - request.IDResponseErrorsForbidden: 1} - - equal, result := beatertest.CompareMonitoringInt(h, c, expected, sourcemap.MonitoringMap) - assert.True(t, equal, result) + request.IDResponseErrorsForbidden: 1, + }) } func approvalPathAsset(f string) string { return "asset/sourcemap/test_approved/integration/" + f } diff --git a/beater/api/mux_test.go b/beater/api/mux_test.go index b64d6d3c52c..0dcec678527 100644 --- a/beater/api/mux_test.go +++ b/beater/api/mux_test.go @@ -18,47 +18,123 @@ package api import ( + "context" "net/http" "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/agentcfg" + "github.com/elastic/apm-server/approvaltest" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/ratelimit" "github.com/elastic/apm-server/beater/request" + 
"github.com/elastic/apm-server/model" "github.com/elastic/apm-server/publish" + "github.com/elastic/apm-server/sourcemap" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/monitoring" ) func requestToMuxerWithPattern(cfg *config.Config, pattern string) (*httptest.ResponseRecorder, error) { r := httptest.NewRequest(http.MethodPost, pattern, nil) return requestToMuxer(cfg, r) } + func requestToMuxerWithHeader(cfg *config.Config, pattern string, method string, header map[string]string) (*httptest.ResponseRecorder, error) { r := httptest.NewRequest(method, pattern, nil) + return requestToMuxer(cfg, requestWithHeader(r, header)) +} + +func requestWithHeader(r *http.Request, header map[string]string) *http.Request { for k, v := range header { r.Header.Set(k, v) } + return r +} + +func requestWithQueryString(r *http.Request, queryString map[string]string) *http.Request { + m := r.URL.Query() + for k, v := range queryString { + m.Set(k, v) + } + r.URL.RawQuery = m.Encode() + return r +} + +func requestToMuxerWithHeaderAndQueryString( + cfg *config.Config, + pattern, method string, + header, queryString map[string]string, +) (*httptest.ResponseRecorder, error) { + r := httptest.NewRequest(method, pattern, nil) + r = requestWithQueryString(r, queryString) + r = requestWithHeader(r, header) return requestToMuxer(cfg, r) } func requestToMuxer(cfg *config.Config, r *http.Request) (*httptest.ResponseRecorder, error) { - mux, err := NewMux(cfg, beatertest.NilReporter) + mux, err := muxBuilder{}.build(cfg) if err != nil { return nil, err } w := httptest.NewRecorder() - h, _ := mux.Handler(r) - h.ServeHTTP(w, r) + mux.ServeHTTP(w, r) return w, nil } -func testHandler(t *testing.T, fn func(*config.Config, *authorization.Builder, publish.Reporter) (request.Handler, error)) request.Handler { - cfg := config.DefaultConfig() - builder, err := authorization.NewBuilder(cfg) - require.NoError(t, err) - h, err := fn(cfg, builder, beatertest.NilReporter) +func testPanicMiddleware(t *testing.T, urlPath string, approvalPath string) { + h := newTestMux(t, config.DefaultConfig()) + req := httptest.NewRequest(http.MethodGet, urlPath, nil) + + var rec beatertest.WriterPanicOnce + h.ServeHTTP(&rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.StatusCode) + approvaltest.ApproveJSON(t, approvalPath, rec.Body.Bytes()) +} + +func testMonitoringMiddleware(t *testing.T, urlPath string, monitoringMap map[request.ResultID]*monitoring.Int, expected map[request.ResultID]int) { + beatertest.ClearRegistry(monitoringMap) + + h := newTestMux(t, config.DefaultConfig()) + req := httptest.NewRequest(http.MethodGet, urlPath, nil) + h.ServeHTTP(httptest.NewRecorder(), req) + + equal, result := beatertest.CompareMonitoringInt(expected, monitoringMap) + assert.True(t, equal, result) +} + +func newTestMux(t *testing.T, cfg *config.Config) http.Handler { + mux, err := muxBuilder{}.build(cfg) require.NoError(t, err) - return h + return mux +} + +type muxBuilder struct { + SourcemapStore *sourcemap.Store + Managed bool +} + +func (m muxBuilder) build(cfg *config.Config) (http.Handler, error) { + nopReporter := func(context.Context, publish.PendingReq) error { return nil } + nopBatchProcessor := model.ProcessBatchFunc(func(context.Context, *model.Batch) error { return nil }) + ratelimitStore, _ := ratelimit.NewStore(1000, 1000, 1000) + authenticator, _ := auth.NewAuthenticator(cfg.AgentAuth) + return NewMux( + beat.Info{Version: "1.2.3"}, + cfg, + nopReporter, 
+ nopBatchProcessor, + authenticator, + agentcfg.NewFetcher(cfg), + ratelimitStore, + m.SourcemapStore, + m.Managed, + func() bool { return true }, + ) } diff --git a/beater/api/profile/convert.go b/beater/api/profile/convert.go new file mode 100644 index 00000000000..15c1f58de09 --- /dev/null +++ b/beater/api/profile/convert.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package profile + +import ( + "fmt" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/gofrs/uuid" + "github.com/google/pprof/profile" + + "github.com/elastic/apm-server/model" +) + +// appendProfileSampleBatch converts a pprof profile into a batch of model.ProfileSamples, +// and appends it to out. +func appendProfileSampleBatch(pp *profile.Profile, baseEvent model.APMEvent, out model.Batch) model.Batch { + + // Precompute value field names for use in each event. + // TODO(axw) limit to well-known value names? + baseEvent.Timestamp = time.Unix(0, pp.TimeNanos) + valueFieldNames := make([]string, len(pp.SampleType)) + for i, sampleType := range pp.SampleType { + sampleUnit := normalizeUnit(sampleType.Unit) + // Go profiles report samples.count, Node.js profiles report sample.count. + // We use samples.count for both so we can aggregate on one field. + if sampleType.Type == "sample" || sampleType.Type == "samples" { + valueFieldNames[i] = "samples.count" + } else { + valueFieldNames[i] = sampleType.Type + "." + sampleUnit + } + + } + + // Generate a unique profile ID shared by all samples in the profile. + // If we can't generate a UUID for whatever reason, omit the profile ID. + var profileID string + if uuid, err := uuid.NewV4(); err == nil { + profileID = fmt.Sprintf("%x", uuid) + } + + for _, sample := range pp.Sample { + var stack []model.ProfileSampleStackframe + if n := len(sample.Location); n > 0 { + hash := xxhash.New() + stack = make([]model.ProfileSampleStackframe, n) + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + line := loc.Line[0] // aggregated at function level + + // NOTE(axw) Currently we hash the function names so that + // we can aggregate stacks across multiple builds, or where + // binaries are not reproducible. + // + // If we decide to identify stack traces and frames using + // function addresses, then we need to subtract the mapping's + // start address to eliminate the effects of ASLR, i.e.
+ // + // var buf [8]byte + // binary.BigEndian.PutUint64(buf[:], loc.Address-loc.Mapping.Start) + // hash.Write(buf[:]) + + hash.WriteString(line.Function.Name) + stack[i] = model.ProfileSampleStackframe{ + ID: fmt.Sprintf("%x", hash.Sum(nil)), + Function: line.Function.Name, + Filename: line.Function.Filename, + Line: line.Line, + } + } + } + + event := baseEvent + event.Processor = model.ProfileProcessor + event.Labels = event.Labels.Clone() + if n := len(sample.Label); n > 0 { + for k, v := range sample.Label { + event.Labels[k] = v + } + } + + values := make(map[string]int64, len(sample.Value)) + for i, value := range sample.Value { + values[valueFieldNames[i]] = value + } + + event.ProfileSample = &model.ProfileSample{ + Duration: time.Duration(pp.DurationNanos), + ProfileID: profileID, + Stack: stack, + Values: values, + } + out = append(out, event) + } + return out +} + +func normalizeUnit(unit string) string { + switch unit { + case "nanoseconds": + unit = "ns" + + case "microseconds": + unit = "us" + } + return unit +} diff --git a/beater/api/profile/handler.go b/beater/api/profile/handler.go index 86c0fd95d39..d3a8dae996d 100644 --- a/beater/api/profile/handler.go +++ b/beater/api/profile/handler.go @@ -33,9 +33,8 @@ import ( "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/model/modeldecoder" + v2 "github.com/elastic/apm-server/model/modeldecoder/v2" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/validation" ) var ( @@ -56,8 +55,13 @@ const ( profileContentLengthLimit = 10 * 1024 * 1024 ) +// RequestMetadataFunc is a function type supplied to Handler for extracting +// metadata from the request. This is used for conditionally injecting the +// source IP address as `client.ip` for RUM. +type RequestMetadataFunc func(*request.Context) model.APMEvent + // Handler returns a request.Handler for managing profile requests. 
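A quick illustration of the value-field naming that appendProfileSampleBatch (above) implements: sample types named "sample" or "samples" collapse to the shared samples.count field, and every other type is suffixed with its normalized unit. This standalone sketch is illustrative only and is not part of the patch; the sample types listed are assumptions about typical pprof input:

package main

import "fmt"

// normalizeUnit mirrors the helper added in convert.go above.
func normalizeUnit(unit string) string {
	switch unit {
	case "nanoseconds":
		unit = "ns"
	case "microseconds":
		unit = "us"
	}
	return unit
}

func main() {
	// Hypothetical pprof sample types: Go and Node.js CPU samples,
	// CPU time, and heap usage.
	types := []struct{ Type, Unit string }{
		{"samples", "count"},
		{"sample", "count"},
		{"cpu", "nanoseconds"},
		{"inuse_space", "bytes"},
	}
	for _, st := range types {
		name := st.Type + "." + normalizeUnit(st.Unit)
		if st.Type == "sample" || st.Type == "samples" {
			name = "samples.count" // aggregate Go and Node.js samples under one field
		}
		fmt.Println(name)
	}
	// Prints: samples.count, samples.count, cpu.ns, inuse_space.bytes
}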
-func Handler(report publish.Reporter) request.Handler { +func Handler(requestMetadataFunc RequestMetadataFunc, processor model.BatchProcessor) request.Handler { handle := func(c *request.Context) (*result, error) { if c.Request.Method != http.MethodPost { return nil, requestError{ @@ -72,17 +76,9 @@ func Handler(report publish.Reporter) request.Handler { } } - ok := c.RateLimiter == nil || c.RateLimiter.Allow() - if !ok { - return nil, requestError{ - id: request.IDResponseErrorsRateLimit, - err: errors.New("rate limit exceeded"), - } - } - var totalLimitRemaining int64 = profileContentLengthLimit var profiles []*pprof_profile.Profile - var profileMetadata model.Metadata + baseEvent := requestMetadataFunc(c) mr, err := c.Request.MultipartReader() if err != nil { return nil, err @@ -104,26 +100,15 @@ func Handler(report publish.Reporter) request.Handler { } } r := &decoder.LimitedReader{R: part, N: metadataContentLengthLimit} - raw, err := decoder.DecodeJSONData(r) - if err != nil { + dec := decoder.NewJSONDecoder(r) + if err := v2.DecodeMetadata(dec, &baseEvent); err != nil { if r.N < 0 { return nil, requestError{ id: request.IDResponseErrorsRequestTooLarge, err: err, } } - return nil, requestError{ - id: request.IDResponseErrorsDecode, - err: errors.Wrap(err, "failed to decode metadata JSON"), - } - } - metadata := model.Metadata{ - UserAgent: model.UserAgent{Original: c.RequestMetadata.UserAgent}, - Client: model.Client{IP: c.RequestMetadata.ClientIP}, - System: model.System{IP: c.RequestMetadata.SystemIP}} - if err := modeldecoder.DecodeMetadata(raw, false, &metadata); err != nil { - var ve *validation.Error - if errors.As(err, &ve) { + if _, ok := err.(modeldecoder.ValidationError); ok { return nil, requestError{ id: request.IDResponseErrorsValidate, err: errors.Wrap(err, "invalid metadata"), @@ -131,10 +116,9 @@ func Handler(report publish.Reporter) request.Handler { } return nil, requestError{ id: request.IDResponseErrorsDecode, - err: errors.Wrap(err, "failed to decode metadata"), + err: errors.Wrap(err, "invalid metadata"), } } - profileMetadata = metadata case "profile": params, err := validateContentType(http.Header(part.Header), pprofMediaType) @@ -172,15 +156,11 @@ func Handler(report publish.Reporter) request.Handler { } } - transformables := make([]transform.Transformable, len(profiles)) - for i, p := range profiles { - transformables[i] = model.PprofProfile{ - Metadata: profileMetadata, - Profile: p, - } + var batch model.Batch + for _, profile := range profiles { + batch = appendProfileSampleBatch(profile, baseEvent, batch) } - - if err := report(c.Request.Context(), publish.PendingReq{Transformables: transformables}); err != nil { + if err := processor.ProcessBatch(c.Request.Context(), &batch); err != nil { switch err { case publish.ErrChannelClosed: return nil, requestError{ @@ -195,7 +175,7 @@ func Handler(report publish.Reporter) request.Handler { } return nil, err } - return &result{Accepted: len(transformables)}, nil + return &result{Accepted: len(batch)}, nil } return func(c *request.Context) { result, err := handle(c) diff --git a/beater/api/profile/handler_test.go b/beater/api/profile/handler_test.go index ce5c19518fa..46efe9f7732 100644 --- a/beater/api/profile/handler_test.go +++ b/beater/api/profile/handler_test.go @@ -23,21 +23,19 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "mime/multipart" "net/http" "net/http/httptest" "net/textproto" - "runtime/pprof" "strings" "testing" - "github.com/elastic/apm-server/beater/api/ratelimit" 
"github.com/elastic/apm-server/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/headers" "github.com/elastic/apm-server/beater/request" "github.com/elastic/apm-server/publish" @@ -46,8 +44,6 @@ import ( const pprofContentType = `application/x-protobuf; messageType="perftools.profiles.Profile"` func TestHandler(t *testing.T) { - var rateLimit, err = ratelimit.NewStore(1, 0, 0) - require.NoError(t, err) for name, tc := range map[string]testcaseIntakeHandler{ "MethodNotAllowed": { r: httptest.NewRequest(http.MethodGet, "/", nil), @@ -61,19 +57,19 @@ func TestHandler(t *testing.T) { }(), id: request.IDResponseErrorsValidate, }, - "RateLimitExceeded": { - rateLimit: rateLimit, - id: request.IDResponseErrorsRateLimit, - }, "Closing": { - reporter: func(t *testing.T) publish.Reporter { - return beatertest.ErrorReporterFn(publish.ErrChannelClosed) + batchProcessor: func(t *testing.T) model.BatchProcessor { + return model.ProcessBatchFunc(func(context.Context, *model.Batch) error { + return publish.ErrChannelClosed + }) }, id: request.IDResponseErrorsShuttingDown, }, "FullQueue": { - reporter: func(t *testing.T) publish.Reporter { - return beatertest.ErrorReporterFn(publish.ErrFull) + batchProcessor: func(t *testing.T) model.BatchProcessor { + return model.ProcessBatchFunc(func(context.Context, *model.Batch) error { + return publish.ErrFull + }) }, id: request.IDResponseErrorsFullQueue, }, @@ -127,30 +123,29 @@ func TestHandler(t *testing.T) { "Profile": { id: request.IDResponseValidAccepted, parts: []part{ - heapProfilePart(), - part{ + heapProfilePart(t), + { name: "profile", // No messageType param specified, so pprof is assumed. 
contentType: "application/x-protobuf", - body: heapProfileBody(), + body: heapProfileBody(t), }, - part{ + { name: "metadata", contentType: "application/json", - body: strings.NewReader(`{"service":{"name":"foo","agent":{"name":"go","version":"1.0"}}}`), + body: strings.NewReader(`{"service":{"name":"foo","agent":{"name":"java","version":"1.2.0"}}}`), }, }, - body: prettyJSON(map[string]interface{}{"accepted": 2}), + body: prettyJSON(map[string]interface{}{"accepted": 84}), reports: 1, - reporter: func(t *testing.T) publish.Reporter { - return func(ctx context.Context, req publish.PendingReq) error { - require.Len(t, req.Transformables, 2) - for _, tr := range req.Transformables { - profile := tr.(model.PprofProfile) - assert.Equal(t, "foo", profile.Metadata.Service.Name) + batchProcessor: func(t *testing.T) model.BatchProcessor { + return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + assert.Len(t, *batch, 84) + for _, event := range *batch { + assert.Equal(t, "foo", event.Service.Name) } return nil - } + }) }, }, "ProfileInvalidContentType": { @@ -185,7 +180,7 @@ func TestHandler(t *testing.T) { "ProfileTooLarge": { id: request.IDResponseErrorsRequestTooLarge, parts: []part{ - heapProfilePart(), + heapProfilePart(t), part{ name: "profile", contentType: pprofContentType, @@ -197,10 +192,7 @@ func TestHandler(t *testing.T) { } { t.Run(name, func(t *testing.T) { tc.setup(t) - if tc.rateLimit != nil { - tc.c.RateLimiter = tc.rateLimit.ForIP(&http.Request{}) - } - Handler(tc.reporter(t))(tc.c) + Handler(emptyRequestMetadata, tc.batchProcessor(t))(tc.c) assert.Equal(t, string(tc.id), string(tc.c.Result.ID)) resultStatus := request.MapResultIDToStatus[tc.id] @@ -220,32 +212,31 @@ func TestHandler(t *testing.T) { } type testcaseIntakeHandler struct { - c *request.Context - w *httptest.ResponseRecorder - r *http.Request - rateLimit *ratelimit.Store - reporter func(t *testing.T) publish.Reporter - reports int - parts []part + c *request.Context + w *httptest.ResponseRecorder + r *http.Request + batchProcessor func(t *testing.T) model.BatchProcessor + reports int + parts []part id request.ResultID body string } func (tc *testcaseIntakeHandler) setup(t *testing.T) { - if tc.reporter == nil { - tc.reporter = func(t *testing.T) publish.Reporter { - return beatertest.NilReporter + if tc.batchProcessor == nil { + tc.batchProcessor = func(t *testing.T) model.BatchProcessor { + return model.ProcessBatchFunc(func(context.Context, *model.Batch) error { return nil }) } } if tc.reports > 0 { - orig := tc.reporter - tc.reporter = func(t *testing.T) publish.Reporter { + orig := tc.batchProcessor + tc.batchProcessor = func(t *testing.T) model.BatchProcessor { orig := orig(t) - return func(ctx context.Context, req publish.PendingReq) error { + return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { tc.reports-- - return orig(ctx, req) - } + return orig.ProcessBatch(ctx, batch) + }) } } if tc.r == nil { @@ -272,16 +263,14 @@ func (tc *testcaseIntakeHandler) setup(t *testing.T) { tc.c.Reset(tc.w, tc.r) } -func heapProfilePart() part { - return part{name: "profile", contentType: pprofContentType, body: heapProfileBody()} +func heapProfilePart(t testing.TB) part { + return part{name: "profile", contentType: pprofContentType, body: heapProfileBody(t)} } -func heapProfileBody() io.Reader { - var buf bytes.Buffer - if err := pprof.WriteHeapProfile(&buf); err != nil { - panic(err) - } - return &buf +func heapProfileBody(t testing.TB) io.Reader { + data, err := 
ioutil.ReadFile("../../../testdata/profile/heap.pprof") + require.NoError(t, err) + return bytes.NewReader(data) } type part struct { @@ -297,3 +286,7 @@ func prettyJSON(v interface{}) string { enc.Encode(v) return buf.String() } + +func emptyRequestMetadata(*request.Context) model.APMEvent { + return model.APMEvent{} +} diff --git a/beater/api/ratelimit/store_test.go b/beater/api/ratelimit/store_test.go deleted file mode 100644 index e6559e089f0..00000000000 --- a/beater/api/ratelimit/store_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package ratelimit - -import ( - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCacheInitFails(t *testing.T) { - for _, test := range []struct { - size int - limit int - }{ - {-1, 1}, - {0, 1}, - {1, -1}, - } { - c, err := NewStore(test.size, test.limit, 3) - assert.Error(t, err) - assert.Nil(t, c) - assert.Nil(t, c.ForIP(&http.Request{})) - } -} - -func TestCacheEviction(t *testing.T) { - cacheSize := 2 - limit := 1 //multiplied times BurstMultiplier 3 - - store, err := NewStore(cacheSize, limit, 3) - require.NoError(t, err) - - // add new limiter - rlA := store.acquire("a") - rlA.AllowN(time.Now(), 3) - - // add new limiter - rlB := store.acquire("b") - rlB.AllowN(time.Now(), 2) - - // reuse evicted limiter rlA - rlC := store.acquire("c") - assert.False(t, rlC.Allow()) - assert.Equal(t, rlC, store.evictedLimiter) - - // reuse evicted limiter rlB - rlD := store.acquire("a") - assert.True(t, rlD.Allow()) - assert.False(t, rlD.Allow()) - assert.Equal(t, rlD, store.evictedLimiter) - // check that limiter are independent - assert.True(t, rlD != rlC) - store.evictedLimiter = nil - assert.NotNil(t, rlD) - assert.NotNil(t, rlC) -} - -func TestCacheOk(t *testing.T) { - store, err := NewStore(1, 1, 1) - require.NoError(t, err) - limiter := store.acquire("a") - assert.NotNil(t, limiter) -} - -func TestRateLimitPerIP(t *testing.T) { - store, err := NewStore(2, 1, 1) - require.NoError(t, err) - - var reqFrom = func(ip string) *http.Request { - r := http.Request{} - r.Header = http.Header{} - r.Header.Set("X-Real-Ip", ip) - return &r - } - assert.True(t, store.ForIP(reqFrom("10.10.10.1")).Allow()) - assert.False(t, store.ForIP(reqFrom("10.10.10.1")).Allow()) - assert.True(t, store.ForIP(reqFrom("10.10.10.2")).Allow()) - assert.False(t, store.ForIP(reqFrom("10.10.10.3")).Allow()) -} diff --git a/beater/api/root/handler.go b/beater/api/root/handler.go index 73620de4a0f..a064b6acddb 100644 --- a/beater/api/root/handler.go +++ b/beater/api/root/handler.go @@ -20,12 +20,11 @@ package root import ( "time" - "github.com/elastic/apm-server/beater/authorization" - 
"github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/beats/v7/libbeat/version" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/request" ) @@ -35,15 +34,19 @@ var ( registry = monitoring.Default.NewRegistry("apm-server.root") ) +// HandlerConfig holds configuration for Handler. +type HandlerConfig struct { + // PublishReady reports whether or not the server is ready for publishing events. + PublishReady func() bool + + // Version holds the APM Server version. + Version string +} + // Handler returns error if route does not exist, -// otherwise returns information about the server. The detail level differs for authorized and non-authorized requests. +// otherwise returns information about the server. The detail level differs for authenticated and anonymous requests. //TODO: only allow GET, HEAD requests (breaking change) -func Handler() request.Handler { - serverInfo := common.MapStr{ - "build_date": version.BuildTime().Format(time.RFC3339), - "build_sha": version.Commit(), - "version": version.GetDefaultVersion(), - } +func Handler(cfg HandlerConfig) request.Handler { return func(c *request.Context) { if c.Request.URL.Path != "/" { @@ -52,15 +55,19 @@ func Handler() request.Handler { return } - c.Result.SetDefault(request.IDResponseValidOK) - authorized, err := c.Authorization.AuthorizedFor(c.Request.Context(), authorization.ResourceInternal) - if err != nil { - c.Result.Err = err + serverInfo := common.MapStr{ + "build_date": version.BuildTime().Format(time.RFC3339), + "build_sha": version.Commit(), + "version": cfg.Version, } - if authorized { - c.Result.Body = serverInfo + if cfg.PublishReady != nil { + serverInfo["publish_ready"] = cfg.PublishReady() } + c.Result.SetDefault(request.IDResponseValidOK) + if c.Authentication.Method != auth.MethodAnonymous { + c.Result.Body = serverInfo + } c.Write() } } diff --git a/beater/api/root/handler_test.go b/beater/api/root/handler_test.go index 2eb2b28c00b..9a2427a6cac 100644 --- a/beater/api/root/handler_test.go +++ b/beater/api/root/handler_test.go @@ -22,21 +22,18 @@ import ( "net/http" "testing" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/version" - "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/beater/config" ) func TestRootHandler(t *testing.T) { t.Run("404", func(t *testing.T) { c, w := beatertest.ContextWithResponseRecorder(http.MethodGet, "/abc/xyz") - Handler()(c) + Handler(HandlerConfig{Version: "1.2.3"})(c) assert.Equal(t, http.StatusNotFound, w.Code) assert.Equal(t, `{"error":"404 page not found"}`+"\n", w.Body.String()) @@ -44,32 +41,45 @@ func TestRootHandler(t *testing.T) { t.Run("ok", func(t *testing.T) { c, w := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") - c.Authorization = &authorization.DenyAuth{} - Handler()(c) + Handler(HandlerConfig{Version: "1.2.3"})(c) assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "", w.Body.String()) }) - t.Run("unauthorized", func(t *testing.T) { + t.Run("unauthenticated", func(t *testing.T) { c, w := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") - c.Authorization = authorization.DenyAuth{} - Handler()(c) + c.Authentication.Method = auth.MethodAnonymous + 
Handler(HandlerConfig{Version: "1.2.3"})(c) assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "", w.Body.String()) }) - t.Run("authorized", func(t *testing.T) { + t.Run("authenticated", func(t *testing.T) { c, w := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") - builder, err := authorization.NewBuilder(&config.Config{SecretToken: "abc"}) - require.NoError(t, err) - c.Authorization = builder.ForPrivilege("").AuthorizationFor("Bearer", "abc") - Handler()(c) + c.Authentication.Method = auth.MethodNone + Handler(HandlerConfig{Version: "1.2.3"})(c) assert.Equal(t, http.StatusOK, w.Code) - body := fmt.Sprintf("{\"build_date\":\"0001-01-01T00:00:00Z\",\"build_sha\":\"%s\",\"version\":\"%s\"}\n", - version.Commit(), version.GetDefaultVersion()) + body := fmt.Sprintf("{\"build_date\":\"0001-01-01T00:00:00Z\",\"build_sha\":\"%s\",\"version\":\"1.2.3\"}\n", + version.Commit()) assert.Equal(t, body, w.Body.String()) }) + + t.Run("publish_ready", func(t *testing.T) { + c, w := beatertest.ContextWithResponseRecorder(http.MethodGet, "/") + c.Authentication.Method = auth.MethodNone + + Handler(HandlerConfig{ + PublishReady: func() bool { return false }, + Version: "1.2.3", + })(c) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, fmt.Sprintf( + `{"build_date":"0001-01-01T00:00:00Z","build_sha":%q,"publish_ready":false,"version":"1.2.3"}`+"\n", + version.Commit(), + ), w.Body.String()) + }) } diff --git a/beater/api/root/test_approved/integration/TestRootHandler_AuthorizationMiddleware/Authorized.approved.json b/beater/api/root/test_approved/integration/TestRootHandler_AuthorizationMiddleware/Authorized.approved.json index 05ae72701a0..e039c79ba0a 100644 --- a/beater/api/root/test_approved/integration/TestRootHandler_AuthorizationMiddleware/Authorized.approved.json +++ b/beater/api/root/test_approved/integration/TestRootHandler_AuthorizationMiddleware/Authorized.approved.json @@ -1,5 +1,6 @@ { "build_date": "0001-01-01T00:00:00Z", "build_sha": "unknown", - "version": "8.0.0" + "publish_ready": true, + "version": "1.2.3" } diff --git a/beater/auth/allow.go b/beater/auth/allow.go new file mode 100644 index 00000000000..353d7ef5d14 --- /dev/null +++ b/beater/auth/allow.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" +) + +// allowAuth implements the Authorizer interface. +type allowAuth struct{} + +// Authorize always returns nil, indicating the request is authorized. +func (allowAuth) Authorize(context.Context, Action, Resource) error { + return nil +} diff --git a/beater/auth/allow_test.go b/beater/auth/allow_test.go new file mode 100644 index 00000000000..664183d9384 --- /dev/null +++ b/beater/auth/allow_test.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAllowAuth(t *testing.T) { + handler := allowAuth{} + + err := handler.Authorize(context.Background(), "", Resource{}) + assert.NoError(t, err) +} diff --git a/beater/auth/anonymous.go b/beater/auth/anonymous.go new file mode 100644 index 00000000000..0fea0b4b2aa --- /dev/null +++ b/beater/auth/anonymous.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "fmt" +) + +func newAnonymousAuth(allowAgent, allowService []string) *anonymousAuth { + a := &anonymousAuth{ + allowedAgents: make(map[string]bool), + allowedServices: make(map[string]bool), + } + for _, name := range allowAgent { + a.allowedAgents[name] = true + } + for _, name := range allowService { + a.allowedServices[name] = true + } + return a +} + +// anonymousAuth implements the Authorizer interface, allowing anonymous access with +// optional restriction on agent and service name. +type anonymousAuth struct { + allowedAgents map[string]bool + allowedServices map[string]bool +} + +// Authorize checks if anonymous access is authorized for the given action and resource. +func (a *anonymousAuth) Authorize(ctx context.Context, action Action, resource Resource) error { + switch action { + case ActionAgentConfig: + // Anonymous access to agent config should be restricted by service. + // Agent config queries do not provide an agent name, so that is not + // checked here. Instead, the agent config handlers will filter results + // down to those in the allowed agent list.
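+ // For example, with AllowService set to []string{"opbeans-ios"}, an agent
+ // config query for service "opbeans-rum" fails with ErrUnauthorized, while
+ // a query for "opbeans-ios" succeeds; the allow_service/deny_service cases
+ // in the anonymous_test.go diff below exercise exactly this.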
+ if len(a.allowedServices) != 0 && !a.allowedServices[resource.ServiceName] { + return fmt.Errorf( + "%w: anonymous access not permitted for service %q", + ErrUnauthorized, resource.ServiceName, + ) + } + return nil + case ActionEventIngest: + if len(a.allowedServices) != 0 && !a.allowedServices[resource.ServiceName] { + return fmt.Errorf( + "%w: anonymous access not permitted for service %q", + ErrUnauthorized, resource.ServiceName, + ) + } + if len(a.allowedAgents) != 0 && !a.allowedAgents[resource.AgentName] { + return fmt.Errorf( + "%w: anonymous access not permitted for agent %q", + ErrUnauthorized, resource.AgentName, + ) + } + return nil + case ActionSourcemapUpload: + return fmt.Errorf("%w: anonymous access not permitted for sourcemap uploads", ErrUnauthorized) + default: + return fmt.Errorf("unknown action %q", action) + } +} diff --git a/beater/auth/anonymous_test.go b/beater/auth/anonymous_test.go new file mode 100644 index 00000000000..194166e34d0 --- /dev/null +++ b/beater/auth/anonymous_test.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
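Before the test file body, a minimal sketch of how the allow-lists behave. It assumes it compiles inside package auth, since newAnonymousAuth is unexported; the agent and service names are illustrative, not from the patch:

package auth

import (
	"context"
	"errors"
	"fmt"
)

func ExampleAnonymousAllowLists() {
	// Restrict anonymous access to the RUM agent and a single service.
	a := newAnonymousAuth([]string{"rum-js"}, []string{"opbeans-rum"})

	// Allowed: agent and service are both on the allow-lists.
	err := a.Authorize(context.Background(), ActionEventIngest,
		Resource{AgentName: "rum-js", ServiceName: "opbeans-rum"})
	fmt.Println(err == nil)

	// Denied: the service is not on the allow-list, so the error
	// wraps ErrUnauthorized.
	err = a.Authorize(context.Background(), ActionEventIngest,
		Resource{AgentName: "rum-js", ServiceName: "opbeans-node"})
	fmt.Println(errors.Is(err, ErrUnauthorized))

	// Output:
	// true
	// true
}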
+ +package auth_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/config" +) + +func TestAnonymousAuthorizer(t *testing.T) { + for name, test := range map[string]struct { + allowAgent []string + allowService []string + action auth.Action + resource auth.Resource + expectErr error + }{ + "deny_sourcemap_upload": { + allowAgent: nil, + allowService: nil, + action: auth.ActionSourcemapUpload, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for sourcemap uploads`, auth.ErrUnauthorized), + }, + "deny_unknown_action": { + allowAgent: nil, + allowService: nil, + action: "discombobulate", + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + expectErr: errors.New(`unknown action "discombobulate"`), + }, + "allow_any_agent_config": { + allowAgent: nil, + allowService: nil, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + }, + "allow_any_ingest": { + allowAgent: nil, + allowService: nil, + action: auth.ActionEventIngest, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + }, + "allow_agent": { + allowAgent: []string{"iOS/swift"}, + allowService: nil, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + }, + "deny_agent": { + allowAgent: []string{"rum-js"}, + allowService: nil, + action: auth.ActionEventIngest, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for agent "iOS/swift"`, auth.ErrUnauthorized), + }, + "allow_service": { + allowService: []string{"opbeans-ios"}, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + }, + "deny_service": { + allowService: []string{"opbeans-rum"}, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift", ServiceName: "opbeans-ios"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for service "opbeans-ios"`, auth.ErrUnauthorized), + }, + "allow_agent_config_agent_unspecified": { + allowAgent: []string{"iOS/swift"}, + action: auth.ActionAgentConfig, + resource: auth.Resource{ServiceName: "opbeans-ios"}, // AgentName not checked for agent config + }, + "deny_agent_config_service_unspecified": { + allowService: []string{"opbeans-ios"}, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for service ""`, auth.ErrUnauthorized), + }, + "deny_event_ingest_agent_unspecified": { + allowAgent: []string{"iOS/swift"}, + action: auth.ActionEventIngest, + resource: auth.Resource{ServiceName: "opbeans-ios"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for agent ""`, auth.ErrUnauthorized), + }, + "deny_event_ingest_service_unspecified": { + allowService: []string{"opbeans-ios"}, + action: auth.ActionAgentConfig, + resource: auth.Resource{AgentName: "iOS/swift"}, + expectErr: fmt.Errorf(`%w: anonymous access not permitted for service ""`, auth.ErrUnauthorized), + }, + } { + t.Run(name, func(t *testing.T) { + authorizer := getAnonymousAuthorizer(t, config.AnonymousAgentAuth{ + Enabled: true, + AllowAgent: 
test.allowAgent, + AllowService: test.allowService, + }) + err := authorizer.Authorize(context.Background(), test.action, test.resource) + if test.expectErr != nil { + assert.Equal(t, test.expectErr, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func getAnonymousAuthorizer(t testing.TB, cfg config.AnonymousAgentAuth) auth.Authorizer { + authenticator, err := auth.NewAuthenticator(config.AgentAuth{ + SecretToken: "whatever", // required to enable anonymous auth + Anonymous: cfg, + }) + require.NoError(t, err) + _, authorizer, err := authenticator.Authenticate(context.Background(), "", "") + require.NoError(t, err) + return authorizer +} diff --git a/beater/auth/apikey.go b/beater/auth/apikey.go new file mode 100644 index 00000000000..cc1c0068f6d --- /dev/null +++ b/beater/auth/apikey.go @@ -0,0 +1,202 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "net/http" + "time" + + "github.com/patrickmn/go-cache" + + es "github.com/elastic/apm-server/elasticsearch" +) + +const cleanupInterval = 60 * time.Second + +const ( + // Application is a constant mapped to the "application" field for the Elasticsearch security API + // This identifies privileges and keys created for APM + Application es.AppName = "apm" + + // ResourceInternal is only valid for first authorization of a request. + // The API Key needs to grant privileges to additional resources for successful processing of requests. + ResourceInternal = es.Resource("-") +) + +var ( + // PrivilegeAgentConfigRead identifies the Elasticsearch API Key privilege + // required for authorizing agent config queries. + PrivilegeAgentConfigRead = es.NewPrivilege("agentConfig", "config_agent:read") + + // PrivilegeEventWrite identifies the Elasticsearch API Key privilege required + // for authorizing event ingestion. + PrivilegeEventWrite = es.NewPrivilege("event", "event:write") + + // PrivilegeSourcemapWrite identifies the Elasticsearch API Key privilege + // required for authorizing source map uploads. + PrivilegeSourcemapWrite = es.NewPrivilege("sourcemap", "sourcemap:write") +) + +// AllPrivilegeActions returns all Elasticsearch privilege actions used by APM Server. 
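+// Querying all of the actions together means a single has_privileges response
+// can be cached per API Key and resource; see the comment in hasPrivileges below.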
+func AllPrivilegeActions() []es.PrivilegeAction { + return []es.PrivilegeAction{ + PrivilegeAgentConfigRead.Action, + PrivilegeEventWrite.Action, + PrivilegeSourcemapWrite.Action, + } +} + +type apikeyAuth struct { + esClient es.Client + cache *privilegesCache +} + +type apikeyAuthorizer struct { + permissions es.Permissions +} + +func newApikeyAuth(client es.Client, cache *privilegesCache) *apikeyAuth { + return &apikeyAuth{client, cache} +} + +func (a *apikeyAuth) authenticate(ctx context.Context, credentials string) (*APIKeyAuthenticationDetails, *apikeyAuthorizer, error) { + decoded, err := base64.StdEncoding.DecodeString(credentials) + if err != nil { + return nil, nil, fmt.Errorf("%w: %s", ErrAuthFailed, err) + } + colon := bytes.IndexByte(decoded, ':') + if colon == -1 { + return nil, nil, fmt.Errorf("%w: improperly formatted ApiKey credentials: expected base64(ID:APIKey)", ErrAuthFailed) + } + id := string(decoded[:colon]) + + // Check that the user has any privileges for the internal resource. + response, err := a.hasPrivileges(ctx, id, credentials, ResourceInternal) + if err != nil { + return nil, nil, err + } + permissions := response.Application[Application][ResourceInternal] + haveAny := false + for _, havePermission := range permissions { + if havePermission { + haveAny = true + break + } + } + if !haveAny { + return nil, nil, ErrAuthFailed + } + details := &APIKeyAuthenticationDetails{ID: id, Username: response.Username} + return details, &apikeyAuthorizer{permissions}, nil +} + +func (a *apikeyAuth) hasPrivileges(ctx context.Context, id, credentials string, resource es.Resource) (*es.HasPrivilegesResponse, error) { + cacheKey := id + "_" + string(resource) + if response, ok := a.cache.get(cacheKey); ok { + if response == nil { + return nil, ErrAuthFailed + } + return response, nil + } + + if a.cache.isFull() { + return nil, errors.New( + "api_key limit reached, check your logs for failed authorization attempts " + + "or consider increasing config option `apm-server.api_key.limit`", + ) + } + + request := es.HasPrivilegesRequest{ + Applications: []es.Application{{ + Name: Application, + // it is important to query all privilege actions because they are cached by api key+resources + // querying only the privileges needed for one action would result in an incomplete cache entry + Privileges: AllPrivilegeActions(), + Resources: []es.Resource{resource}, + }}, + } + info, err := es.HasPrivileges(ctx, a.esClient, request, credentials) + if err != nil { + var eserr *es.Error + if errors.As(err, &eserr) && eserr.StatusCode == http.StatusUnauthorized { + // Cache authorization failures to avoid hitting Elasticsearch every time. + a.cache.add(cacheKey, nil) + return nil, ErrAuthFailed + } + return nil, err + } + a.cache.add(cacheKey, &info) + return &info, nil +} + +// Authorize checks if the configured API Key is authorized for the given action and resource. +// +// An API Key is considered to be authorized when the API Key has the configured privileges +// for the requested resource. Permissions are fetched from Elasticsearch and then cached in +// a global cache. +func (a *apikeyAuthorizer) Authorize(ctx context.Context, action Action, _ Resource) error { + // TODO if resource is non-zero, map to different application resources in the privilege queries. + // + // For now, having any valid "apm" application API Key grants access to any agent and service. + // In the future, it should be possible to have API Keys that can be restricted to a set of agent + // and service names.
+ var apikeyPrivilegeAction es.PrivilegeAction + switch action { + case ActionAgentConfig: + apikeyPrivilegeAction = PrivilegeAgentConfigRead.Action + case ActionEventIngest: + apikeyPrivilegeAction = PrivilegeEventWrite.Action + case ActionSourcemapUpload: + apikeyPrivilegeAction = PrivilegeSourcemapWrite.Action + default: + return fmt.Errorf("unknown action %q", action) + } + if a.permissions[apikeyPrivilegeAction] { + return nil + } + return fmt.Errorf("%w: API Key not permitted action %q", ErrUnauthorized, apikeyPrivilegeAction) +} + +type privilegesCache struct { + cache *cache.Cache + size int +} + +func newPrivilegesCache(expiration time.Duration, size int) *privilegesCache { + return &privilegesCache{cache: cache.New(expiration, cleanupInterval), size: size} +} + +func (c *privilegesCache) isFull() bool { + return c.cache.ItemCount() >= c.size +} + +func (c *privilegesCache) get(id string) (*es.HasPrivilegesResponse, bool) { + if val, exists := c.cache.Get(id); exists { + return val.(*es.HasPrivilegesResponse), true + } + return nil, false +} + +func (c *privilegesCache) add(id string, privileges *es.HasPrivilegesResponse) { + c.cache.SetDefault(id, privileges) +} diff --git a/beater/auth/apikey_test.go b/beater/auth/apikey_test.go new file mode 100644 index 00000000000..00c5a1a55ea --- /dev/null +++ b/beater/auth/apikey_test.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
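(Aside: the `authenticate` method above expects the ApiKey credential to be `base64(ID:APIKey)`. A minimal standalone sketch of how a client builds that header value; the ID and key below are hypothetical.)

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical API Key ID and secret; the server never sees them
	// separately, only the combined base64(ID:APIKey) credential.
	id, apiKey := "example_id", "example_key"
	credentials := base64.StdEncoding.EncodeToString([]byte(id + ":" + apiKey))
	// An agent would send: Authorization: ApiKey <credentials>
	fmt.Println("Authorization: ApiKey " + credentials)
}
```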
+ +package auth + +import ( + "context" + "encoding/base64" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/elasticsearch" +) + +func TestAPIKeyAuthorizer(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{ + "username": "api_key_username", + "application": { + "apm": { + "-": {"config_agent:read": true, "event:write": true, "sourcemap:write": false} + } + } + }`)) + })) + defer srv.Close() + + esConfig := elasticsearch.DefaultConfig() + esConfig.Hosts = elasticsearch.Hosts{srv.URL} + apikeyAuthConfig := config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 1, ESConfig: esConfig} + authenticator, err := NewAuthenticator(config.AgentAuth{APIKey: apikeyAuthConfig}) + require.NoError(t, err) + + credentials := base64.StdEncoding.EncodeToString([]byte("valid_id:key_value")) + _, authz, err := authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + require.NoError(t, err) + + err = authz.Authorize(context.Background(), ActionAgentConfig, Resource{}) + assert.NoError(t, err) + + err = authz.Authorize(context.Background(), ActionEventIngest, Resource{}) + assert.NoError(t, err) + + err = authz.Authorize(context.Background(), ActionSourcemapUpload, Resource{}) + assert.EqualError(t, err, `unauthorized: API Key not permitted action "sourcemap:write"`) + assert.True(t, errors.Is(err, ErrUnauthorized)) + + err = authz.Authorize(context.Background(), "unknown", Resource{}) + assert.EqualError(t, err, `unknown action "unknown"`) +} diff --git a/beater/auth/authenticator.go b/beater/auth/authenticator.go new file mode 100644 index 00000000000..5bb7ab2491c --- /dev/null +++ b/beater/auth/authenticator.go @@ -0,0 +1,204 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "crypto/subtle" + "errors" + "fmt" + "time" + + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/elasticsearch" +) + +// Method identifies an authentication and authorization method. +type Method string + +const ( + // MethodNone is used when the server has no auth methods configured, + // meaning access is entirely unrestricted to unauthenticated clients. + // + // This exists to differentiate from allowed unauthenticated/anonymous + // access when the server has other auth methods defined. 
+	MethodNone Method = "none" + + // MethodAPIKey identifies the auth method using Elasticsearch API Keys. + // Clients that authenticate with an API Key may have restricted privileges. + MethodAPIKey Method = "api_key" + + // MethodSecretToken identifies the auth method using a shared secret token. + // Clients with this secret token have unrestricted privileges. + MethodSecretToken Method = "secret_token" + + // MethodAnonymous identifies the anonymous access auth method. + // Anonymous clients will typically be restricted by agent and/or service. + MethodAnonymous Method = "" +) + +// Action identifies an action to authorize. +type Action string + +const ( + // ActionAgentConfig is an Action describing an attempt to read agent config. + ActionAgentConfig Action = "agent_config" + + // ActionEventIngest is an Action describing an attempt to ingest events. + ActionEventIngest Action = "event_ingest" + + // ActionSourcemapUpload is an Action describing an attempt to upload a source map. + ActionSourcemapUpload Action = "sourcemap" +) + +const ( + cacheTimeoutMinute = 1 * time.Minute + expectedAuthHeaderFormat = "expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'" +) + +// ErrAuthFailed is an error returned by Authenticator.Authenticate to indicate +// that a client has failed to authenticate, for example by failing to provide +// credentials or by providing an invalid or expired API Key. +var ErrAuthFailed = errors.New("authentication failed") + +var errAuthMissing = fmt.Errorf("%w: missing or improperly formatted Authorization header: %s", ErrAuthFailed, expectedAuthHeaderFormat) + +// ErrUnauthorized is an error returned by Authorizer.Authorize to indicate that +// the client is unauthorized for some action and resource. This should be wrapped +// to provide a reason, and checked using `errors.Is`. +var ErrUnauthorized = errors.New("unauthorized") + +// Authenticator authenticates clients. +type Authenticator struct { + secretToken string + + apikey *apikeyAuth + anonymous *anonymousAuth +} + +// Authorizer provides an interface for authorizing an action and resource. +type Authorizer interface { + // Authorize checks if the client is authorized for the given action and + // resource, returning ErrUnauthorized if it is not. Other errors may be + // returned, for example because the server cannot communicate with + // external systems. + Authorize(context.Context, Action, Resource) error +} + +// Resource holds parameters for restricting access that may be checked by +// Authorizer.Authorize. +type Resource struct { + // AgentName holds the agent name associated with the agent making the + // request. This may be empty if the agent is unknown or irrelevant, + // such as in a request to the healthcheck endpoint. + AgentName string + + // ServiceName holds the service name associated with the agent making + // the request. This may be empty if the agent is unknown or irrelevant, + // such as in a request to the healthcheck endpoint. + ServiceName string +} + +// AuthenticationDetails holds authentication details for a client. +type AuthenticationDetails struct { + // Method holds the authentication kind used. + // + // Method will be empty for unauthenticated (anonymous) requests when + // the server has at least one auth method defined. When the server + // has no auth methods defined, this will be MethodNone. + Method Method + + // APIKey holds authentication details related to API Key auth. + // This will be set when Method is MethodAPIKey. 
+ APIKey *APIKeyAuthenticationDetails +} + +// APIKeyAuthenticationDetails holds API Key related authentication details. +type APIKeyAuthenticationDetails struct { + // ID holds the non-secret ID of the API Key. + ID string + + // Username holds the username associated with the API Key. + Username string +} + +// NewAuthenticator creates an Authenticator with config, authenticating +// clients with one of the allowed methods. +func NewAuthenticator(cfg config.AgentAuth) (*Authenticator, error) { + b := Authenticator{secretToken: cfg.SecretToken} + if cfg.APIKey.Enabled { + // Do not use apm-server's credentials for API Key requests; + // we should only use API Key credentials provided by clients + // to the Authenticate method. + cfg.APIKey.ESConfig.Username = "" + cfg.APIKey.ESConfig.Password = "" + cfg.APIKey.ESConfig.APIKey = "" + client, err := elasticsearch.NewClient(cfg.APIKey.ESConfig) + if err != nil { + return nil, err + } + + cache := newPrivilegesCache(cacheTimeoutMinute, cfg.APIKey.LimitPerMin) + b.apikey = newApikeyAuth(client, cache) + } + if cfg.Anonymous.Enabled { + b.anonymous = newAnonymousAuth(cfg.Anonymous.AllowAgent, cfg.Anonymous.AllowService) + } + return &b, nil +} + +// Authenticate authenticates a client given an authentication method and token, +// returning the authentication details and an Authorizer for authorizing specific +// actions and resources. +// +// Authenticate will return ErrAuthFailed (possibly wrapped) if at least one auth +// method is configured and no valid credentials have been supplied. Other errors +// may be returned, for example because the server cannot communicate with external +// systems. +func (a *Authenticator) Authenticate(ctx context.Context, kind string, token string) (AuthenticationDetails, Authorizer, error) { + if a.apikey == nil && a.secretToken == "" { + // No auth required, let everyone through. + return AuthenticationDetails{Method: MethodNone}, allowAuth{}, nil + } + switch kind { + case "": + if a.anonymous != nil { + return AuthenticationDetails{Method: MethodAnonymous}, a.anonymous, nil + } + return AuthenticationDetails{}, nil, errAuthMissing + case headers.APIKey: + if a.apikey != nil { + details, authz, err := a.apikey.authenticate(ctx, token) + if err != nil { + return AuthenticationDetails{}, nil, err + } + return AuthenticationDetails{Method: MethodAPIKey, APIKey: details}, authz, nil + } + case headers.Bearer: + if a.secretToken != "" && subtle.ConstantTimeCompare([]byte(a.secretToken), []byte(token)) == 1 { + return AuthenticationDetails{Method: MethodSecretToken}, allowAuth{}, nil + } + default: + return AuthenticationDetails{}, nil, fmt.Errorf( + "%w: unknown Authentication header %s: %s", + ErrAuthFailed, kind, expectedAuthHeaderFormat, + ) + } + return AuthenticationDetails{}, nil, ErrAuthFailed +} diff --git a/beater/auth/authenticator_test.go b/beater/auth/authenticator_test.go new file mode 100644 index 00000000000..f3799055e30 --- /dev/null +++ b/beater/auth/authenticator_test.go @@ -0,0 +1,295 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "encoding/base64" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.elastic.co/apm/apmtest" + + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/elasticsearch" +) + +func TestAuthenticatorNone(t *testing.T) { + authenticator, err := NewAuthenticator(config.AgentAuth{}) + require.NoError(t, err) + + // If the server has no configured auth methods, all requests are allowed. + for _, kind := range []string{"", headers.APIKey, headers.Bearer} { + details, authz, err := authenticator.Authenticate(context.Background(), kind, "") + require.NoError(t, err) + assert.Equal(t, AuthenticationDetails{Method: MethodNone}, details) + assert.Equal(t, allowAuth{}, authz) + } +} + +func TestAuthenticatorAuthRequired(t *testing.T) { + withSecretToken := config.AgentAuth{SecretToken: "secret_token"} + withAPIKey := config.AgentAuth{ + APIKey: config.APIKeyAgentAuth{Enabled: true, ESConfig: elasticsearch.DefaultConfig()}, + } + for _, cfg := range []config.AgentAuth{withSecretToken, withAPIKey} { + authenticator, err := NewAuthenticator(cfg) + require.NoError(t, err) + + details, authz, err := authenticator.Authenticate(context.Background(), "", "") + assert.Error(t, err) + assert.EqualError(t, err, "authentication failed: missing or improperly formatted Authorization header: expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'") + assert.True(t, errors.Is(err, ErrAuthFailed)) + assert.Zero(t, details) + assert.Nil(t, authz) + + details, authz, err = authenticator.Authenticate(context.Background(), "magic", "") + assert.Error(t, err) + assert.EqualError(t, err, `authentication failed: unknown Authentication header magic: expected 'Authorization: Bearer secret_token' or 'Authorization: ApiKey base64(API key ID:API key)'`) + assert.True(t, errors.Is(err, ErrAuthFailed)) + assert.Zero(t, details) + assert.Nil(t, authz) + } +} + +func TestAuthenticatorSecretToken(t *testing.T) { + authenticator, err := NewAuthenticator(config.AgentAuth{SecretToken: "valid"}) + require.NoError(t, err) + + details, authz, err := authenticator.Authenticate(context.Background(), headers.Bearer, "invalid") + assert.Equal(t, ErrAuthFailed, err) + assert.Zero(t, details) + assert.Nil(t, authz) + + details, authz, err = authenticator.Authenticate(context.Background(), headers.Bearer, "valid") + assert.NoError(t, err) + assert.Equal(t, AuthenticationDetails{Method: MethodSecretToken}, details) + assert.Equal(t, allowAuth{}, authz) +} + +func TestAuthenticatorAPIKey(t *testing.T) { + var requestURLPath string + var requestBody []byte + var requestAuthorizationHeader string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestURLPath = r.URL.Path + requestBody, _ = ioutil.ReadAll(r.Body) + requestAuthorizationHeader = 
r.Header.Get("Authorization") + w.Write([]byte(`{ + "username": "api_key_username", + "application": { + "apm": { + "-": {"config_agent:read": true, "event:write": true, "sourcemap:write": false} + } + } + }`)) + })) + defer srv.Close() + + esConfig := elasticsearch.DefaultConfig() + esConfig.Hosts = elasticsearch.Hosts{srv.URL} + authenticator, err := NewAuthenticator(config.AgentAuth{ + APIKey: config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 100, ESConfig: esConfig}, + }) + require.NoError(t, err) + + credentials := base64.StdEncoding.EncodeToString([]byte("id_value:key_value")) + details, authz, err := authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.NoError(t, err) + assert.Equal(t, AuthenticationDetails{ + Method: MethodAPIKey, + APIKey: &APIKeyAuthenticationDetails{ + ID: "id_value", + Username: "api_key_username", + }, + }, details) + assert.Equal(t, &apikeyAuthorizer{permissions: elasticsearch.Permissions{ + "config_agent:read": true, + "event:write": true, + "sourcemap:write": false, + }}, authz) + + assert.Equal(t, "/_security/user/_has_privileges", requestURLPath) + assert.Equal(t, `{"application":[{"application":"apm","privileges":["config_agent:read","event:write","sourcemap:write"],"resources":["-"]}]}`+"\n", string(requestBody)) + assert.Equal(t, "ApiKey "+credentials, requestAuthorizationHeader) +} + +func TestAuthenticatorAPIKeyErrors(t *testing.T) { + esConfig := elasticsearch.DefaultConfig() + esConfig.Hosts = elasticsearch.Hosts{"testing.invalid"} + esConfig.Backoff.Init = time.Nanosecond + esConfig.Backoff.Max = time.Nanosecond + authenticator, err := NewAuthenticator(config.AgentAuth{ + APIKey: config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 100, ESConfig: esConfig}, + }) + require.NoError(t, err) + + // Make sure that we can't auth with an empty secret token if secret token auth is not configured, but API Key auth is. 
+	details, authz, err := authenticator.Authenticate(context.Background(), headers.Bearer, "") + assert.Equal(t, ErrAuthFailed, err) + assert.Zero(t, details) + assert.Nil(t, authz) + + details, authz, err = authenticator.Authenticate(context.Background(), headers.APIKey, "invalid_base64") + assert.EqualError(t, err, "authentication failed: illegal base64 data at input byte 7") + assert.True(t, errors.Is(err, ErrAuthFailed)) + assert.Zero(t, details) + assert.Nil(t, authz) + + credentials := base64.StdEncoding.EncodeToString([]byte("malformatted_credentials")) + details, authz, err = authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.EqualError(t, err, "authentication failed: improperly formatted ApiKey credentials: expected base64(ID:APIKey)") + assert.True(t, errors.Is(err, ErrAuthFailed)) + assert.Zero(t, details) + assert.Nil(t, authz) + + credentials = base64.StdEncoding.EncodeToString([]byte("id_value:key_value")) + details, authz, err = authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.Error(t, err) + assert.False(t, errors.Is(err, ErrAuthFailed)) // failure to communicate with Elasticsearch is *not* an auth failure + assert.Zero(t, details) + assert.Nil(t, authz) + + responseStatusCode := http.StatusUnauthorized + responseBody := "" + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(responseStatusCode) + w.Write([]byte(responseBody)) + })) + defer srv.Close() + esConfig.Hosts = elasticsearch.Hosts{srv.URL} + authenticator, err = NewAuthenticator(config.AgentAuth{ + APIKey: config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 2, ESConfig: esConfig}, + }) + require.NoError(t, err) + details, authz, err = authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.Equal(t, ErrAuthFailed, err) + assert.Zero(t, details) + assert.Nil(t, authz) + + // API Key is valid, but grants none of the requested privileges. 
+ responseStatusCode = http.StatusOK + responseBody = `{ + "application": { + "apm": { + "-": {"config_agent:read": false, "event:write": false, "sourcemap:write": false} + } + } + }` + defer srv.Close() + esConfig.Hosts = elasticsearch.Hosts{srv.URL} + authenticator, err = NewAuthenticator(config.AgentAuth{ + APIKey: config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 100, ESConfig: esConfig}, + }) + require.NoError(t, err) + details, authz, err = authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.Equal(t, ErrAuthFailed, err) + assert.Zero(t, details) + assert.Nil(t, authz) +} + +func TestAuthenticatorAPIKeyCache(t *testing.T) { + validCredentials := base64.StdEncoding.EncodeToString([]byte("valid_id:key_value")) + validCredentials2 := base64.StdEncoding.EncodeToString([]byte("valid_id:key_value_2")) + invalidCredentials := base64.StdEncoding.EncodeToString([]byte("invalid_id:key_value")) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + credentials := strings.Fields(r.Header.Get("Authorization"))[1] + switch credentials { + case validCredentials: + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{ + "username": "api_key_username", + "application": { + "apm": { + "-": {"config_agent:read": true, "event:write": true, "sourcemap:write": false} + } + } + }`)) + case invalidCredentials: + w.WriteHeader(http.StatusUnauthorized) + default: + panic("unexpected credentials: " + credentials) + } + })) + defer srv.Close() + + esConfig := elasticsearch.DefaultConfig() + esConfig.Hosts = elasticsearch.Hosts{srv.URL} + apikeyAuthConfig := config.APIKeyAgentAuth{Enabled: true, LimitPerMin: 2, ESConfig: esConfig} + authenticator, err := NewAuthenticator(config.AgentAuth{APIKey: apikeyAuthConfig}) + require.NoError(t, err) + + _, spans, _ := apmtest.WithTransaction(func(ctx context.Context) { + for i := 0; i < apikeyAuthConfig.LimitPerMin+1; i++ { + _, _, err := authenticator.Authenticate(ctx, headers.APIKey, validCredentials) + assert.NoError(t, err) + } + }) + assert.Len(t, spans, 1) + assert.Equal(t, "elasticsearch", spans[0].Subtype) + + _, spans, _ = apmtest.WithTransaction(func(ctx context.Context) { + // API Key checks are cached based on the API Key ID, not the full credential. + _, _, err := authenticator.Authenticate(ctx, headers.APIKey, validCredentials2) + assert.NoError(t, err) + }) + assert.Len(t, spans, 0) + + _, spans, _ = apmtest.WithTransaction(func(ctx context.Context) { + for i := 0; i < apikeyAuthConfig.LimitPerMin+1; i++ { + _, _, err = authenticator.Authenticate(ctx, headers.APIKey, invalidCredentials) + assert.Equal(t, ErrAuthFailed, err) + } + }) + assert.Len(t, spans, 1) + + credentials := base64.StdEncoding.EncodeToString([]byte("id_value3:key_value")) + _, _, err = authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + assert.EqualError(t, err, "api_key limit reached, check your logs for failed authorization attempts or consider increasing config option `apm-server.api_key.limit`") +} + +func TestAuthenticatorAnonymous(t *testing.T) { + // Anonymous access is only effective when some other auth method is enabled. 
+	authenticator, err := NewAuthenticator(config.AgentAuth{ + Anonymous: config.AnonymousAgentAuth{Enabled: true}, + }) + require.NoError(t, err) + details, authz, err := authenticator.Authenticate(context.Background(), "", "") + assert.NoError(t, err) + assert.Equal(t, AuthenticationDetails{Method: MethodNone}, details) + assert.Equal(t, allowAuth{}, authz) + + authenticator, err = NewAuthenticator(config.AgentAuth{ + SecretToken: "secret_token", + Anonymous: config.AnonymousAgentAuth{Enabled: true}, + }) + require.NoError(t, err) + details, authz, err = authenticator.Authenticate(context.Background(), "", "") + assert.NoError(t, err) + assert.Equal(t, AuthenticationDetails{Method: MethodAnonymous}, details) + assert.Equal(t, newAnonymousAuth(nil, nil), authz) +} diff --git a/beater/auth/context.go b/beater/auth/context.go new file mode 100644 index 00000000000..ddf05c26bf6 --- /dev/null +++ b/beater/auth/context.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package auth + +import ( + "context" + "errors" +) + +// ErrNoAuthorizer is returned from Authorize when the context does not contain an Authorizer. +var ErrNoAuthorizer = errors.New("no Authorizer in context") + +type authorizationKey struct{} + +// ContextWithAuthorizer returns a copy of parent associated with auth. +func ContextWithAuthorizer(parent context.Context, auth Authorizer) context.Context { + return context.WithValue(parent, authorizationKey{}, auth) +} + +// authorizationFromContext returns the Authorizer stored in ctx, if any, and a boolean +// indicating whether one was found. The boolean is false if and only if the +// Authorizer is nil. +func authorizationFromContext(ctx context.Context) (Authorizer, bool) { + auth, ok := ctx.Value(authorizationKey{}).(Authorizer) + return auth, ok +} + +// Authorize is a shortcut for obtaining an Authorizer from ctx and calling its Authorize +// method. Authorize returns ErrNoAuthorizer if ctx does not contain an Authorizer. +func Authorize(ctx context.Context, action Action, resource Resource) error { + auth, ok := authorizationFromContext(ctx) + if !ok { + return ErrNoAuthorizer + } + return auth.Authorize(ctx, action, resource) +} diff --git a/beater/authorization/header.go b/beater/auth/header.go similarity index 98% rename from beater/authorization/header.go rename to beater/auth/header.go index 810fc0996d9..c0d2f4029a1 100644 --- a/beater/authorization/header.go +++ b/beater/auth/header.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
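(Aside: a minimal sketch, not part of this change, of how the context.go helpers above are meant to compose in a request handler: authenticate once, stash the Authorizer in the context, and authorize concrete actions downstream. The handler, hard-coded credentials, and server wiring are hypothetical.)

```go
package main

import (
	"net/http"

	"github.com/elastic/apm-server/beater/auth"
	"github.com/elastic/apm-server/beater/config"
	"github.com/elastic/apm-server/beater/headers"
)

func handleIntake(authenticator *auth.Authenticator) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Hypothetical: a real handler parses the kind and token out of
		// the Authorization header instead of hard-coding them.
		_, authz, err := authenticator.Authenticate(r.Context(), headers.Bearer, "example-token")
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		// Stash the Authorizer for downstream code...
		ctx := auth.ContextWithAuthorizer(r.Context(), authz)
		// ...which authorizes the concrete action without re-authenticating.
		if err := auth.Authorize(ctx, auth.ActionEventIngest, auth.Resource{}); err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
			return
		}
		w.WriteHeader(http.StatusAccepted)
	})
}

func main() {
	authenticator, err := auth.NewAuthenticator(config.AgentAuth{SecretToken: "example-token"})
	if err != nil {
		panic(err)
	}
	http.ListenAndServe("localhost:8200", handleIntake(authenticator))
}
```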
-package authorization +package auth import ( "strings" diff --git a/beater/authorization/allow.go b/beater/authorization/allow.go deleted file mode 100644 index b5d5ef21128..00000000000 --- a/beater/authorization/allow.go +++ /dev/null @@ -1,37 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - - "github.com/elastic/apm-server/elasticsearch" -) - -// AllowAuth implements the Authorization interface. It allows all authorization requests. -type AllowAuth struct{} - -// AuthorizedFor always returns true -func (AllowAuth) AuthorizedFor(context.Context, elasticsearch.Resource) (bool, error) { - return true, nil -} - -// IsAuthorizationConfigured always returns false. -func (AllowAuth) IsAuthorizationConfigured() bool { - return false -} diff --git a/beater/authorization/allow_test.go b/beater/authorization/allow_test.go deleted file mode 100644 index 79223c02d89..00000000000 --- a/beater/authorization/allow_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAllowAuth(t *testing.T) { - handler := AllowAuth{} - - t.Run("IsAuthorizationConfigured", func(t *testing.T) { - assert.False(t, handler.IsAuthorizationConfigured()) - }) - - t.Run("AuthorizedFor", func(t *testing.T) { - authorized, err := handler.AuthorizedFor(context.Background(), "") - assert.True(t, authorized) - assert.NoError(t, err) - }) -} diff --git a/beater/authorization/apikey.go b/beater/authorization/apikey.go deleted file mode 100644 index cb5bd6f18d8..00000000000 --- a/beater/authorization/apikey.go +++ /dev/null @@ -1,130 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - "errors" - "net/http" - "time" - - es "github.com/elastic/apm-server/elasticsearch" -) - -const cleanupInterval = 60 * time.Second - -const ( - // Application is a constant mapped to the "application" field for the Elasticsearch security API - // This identifies privileges and keys created for APM - Application es.AppName = "apm" - // ResourceInternal is only valid for first authorization of a request. - // The API Key needs to grant privileges to additional resources for successful processing of requests. - ResourceInternal = es.Resource("-") - ResourceAny = es.Resource("*") -) - -type apikeyBuilder struct { - esClient es.Client - cache *privilegesCache - anyOfPrivileges []es.PrivilegeAction -} - -type apikeyAuth struct { - *apikeyBuilder - // key is base64(id:apiKey) - key string -} - -func newApikeyBuilder(client es.Client, cache *privilegesCache, anyOfPrivileges []es.PrivilegeAction) *apikeyBuilder { - return &apikeyBuilder{client, cache, anyOfPrivileges} -} - -func (a *apikeyBuilder) forKey(key string) *apikeyAuth { - return &apikeyAuth{a, key} -} - -// IsAuthorizationConfigured will return true if a non-empty token is required. -func (a *apikeyAuth) IsAuthorizationConfigured() bool { - return true -} - -// AuthorizedFor checks if the configured api key is authorized. -// An api key is considered to be authorized when the api key has the configured privileges for the requested resource. -// Permissions are fetched from Elasticsearch and then cached in a global cache. 
-func (a *apikeyAuth) AuthorizedFor(ctx context.Context, resource es.Resource) (bool, error) { - privileges := a.cache.get(id(a.key, resource)) - if privileges != nil { - return a.allowed(privileges), nil - } - - if a.cache.isFull() { - return false, errors.New("api_key limit reached, " + - "check your logs for failed authorization attempts " + - "or consider increasing config option `apm-server.api_key.limit`") - } - - privileges, err := a.queryES(ctx, resource) - if err != nil { - return false, err - } - a.cache.add(id(a.key, resource), privileges) - return a.allowed(privileges), nil -} - -func (a *apikeyAuth) allowed(permissions es.Permissions) bool { - var allowed bool - for _, privilege := range a.anyOfPrivileges { - if privilege == ActionAny { - for _, value := range permissions { - allowed = allowed || value - } - } - allowed = allowed || permissions[privilege] - } - return allowed -} - -func (a *apikeyAuth) queryES(ctx context.Context, resource es.Resource) (es.Permissions, error) { - request := es.HasPrivilegesRequest{ - Applications: []es.Application{ - { - Name: Application, - // it is important to query all privilege actions because they are cached by api key+resources - // querying a.anyOfPrivileges would result in an incomplete cache entry - Privileges: ActionsAll(), - Resources: []es.Resource{resource}, - }, - }, - } - info, err := es.HasPrivileges(ctx, a.esClient, request, a.key) - if err != nil { - var eserr *es.Error - if errors.As(err, &eserr) && eserr.StatusCode == http.StatusUnauthorized { - return es.Permissions{}, nil - } - return nil, err - } - if permissions, ok := info.Application[Application][resource]; ok { - return permissions, nil - } - return es.Permissions{}, nil -} - -func id(apiKey string, resource es.Resource) string { - return apiKey + "_" + string(resource) -} diff --git a/beater/authorization/apikey_test.go b/beater/authorization/apikey_test.go deleted file mode 100644 index b437e697099..00000000000 --- a/beater/authorization/apikey_test.go +++ /dev/null @@ -1,226 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
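(Aside: the deleted `AuthorizedFor` above cached `es.Permissions` per credential and returned a bool, whereas its replacement in beater/auth/apikey.go caches the full `HasPrivilegesResponse` by the non-secret key ID and records 401s as a cached nil. A simplified sketch of that nil-sentinel pattern, using stand-in types rather than the real elasticsearch package.)

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

// hasPrivilegesResponse is a simplified stand-in for es.HasPrivilegesResponse.
type hasPrivilegesResponse struct{ Username string }

var errAuthFailed = errors.New("authentication failed")

// lookup mirrors the pattern in the new hasPrivileges: a cached nil response
// marks a known-bad key, so repeated failures skip Elasticsearch entirely
// until the cache entry expires.
func lookup(c *cache.Cache, key string) (*hasPrivilegesResponse, error) {
	if v, ok := c.Get(key); ok {
		resp := v.(*hasPrivilegesResponse)
		if resp == nil {
			return nil, errAuthFailed // cached 401
		}
		return resp, nil
	}
	// Cache miss: a real implementation queries Elasticsearch here and
	// caches nil when it answers HTTP 401.
	c.SetDefault(key, (*hasPrivilegesResponse)(nil))
	return nil, errAuthFailed
}

func main() {
	c := cache.New(time.Minute, time.Minute)
	for i := 0; i < 2; i++ {
		_, err := lookup(c, "bad_id_-") // only the first call would hit ES
		fmt.Println(err)
	}
}
```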
- -package authorization - -import ( - "context" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.elastic.co/apm/apmtest" - - "github.com/elastic/apm-server/elasticsearch" - "github.com/elastic/apm-server/elasticsearch/estest" -) - -func TestApikeyBuilder(t *testing.T) { - // in case handler does not read from cache, but from ES an error is returned - tc := &apikeyTestcase{ - cache: newPrivilegesCache(time.Minute, 5), - transport: estest.NewTransport(t, http.StatusInternalServerError, nil)} - - tc.setup(t) - key := "myApiKey" - handler1 := tc.builder.forKey(key) - handler2 := tc.builder.forKey(key) - - // add existing privileges to shared cache - privilegesValid := elasticsearch.Permissions{} - for _, p := range PrivilegesAll { - privilegesValid[p.Action] = true - } - resource := elasticsearch.Resource("service-go") - tc.cache.add(id(key, resource), privilegesValid) - - // check that cache is actually shared between apiKeyHandlers - allowed, err := handler1.AuthorizedFor(context.Background(), resource) - assert.NoError(t, err) - assert.True(t, allowed) - - allowed, err = handler2.AuthorizedFor(context.Background(), resource) - assert.NoError(t, err) - assert.True(t, allowed) -} - -func TestApikeyAuth_IsAuthorizationConfigured(t *testing.T) { - tc := &apikeyTestcase{} - tc.setup(t) - assert.True(t, tc.builder.forKey("xyz").IsAuthorizationConfigured()) -} - -func TestAPIKey_AuthorizedFor(t *testing.T) { - t.Run("cache full", func(t *testing.T) { - tc := &apikeyTestcase{cache: newPrivilegesCache(time.Millisecond, 1)} - tc.setup(t) - handler := tc.builder.forKey("") - - authorized, err := handler.AuthorizedFor(context.Background(), "data:ingest") - assert.False(t, authorized) - assert.NoError(t, err) - - authorized, err = handler.AuthorizedFor(context.Background(), "apm:read") - assert.Error(t, err) - assert.False(t, authorized) - }) - - t.Run("from cache", func(t *testing.T) { - // in case handler does not read from cache, but from ES an error is returned - tc := &apikeyTestcase{transport: estest.NewTransport(t, http.StatusInternalServerError, nil)} - tc.setup(t) - key := "" - handler := tc.builder.forKey(key) - resourceValid := elasticsearch.Resource("foo") - resourceInvalid := elasticsearch.Resource("bar") - resourceMissing := elasticsearch.Resource("missing") - - tc.cache.add(id(key, resourceValid), elasticsearch.Permissions{tc.anyOfPrivileges[0]: true}) - tc.cache.add(id(key, resourceInvalid), elasticsearch.Permissions{tc.anyOfPrivileges[0]: false}) - - valid, err := handler.AuthorizedFor(context.Background(), resourceValid) - require.NoError(t, err) - assert.True(t, valid) - - valid, err = handler.AuthorizedFor(context.Background(), resourceInvalid) - require.NoError(t, err) - assert.False(t, valid) - - valid, err = handler.AuthorizedFor(context.Background(), resourceMissing) - require.Error(t, err) - assert.False(t, valid) - }) - - t.Run("from ES", func(t *testing.T) { - tc := &apikeyTestcase{} - tc.setup(t) - handler := tc.builder.forKey("key") - - valid, err := handler.AuthorizedFor(context.Background(), "foo") - require.NoError(t, err) - assert.True(t, valid) - - valid, err = handler.AuthorizedFor(context.Background(), "bar") - require.NoError(t, err) - assert.False(t, valid) - - valid, err = handler.AuthorizedFor(context.Background(), "missing") - require.NoError(t, err) - assert.False(t, valid) - assert.Equal(t, 3, tc.cache.cache.ItemCount()) - }) - - t.Run("client error", func(t 
*testing.T) { - tc := &apikeyTestcase{ - transport: estest.NewTransport(t, -1, nil)} - tc.setup(t) - handler := tc.builder.forKey("12a3") - - valid, err := handler.AuthorizedFor(context.Background(), "xyz") - require.Error(t, err) - assert.Contains(t, err.Error(), "client error") - assert.False(t, valid) - assert.Zero(t, tc.cache.cache.ItemCount()) - }) - - t.Run("unauthorized status from ES", func(t *testing.T) { - tc := &apikeyTestcase{transport: estest.NewTransport(t, http.StatusUnauthorized, nil)} - tc.setup(t) - handler := tc.builder.forKey("12a3") - - valid, err := handler.AuthorizedFor(context.Background(), "xyz") - require.NoError(t, err) - assert.False(t, valid) - assert.Equal(t, 1, tc.cache.cache.ItemCount()) // unauthorized responses are cached - }) - - t.Run("invalid status from ES", func(t *testing.T) { - tc := &apikeyTestcase{transport: estest.NewTransport(t, http.StatusNotFound, nil)} - tc.setup(t) - handler := tc.builder.forKey("12a3") - - valid, err := handler.AuthorizedFor(context.Background(), "xyz") - require.Error(t, err) - assert.False(t, valid) - assert.Equal(t, 0, tc.cache.cache.ItemCount()) - }) - - t.Run("decode error from ES", func(t *testing.T) { - tc := &apikeyTestcase{transport: estest.NewTransport(t, http.StatusOK, nil)} - tc.setup(t) - handler := tc.builder.forKey("123") - valid, err := handler.AuthorizedFor(context.Background(), "foo") - require.Error(t, err) - assert.False(t, valid) - assert.Zero(t, tc.cache.cache.ItemCount()) - }) -} - -type apikeyTestcase struct { - transport *estest.Transport - client elasticsearch.Client - cache *privilegesCache - anyOfPrivileges []elasticsearch.PrivilegeAction - - builder *apikeyBuilder -} - -func (tc *apikeyTestcase) setup(t *testing.T) { - var err error - if tc.client == nil { - if tc.transport == nil { - tc.transport = estest.NewTransport(t, http.StatusOK, map[string]interface{}{ - "application": map[string]interface{}{ - "apm": map[string]map[string]interface{}{ - "foo": {"config_agent:read": true, "event:write": true, "sourcemap:write": false}, - "bar": {"config_agent:read": true, "event:write": false}, - }}}) - } - tc.client, err = estest.NewElasticsearchClient(tc.transport) - require.NoError(t, err) - } - if tc.cache == nil { - tc.cache = newPrivilegesCache(time.Minute, 5) - } - if tc.anyOfPrivileges == nil { - tc.anyOfPrivileges = []elasticsearch.PrivilegeAction{PrivilegeEventWrite.Action, PrivilegeSourcemapWrite.Action} - } - tc.builder = newApikeyBuilder(tc.client, tc.cache, tc.anyOfPrivileges) -} - -func TestApikeyBuilderTraceContext(t *testing.T) { - transport := estest.NewTransport(t, http.StatusOK, map[string]interface{}{}) - client, err := estest.NewElasticsearchClient(transport) - require.NoError(t, err) - - cache := newPrivilegesCache(time.Minute, 5) - anyOfPrivileges := []elasticsearch.PrivilegeAction{PrivilegeEventWrite.Action, PrivilegeSourcemapWrite.Action} - builder := newApikeyBuilder(client, cache, anyOfPrivileges) - handler := builder.forKey("12a3") - - _, spans, _ := apmtest.WithTransaction(func(ctx context.Context) { - // When AuthorizedFor is called with a context containing - // a transaction, the underlying Elasticsearch query should - // create a span. 
- handler.AuthorizedFor(ctx, ResourceInternal) - handler.AuthorizedFor(ctx, ResourceInternal) // cached, no query - }) - require.Len(t, spans, 1) - assert.Equal(t, "elasticsearch", spans[0].Subtype) -} diff --git a/beater/authorization/bearer.go b/beater/authorization/bearer.go deleted file mode 100644 index b68f7fbb1ec..00000000000 --- a/beater/authorization/bearer.go +++ /dev/null @@ -1,52 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - "crypto/subtle" - - "github.com/elastic/apm-server/elasticsearch" -) - -type bearerBuilder struct { - required string -} - -type bearerAuth struct { - authorized bool - configured bool -} - -func (b bearerBuilder) forToken(token string) *bearerAuth { - if b.required == "" { - return &bearerAuth{authorized: true, configured: false} - } - return &bearerAuth{ - authorized: subtle.ConstantTimeCompare([]byte(b.required), []byte(token)) == 1, - configured: true} -} - -func (b *bearerAuth) AuthorizedFor(context.Context, elasticsearch.Resource) (bool, error) { - return b.authorized, nil -} - -// IsAuthorizationConfigured will return true if a non-empty token is required. -func (b bearerAuth) IsAuthorizationConfigured() bool { - return b.configured -} diff --git a/beater/authorization/bearer_test.go b/beater/authorization/bearer_test.go deleted file mode 100644 index a66c97843e1..00000000000 --- a/beater/authorization/bearer_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
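(Aside: the deleted `bearerBuilder` above, like its replacement in `Authenticator.Authenticate`, relies on crypto/subtle for a timing-safe token comparison. A standalone sketch with hypothetical tokens.)

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	// Hypothetical tokens. For inputs of equal length, ConstantTimeCompare
	// inspects every byte instead of returning at the first mismatch, so
	// comparison time does not leak how much of the secret was guessed.
	required, supplied := "secret_token", "secret_tokex"
	ok := subtle.ConstantTimeCompare([]byte(required), []byte(supplied)) == 1
	fmt.Println("token accepted:", ok)
}
```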
- -package authorization - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBearerAuth(t *testing.T) { - for name, tc := range map[string]struct { - builder bearerBuilder - token string - authorized, configured bool - }{ - "empty": {builder: bearerBuilder{}, authorized: true, configured: false}, - "empty for token": {builder: bearerBuilder{}, authorized: true, configured: false, token: "1"}, - "no token": {builder: bearerBuilder{"123"}, authorized: false, configured: true}, - "invalid token": {builder: bearerBuilder{"123"}, authorized: false, configured: true, token: "1"}, - "valid token": {builder: bearerBuilder{"123"}, authorized: true, configured: true, token: "123"}, - } { - t.Run(name, func(t *testing.T) { - bearer := tc.builder.forToken(tc.token) - authorized, err := bearer.AuthorizedFor(context.Background(), "") - assert.NoError(t, err) - assert.Equal(t, tc.authorized, authorized) - assert.Equal(t, tc.configured, bearer.IsAuthorizationConfigured()) - }) - } -} diff --git a/beater/authorization/builder.go b/beater/authorization/builder.go deleted file mode 100644 index d526eceff52..00000000000 --- a/beater/authorization/builder.go +++ /dev/null @@ -1,106 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package authorization - -import ( - "context" - "time" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/beater/headers" - "github.com/elastic/apm-server/elasticsearch" -) - -// Builder creates an authorization Handler depending on configuration options -type Builder struct { - apikey *apikeyBuilder - bearer *bearerBuilder - fallback Authorization -} - -// Handler returns the authorization method according to provided information -type Handler Builder - -// Authorization interface to be implemented by different auth types -type Authorization interface { - AuthorizedFor(context.Context, elasticsearch.Resource) (bool, error) - IsAuthorizationConfigured() bool -} - -const ( - cacheTimeoutMinute = 1 * time.Minute -) - -// NewBuilder creates authorization builder based off of the given information -// if apm-server.api_key is enabled, authorization is granted/denied solely -// based on the request Authorization header -func NewBuilder(cfg *config.Config) (*Builder, error) { - b := Builder{} - b.fallback = AllowAuth{} - if cfg.APIKeyConfig.IsEnabled() { - // do not use username+password for API Key requests - cfg.APIKeyConfig.ESConfig.Username = "" - cfg.APIKeyConfig.ESConfig.Password = "" - cfg.APIKeyConfig.ESConfig.APIKey = "" - client, err := elasticsearch.NewClient(cfg.APIKeyConfig.ESConfig) - if err != nil { - return nil, err - } - - cache := newPrivilegesCache(cacheTimeoutMinute, cfg.APIKeyConfig.LimitPerMin) - b.apikey = newApikeyBuilder(client, cache, []elasticsearch.PrivilegeAction{}) - b.fallback = DenyAuth{} - } - if cfg.SecretToken != "" { - b.bearer = &bearerBuilder{cfg.SecretToken} - b.fallback = DenyAuth{} - } - return &b, nil -} - -// ForPrivilege creates an authorization Handler checking for this privilege -func (b *Builder) ForPrivilege(privilege elasticsearch.PrivilegeAction) *Handler { - return b.ForAnyOfPrivileges(privilege) -} - -// ForAnyOfPrivileges creates an authorization Handler checking for any of the provided privileges -func (b *Builder) ForAnyOfPrivileges(privileges ...elasticsearch.PrivilegeAction) *Handler { - handler := Handler{bearer: b.bearer, fallback: b.fallback} - if b.apikey != nil { - handler.apikey = newApikeyBuilder(b.apikey.esClient, b.apikey.cache, privileges) - } - return &handler -} - -// AuthorizationFor returns proper authorization implementation depending on the given kind, configured with the token. -func (h *Handler) AuthorizationFor(kind string, token string) Authorization { - switch kind { - case headers.APIKey: - if h.apikey == nil { - return h.fallback - } - return h.apikey.forKey(token) - case headers.Bearer: - if h.bearer == nil { - return h.fallback - } - return h.bearer.forToken(token) - default: - return h.fallback - } -} diff --git a/beater/authorization/builder_test.go b/beater/authorization/builder_test.go deleted file mode 100644 index 3c4a836108a..00000000000 --- a/beater/authorization/builder_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/elasticsearch" -) - -func TestBuilder(t *testing.T) { - for name, tc := range map[string]struct { - withBearer, withApikey bool - bearer *bearerBuilder - fallback Authorization - }{ - "no auth": {fallback: AllowAuth{}}, - "bearer": {withBearer: true, fallback: DenyAuth{}, bearer: &bearerBuilder{"xvz"}}, - "apikey": {withApikey: true, fallback: DenyAuth{}}, - "all": {withApikey: true, withBearer: true, fallback: DenyAuth{}, bearer: &bearerBuilder{"xvz"}}, - } { - - setup := func() *Builder { - cfg := config.DefaultConfig() - - if tc.withBearer { - cfg.SecretToken = "xvz" - } - if tc.withApikey { - cfg.APIKeyConfig = &config.APIKeyConfig{ - Enabled: true, LimitPerMin: 100, ESConfig: elasticsearch.DefaultConfig()} - } - - builder, err := NewBuilder(cfg) - require.NoError(t, err) - return builder - } - t.Run("NewBuilder"+name, func(t *testing.T) { - builder := setup() - assert.Equal(t, tc.fallback, builder.fallback) - assert.Equal(t, tc.bearer, builder.bearer) - if tc.withApikey { - assert.NotNil(t, builder.apikey) - assert.Equal(t, 100, builder.apikey.cache.size) - assert.NotNil(t, builder.apikey.esClient) - } - }) - - t.Run("ForPrivilege"+name, func(t *testing.T) { - builder := setup() - h := builder.ForPrivilege(PrivilegeSourcemapWrite.Action) - assert.Equal(t, builder.bearer, h.bearer) - assert.Equal(t, builder.fallback, h.fallback) - if tc.withApikey { - assert.Equal(t, []elasticsearch.PrivilegeAction{}, builder.apikey.anyOfPrivileges) - assert.Equal(t, []elasticsearch.PrivilegeAction{PrivilegeSourcemapWrite.Action}, h.apikey.anyOfPrivileges) - assert.Equal(t, builder.apikey.esClient, h.apikey.esClient) - assert.Equal(t, builder.apikey.cache, h.apikey.cache) - } - }) - - t.Run("AuthorizationFor"+name, func(t *testing.T) { - builder := setup() - h := builder.ForPrivilege(PrivilegeSourcemapWrite.Action) - auth := h.AuthorizationFor("ApiKey", "") - if tc.withApikey { - assert.IsType(t, &apikeyAuth{}, auth) - } else { - assert.Equal(t, h.fallback, auth) - } - - auth = h.AuthorizationFor("Bearer", "") - if tc.withBearer { - assert.IsType(t, &bearerAuth{}, auth) - } else { - assert.Equal(t, h.fallback, auth) - } - - auth = h.AuthorizationFor("Anything", "") - assert.Equal(t, h.fallback, auth) - }) - } - -} diff --git a/beater/authorization/deny.go b/beater/authorization/deny.go deleted file mode 100644 index d1b620181f7..00000000000 --- a/beater/authorization/deny.go +++ /dev/null @@ -1,37 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - - "github.com/elastic/apm-server/elasticsearch" -) - -// DenyAuth implements the Authorization interface. It denies all authorization requests. -type DenyAuth struct{} - -// AuthorizedFor always returns false -func (DenyAuth) AuthorizedFor(context.Context, elasticsearch.Resource) (bool, error) { - return false, nil -} - -// IsAuthorizationConfigured always returns true. -func (DenyAuth) IsAuthorizationConfigured() bool { - return true -} diff --git a/beater/authorization/deny_test.go b/beater/authorization/deny_test.go deleted file mode 100644 index f809a8ce3e1..00000000000 --- a/beater/authorization/deny_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package authorization - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDenyAuth(t *testing.T) { - handler := DenyAuth{} - - t.Run("IsAuthorizationConfigured", func(t *testing.T) { - assert.True(t, handler.IsAuthorizationConfigured()) - }) - - t.Run("AuthorizedFor", func(t *testing.T) { - authorized, err := handler.AuthorizedFor(context.Background(), "") - assert.False(t, authorized) - assert.NoError(t, err) - }) -} diff --git a/beater/authorization/privilege.go b/beater/authorization/privilege.go deleted file mode 100644 index 329da04ab1c..00000000000 --- a/beater/authorization/privilege.go +++ /dev/null @@ -1,66 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
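(Aside: the deleted `DenyAuth` returned `false, nil`, leaving callers to turn a boolean into an error. In the new model, denial is an error wrapping `auth.ErrUnauthorized`, classified with `errors.Is`. A hedged sketch follows; the agent names are hypothetical, and it assumes, as the anonymous-auth tests earlier in this change suggest, that a disallowed agent yields an ErrUnauthorized-wrapped error.)

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/apm-server/beater/auth"
	"github.com/elastic/apm-server/beater/config"
)

func main() {
	authenticator, err := auth.NewAuthenticator(config.AgentAuth{
		SecretToken: "example", // anonymous auth only applies when another method is enabled
		Anonymous:   config.AnonymousAgentAuth{Enabled: true, AllowAgent: []string{"rum-js"}},
	})
	if err != nil {
		panic(err)
	}
	_, authz, err := authenticator.Authenticate(context.Background(), "", "")
	if err != nil {
		panic(err)
	}
	// "java" is not in AllowAgent, so this is assumed to be denied.
	err = authz.Authorize(context.Background(), auth.ActionEventIngest, auth.Resource{AgentName: "java"})
	switch {
	case err == nil:
		fmt.Println("allowed")
	case errors.Is(err, auth.ErrUnauthorized):
		fmt.Println("denied:", err) // the replacement for DenyAuth's false result
	default:
		fmt.Println("could not determine authorization:", err)
	}
}
```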
- -package authorization - -import ( - "time" - - es "github.com/elastic/apm-server/elasticsearch" - - "github.com/patrickmn/go-cache" -) - -var ( - PrivilegeAgentConfigRead = es.NewPrivilege("agentConfig", "config_agent:read") - PrivilegeEventWrite = es.NewPrivilege("event", "event:write") - PrivilegeSourcemapWrite = es.NewPrivilege("sourcemap", "sourcemap:write") - PrivilegesAll = []es.NamedPrivilege{PrivilegeAgentConfigRead, PrivilegeEventWrite, PrivilegeSourcemapWrite} - // ActionAny can't be used for querying, use ActionsAll instead - ActionAny = es.PrivilegeAction("*") - ActionsAll = func() []es.PrivilegeAction { - actions := make([]es.PrivilegeAction, 0) - for _, privilege := range PrivilegesAll { - actions = append(actions, privilege.Action) - } - return actions - } -) - -type privilegesCache struct { - cache *cache.Cache - size int -} - -func newPrivilegesCache(expiration time.Duration, size int) *privilegesCache { - return &privilegesCache{cache: cache.New(expiration, cleanupInterval), size: size} -} - -func (c *privilegesCache) isFull() bool { - return c.cache.ItemCount() >= c.size -} - -func (c *privilegesCache) get(id string) es.Permissions { - if val, exists := c.cache.Get(id); exists { - return val.(es.Permissions) - } - return nil -} - -func (c *privilegesCache) add(id string, privileges es.Permissions) { - c.cache.SetDefault(id, privileges) -} diff --git a/beater/authorization/privilege_test.go b/beater/authorization/privilege_test.go deleted file mode 100644 index 0d6bab77a4d..00000000000 --- a/beater/authorization/privilege_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package authorization - -import ( - "fmt" - "testing" - "time" - - "github.com/elastic/apm-server/elasticsearch" - - "github.com/stretchr/testify/assert" -) - -func TestPrivilegesCache(t *testing.T) { - n := 10 - cache := newPrivilegesCache(time.Millisecond, n) - assert.False(t, cache.isFull()) - for i := 0; i < n-1; i++ { - cache.add(fmt.Sprintf("%v", i), elasticsearch.Permissions{}) - assert.False(t, cache.isFull()) - } - cache.add("oneMore", elasticsearch.Permissions{}) - assert.True(t, cache.isFull()) - assert.NotNil(t, cache.get("oneMore")) - time.Sleep(time.Millisecond) - assert.Nil(t, cache.get("oneMore")) - - p := elasticsearch.Permissions{"a": true, "b": false} - cache.add("id1", p) - assert.Equal(t, p, cache.get("id1")) - assert.Nil(t, cache.get("oneMore")) -} diff --git a/beater/beater.go b/beater/beater.go index 02f172abc6a..14e8144c07f 100644 --- a/beater/beater.go +++ b/beater/beater.go @@ -19,39 +19,55 @@ package beater import ( "context" + "fmt" "net" - "regexp" + "net/http" + "os" + "runtime" "strings" "sync" + "time" + + "github.com/elastic/beats/v7/libbeat/common/transport" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" + "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/go-ucfg" "github.com/pkg/errors" "go.elastic.co/apm" + "go.elastic.co/apm/module/apmhttp" "golang.org/x/sync/errgroup" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/instrumentation" + "github.com/elastic/beats/v7/libbeat/licenser" "github.com/elastic/beats/v7/libbeat/logp" esoutput "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" + "github.com/elastic/beats/v7/libbeat/processors" + "github.com/elastic/beats/v7/libbeat/publisher/pipetool" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/elasticsearch" "github.com/elastic/apm-server/ingest/pipeline" + "github.com/elastic/apm-server/kibana" logs "github.com/elastic/apm-server/log" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" "github.com/elastic/apm-server/publish" "github.com/elastic/apm-server/sampling" "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/transform" -) - -var ( - errSetupDashboardRemoved = errors.New("setting 'setup.dashboards' has been removed") ) // CreatorParams holds parameters for creating beat.Beaters. type CreatorParams struct { + // Logger is a logger to use in Beaters created by the beat.Creator. + // + // If Logger is nil, logp.NewLogger will be used to create a new one. + Logger *logp.Logger + // WrapRunServer is used to wrap the RunServerFunc used to run the APM Server. // // WrapRunServer is optional. If provided, it must return a function that calls @@ -63,191 +79,607 @@ type CreatorParams struct { // using the provided CreatorParams. 
 func NewCreator(args CreatorParams) beat.Creator {
 	return func(b *beat.Beat, ucfg *common.Config) (beat.Beater, error) {
-		logger := logp.NewLogger(logs.Beater)
-		if err := checkConfig(logger); err != nil {
-			return nil, err
+		logger := args.Logger
+		if logger != nil {
+			logger = logger.Named(logs.Beater)
+		} else {
+			logger = logp.NewLogger(logs.Beater)
 		}
-		var esOutputCfg *common.Config
-		if isElasticsearchOutput(b) {
-			esOutputCfg = b.Config.Output.Config()
+		bt := &beater{
+			rawConfig:     ucfg,
+			stopped:       false,
+			logger:        logger,
+			wrapRunServer: args.WrapRunServer,
+			waitPublished: newWaitPublishedAcker(),
 		}
 
-		beaterConfig, err := config.NewConfig(ucfg, esOutputCfg)
+		var err error
+		bt.config, err = config.NewConfig(bt.rawConfig, elasticsearchOutputConfig(b))
 		if err != nil {
 			return nil, err
 		}
 
-		// send configs to telemetry
-		recordConfigs(b.Info, beaterConfig, ucfg, logger)
+		if err := recordRootConfig(b.Info, bt.rawConfig); err != nil {
+			bt.logger.Errorf("Error recording telemetry data: %v", err)
+		}
 
-		bt := &beater{
-			config:        beaterConfig,
-			stopped:       false,
-			logger:        logger,
-			wrapRunServer: args.WrapRunServer,
+		if bt.config.Pprof.Enabled {
+			// Profiling rates should be set once, early on in the program.
+			runtime.SetBlockProfileRate(bt.config.Pprof.BlockProfileRate)
+			runtime.SetMutexProfileFraction(bt.config.Pprof.MutexProfileRate)
+			if bt.config.Pprof.MemProfileRate > 0 {
+				runtime.MemProfileRate = bt.config.Pprof.MemProfileRate
+			}
 		}
 
-		// setup pipelines if explicitly directed to or setup --pipelines and config is not set at all
-		shouldSetupPipelines := beaterConfig.Register.Ingest.Pipeline.IsEnabled() ||
-			(b.InSetupCmd && beaterConfig.Register.Ingest.Pipeline.Enabled == nil)
-		if isElasticsearchOutput(b) && shouldSetupPipelines {
-			logger.Info("Registering pipeline callback")
-			err := bt.registerPipelineCallback(b)
-			if err != nil {
-				return nil, err
+		if !bt.config.DataStreams.Enabled {
+			if b.Manager != nil && b.Manager.Enabled() {
+				return nil, errors.New("data streams must be enabled when the server is managed")
 			}
-		} else {
-			logger.Info("No pipeline callback registered")
 		}
+		bt.registerPipelineSetupCallback(b)
 		return bt, nil
 	}
 }
 
-// checkConfig verifies the global configuration doesn't use unsupported settings
-func checkConfig(logger *logp.Logger) error {
-	cfg, err := cfgfile.Load("", nil)
+type beater struct {
+	rawConfig     *common.Config
+	config        *config.Config
+	logger        *logp.Logger
+	wrapRunServer func(RunServerFunc) RunServerFunc
+	waitPublished *waitPublishedAcker
+
+	mutex      sync.Mutex // guards stopServer and stopped
+	stopServer func()
+	stopped    bool
+}
+
+// Run runs the APM Server, blocking until the beater's Stop method is called,
+// or a fatal error occurs.
+func (bt *beater) Run(b *beat.Beat) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	done, err := bt.start(ctx, cancel, b)
+	if err != nil {
+		return err
+	}
+	<-done
+	bt.waitPublished.Wait(ctx)
+	return nil
+}
+
+func (bt *beater) start(ctx context.Context, cancelContext context.CancelFunc, b *beat.Beat) (<-chan struct{}, error) {
+	done := make(chan struct{})
+	bt.mutex.Lock()
+	defer bt.mutex.Unlock()
+	if bt.stopped {
+		close(done)
+		return done, nil
+	}
+
+	tracer, tracerServer, err := initTracing(b, bt.config, bt.logger)
 	if err != nil {
-		// responsibility for failing to load configuration lies elsewhere
-		// this is not reachable after going through normal beat creation
-		return nil
+		return nil, err
+	}
+	closeTracer := func() error { return nil }
+	if tracer != nil {
+		closeTracer = func() error {
+			tracer.Close()
+			if tracerServer != nil {
+				return tracerServer.Close()
+			}
+			return nil
+		}
+	}
+
+	sharedArgs := sharedServerRunnerParams{
+		Beat:          b,
+		WrapRunServer: bt.wrapRunServer,
+		Logger:        bt.logger,
+		Tracer:        tracer,
+		TracerServer:  tracerServer,
+		Acker:         bt.waitPublished,
 	}
 
-	var s struct {
-		Dashboards *common.Config `config:"setup.dashboards"`
+	reloader := reloader{
+		runServerContext: ctx,
+		args:             sharedArgs,
+	}
+	bt.stopServer = func() {
+		defer close(done)
+		defer closeTracer()
+		if bt.config.ShutdownTimeout > 0 {
+			time.AfterFunc(bt.config.ShutdownTimeout, cancelContext)
+		}
+		reloader.stop()
 	}
-	if err := cfg.Unpack(&s); err != nil {
+	if b.Manager != nil && b.Manager.Enabled() {
+		reload.Register.MustRegisterList("inputs", &reloader)
+	} else {
+		// Management disabled, use statically defined config.
+		if err := reloader.reload(bt.rawConfig, "default", nil); err != nil {
+			return nil, err
+		}
+	}
+	return done, nil
+}
+
+type reloader struct {
+	runServerContext context.Context
+	args             sharedServerRunnerParams
+
+	mu     sync.Mutex
+	runner *serverRunner
+}
+
+func (r *reloader) stop() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.runner != nil {
+		r.runner.cancelRunServerContext()
+		<-r.runner.done
+		r.runner = nil
+	}
+}
+
+// Reload is invoked when the initial or an updated integration policy is received.
+func (r *reloader) Reload(configs []*reload.ConfigWithMeta) error {
+	if n := len(configs); n != 1 {
+		return fmt.Errorf("only 1 input supported, got %d", n)
+	}
+	cfg := configs[0]
+	integrationConfig, err := config.NewIntegrationConfig(cfg.Config)
+	if err != nil {
 		return err
 	}
-	if s.Dashboards != nil {
-		if s.Dashboards.Enabled() {
-			return errSetupDashboardRemoved
+	var namespace string
+	if integrationConfig.DataStream != nil {
+		namespace = integrationConfig.DataStream.Namespace
+	}
+	apmServerCommonConfig := integrationConfig.APMServer
+	apmServerCommonConfig.Merge(common.MustNewConfigFrom(`{"data_streams.enabled": true}`))
+	return r.reload(apmServerCommonConfig, namespace, &integrationConfig.Fleet)
+}
+
+func (r *reloader) reload(rawConfig *common.Config, namespace string, fleetConfig *config.Fleet) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	runner, err := newServerRunner(r.runServerContext, serverRunnerParams{
+		sharedServerRunnerParams: r.args,
+		Namespace:                namespace,
+		RawConfig:                rawConfig,
+		FleetConfig:              fleetConfig,
+	})
+	if err != nil {
+		return err
+	}
+	// Start listening before we stop the existing runner (if any), to ensure zero downtime.
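+	// The handover sequence below: bind the new listener, start the new
+	// runner on it in a goroutine, and only then cancel and drain the old
+	// runner, so there is no window in which no runner is serving.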
+ listener, err := listen(runner.config, runner.logger) + if err != nil { + return err + } + go func() { + defer listener.Close() + if err := runner.run(listener); err != nil { + r.args.Logger.Error(err) } - logger.Warn(errSetupDashboardRemoved) + }() + // If the old runner exists, cancel it + if r.runner != nil { + r.runner.cancelRunServerContext() + <-r.runner.done } + r.runner = runner return nil } -type beater struct { +type serverRunner struct { + // backgroundContext is used for operations that should block on Stop, + // up to the process shutdown timeout limit. This allows the publisher to + // drain its queue when the server is stopped, for example. + backgroundContext context.Context + + // runServerContext is used for the runServer call, and will be cancelled + // immediately when the Stop method is invoked. + runServerContext context.Context + cancelRunServerContext context.CancelFunc + done chan struct{} + + pipeline beat.PipelineConnector + acker *waitPublishedAcker + namespace string config *config.Config + rawConfig *common.Config + fleetConfig *config.Fleet + beat *beat.Beat logger *logp.Logger + tracer *apm.Tracer + tracerServer *tracerServer wrapRunServer func(RunServerFunc) RunServerFunc +} - mutex sync.Mutex // guards stopServer and stopped - stopServer func() - stopped bool +type serverRunnerParams struct { + sharedServerRunnerParams + + Namespace string + RawConfig *common.Config + FleetConfig *config.Fleet } -// Run runs the APM Server, blocking until the beater's Stop method is called, -// or a fatal error occurs. -func (bt *beater) Run(b *beat.Beat) error { +type sharedServerRunnerParams struct { + Beat *beat.Beat + WrapRunServer func(RunServerFunc) RunServerFunc + Logger *logp.Logger + Tracer *apm.Tracer + TracerServer *tracerServer + Acker *waitPublishedAcker +} - tracer, tracerServer, err := bt.initTracing(b) +func newServerRunner(ctx context.Context, args serverRunnerParams) (*serverRunner, error) { + cfg, err := config.NewConfig(args.RawConfig, elasticsearchOutputConfig(args.Beat)) if err != nil { - return err + return nil, err } - runServer := runServer - if tracerServer != nil { - runServer = runServerWithTracerServer(runServer, tracerServer, tracer) + runServerContext, cancel := context.WithCancel(ctx) + return &serverRunner{ + backgroundContext: ctx, + runServerContext: runServerContext, + cancelRunServerContext: cancel, + done: make(chan struct{}), + + config: cfg, + rawConfig: args.RawConfig, + fleetConfig: args.FleetConfig, + acker: args.Acker, + pipeline: args.Beat.Publisher, + namespace: args.Namespace, + beat: args.Beat, + logger: args.Logger, + tracer: args.Tracer, + tracerServer: args.TracerServer, + wrapRunServer: args.WrapRunServer, + }, nil +} + +func (s *serverRunner) run(listener net.Listener) error { + defer close(s.done) + + // Send config to telemetry. + recordAPMServerConfig(s.config) + + publisherConfig := &publish.PublisherConfig{ + Pipeline: s.config.Pipeline, + Namespace: s.namespace, } - if bt.wrapRunServer != nil { - // Wrap runServer function, enabling injection of - // behaviour into the processing/reporting pipeline. - runServer = bt.wrapRunServer(runServer) + if !s.config.DataStreams.Enabled { + // Logs are only supported with data streams; + // add a beat.Processor which drops them. 
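+		// newDropLogsBeatProcessor (defined at the bottom of this file)
+		// builds a libbeat drop_event processor matching events whose
+		// processor.event field contains "log".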
+ dropLogsProcessor, err := newDropLogsBeatProcessor() + if err != nil { + return err + } + publisherConfig.Processor = dropLogsProcessor + } + + var kibanaClient kibana.Client + if s.config.Kibana.Enabled { + kibanaClient = kibana.NewConnectingClient(&s.config.Kibana) + } + + cfg := ucfg.Config(*s.rawConfig) + parentCfg := cfg.Parent() + // Check for an environment variable set when running in a cloud environment + if eac := os.Getenv("ELASTIC_AGENT_CLOUD"); eac != "" && s.config.Kibana.Enabled { + // Don't block server startup sending the config. + go func() { + if err := kibana.SendConfig(s.runServerContext, kibanaClient, parentCfg); err != nil { + s.logger.Infof("failed to upload config to kibana: %v", err) + } + }() } - publisher, err := newPublisher(b, bt.config, tracer) + g, ctx := errgroup.WithContext(s.runServerContext) + + // Ensure the libbeat output and go-elasticsearch clients do not index + // any events to Elasticsearch before the integration is ready. + publishReady := make(chan struct{}) + g.Go(func() error { + defer close(publishReady) + err := s.waitReady(ctx, kibanaClient) + return errors.Wrap(err, "error waiting for server to be ready") + }) + callbackUUID, err := esoutput.RegisterConnectCallback(func(*eslegclient.Connection) error { + select { + case <-publishReady: + return nil + default: + } + return errors.New("not ready for publishing events") + }) if err != nil { return err } + defer esoutput.DeregisterConnectCallback(callbackUUID) + newElasticsearchClient := func(cfg *elasticsearch.Config) (elasticsearch.Client, error) { + httpTransport, err := elasticsearch.NewHTTPTransport(cfg) + if err != nil { + return nil, err + } + transport := &waitReadyRoundTripper{Transport: httpTransport, ready: publishReady} + return elasticsearch.NewClientParams(elasticsearch.ClientParams{ + Config: cfg, + Transport: transport, + }) + } - // shutdownContext may be updated by stopServer below, - // to initiate the shutdown timeout. - shutdownContext := context.Background() - var cancelShutdownContext context.CancelFunc - defer func() { - if cancelShutdownContext != nil { - defer cancelShutdownContext() + // Register a libbeat elasticsearch output connect callback which + // ensures the pipeline is installed. The callback does nothing + // when data streams are in use. + pipelineCallback := newPipelineElasticsearchConnectCallback(s.config) + callbackUUID, err = esoutput.RegisterConnectCallback(pipelineCallback) + if err != nil { + return err + } + defer esoutput.DeregisterConnectCallback(callbackUUID) + + var sourcemapStore *sourcemap.Store + if s.config.RumConfig.Enabled && s.config.RumConfig.SourceMapping.Enabled { + store, err := newSourcemapStore( + s.beat.Info, s.config.RumConfig.SourceMapping, s.fleetConfig, + newElasticsearchClient, + ) + if err != nil { + return err } - publisher.Stop(shutdownContext) - }() + sourcemapStore = store + } + // When the publisher stops cleanly it will close its pipeline client, + // calling the acker's Close method. We need to call Open for each new + // publisher to ensure we wait for all clients and enqueued events to + // be closed at shutdown time. + s.acker.Open() + pipeline := pipetool.WithACKer(s.pipeline, s.acker) + publisher, err := publish.NewPublisher(pipeline, s.tracer, publisherConfig) + if err != nil { + return err + } + defer publisher.Stop(s.backgroundContext) + + // Create the runServer function. We start with newBaseRunServer, and then + // wrap depending on the configuration in order to inject behaviour. 
+	//
+	// The reporter is passed into newBaseRunServer for legacy event publishers
+	// that bypass the model processor framework, i.e. sourcemap uploads and
+	// onboarding docs. Because these bypass the framework, we must augment the
+	// reporter to set the common `observer` and `ecs.version` fields.
 	reporter := publisher.Send
-	if !bt.config.Sampling.KeepUnsampled {
+	runServer := newBaseRunServer(listener, augmentedReporter(reporter, s.beat.Info))
+	if s.tracerServer != nil {
+		runServer = runServerWithTracerServer(runServer, s.tracerServer, s.tracer)
+	}
+	if s.wrapRunServer != nil {
+		// Wrap runServer function, enabling injection of
+		// behaviour into the processing/reporting pipeline.
+		runServer = s.wrapRunServer(runServer)
+	}
+	runServer = s.wrapRunServerWithPreprocessors(runServer)
+
+	batchProcessor := s.newFinalBatchProcessor(reporter)
+	if !s.config.Sampling.KeepUnsampled {
 		// The server has been configured to discard unsampled
 		// transactions. Make sure this is done just before calling
 		// the publisher to avoid affecting aggregations.
-		reporter = sampling.NewDiscardUnsampledReporter(reporter)
-	}
-
-	stopped := make(chan struct{})
-	defer close(stopped)
-	ctx, cancelContext := context.WithCancel(context.Background())
-	defer cancelContext()
-	var stopOnce sync.Once
-	stopServer := func() {
-		stopOnce.Do(func() {
-			if bt.config.ShutdownTimeout > 0 {
-				shutdownContext, cancelShutdownContext = context.WithTimeout(
-					shutdownContext, bt.config.ShutdownTimeout,
+		batchProcessor = modelprocessor.Chained{
+			sampling.NewDiscardUnsampledBatchProcessor(), batchProcessor,
+		}
+	}
+
+	g.Go(func() error {
+		return runServer(ctx, ServerParams{
+			Info:                   s.beat.Info,
+			Config:                 s.config,
+			Managed:                s.beat.Manager != nil && s.beat.Manager.Enabled(),
+			Namespace:              s.namespace,
+			Logger:                 s.logger,
+			Tracer:                 s.tracer,
+			BatchProcessor:         batchProcessor,
+			SourcemapStore:         sourcemapStore,
+			PublishReady:           publishReady,
+			NewElasticsearchClient: newElasticsearchClient,
+		})
+	})
+	if err := g.Wait(); err != nil {
+		return err
+	}
+	return publisher.Stop(s.backgroundContext)
+}
+
+// waitReady waits until the server is ready to index events.
+func (s *serverRunner) waitReady(ctx context.Context, kibanaClient kibana.Client) error {
+	var preconditions []func(context.Context) error
+	var esOutputClient elasticsearch.Client
+	if cfg := elasticsearchOutputConfig(s.beat); cfg != nil {
+		esConfig := elasticsearch.DefaultConfig()
+		err := cfg.Unpack(&esConfig)
+		if err != nil {
+			return err
+		}
+		esOutputClient, err = elasticsearch.NewClient(esConfig)
+		if err != nil {
+			return err
+		}
+	}
+
+	// libbeat and go-elasticsearch both ensure a minimum level of Basic.
+	//
+	// If any configured features require a higher license level, add a
+	// precondition which checks this.
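+	//
+	// Only tail-based sampling raises the requirement today (to Platinum),
+	// and a trial license is accepted for any level.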
+ if esOutputClient != nil { + requiredLicenseLevel := licenser.Basic + licensedFeature := "" + if s.config.Sampling.Tail.Enabled { + requiredLicenseLevel = licenser.Platinum + licensedFeature = "tail-based sampling" + } + if requiredLicenseLevel > licenser.Basic { + preconditions = append(preconditions, func(ctx context.Context) error { + license, err := elasticsearch.GetLicense(ctx, esOutputClient) + if err != nil { + return errors.Wrap(err, "error getting Elasticsearch licensing information") + } + if licenser.IsExpired(license) { + return errors.New("Elasticsearch license is expired") + } + if license.Type == licenser.Trial || license.Cover(requiredLicenseLevel) { + return nil + } + return fmt.Errorf( + "invalid license level %s: %s requires license level %s", + license.Type, licensedFeature, requiredLicenseLevel, ) - } - cancelContext() - <-stopped + }) + } + } + + // When running standalone with data streams enabled, by default we will add + // a precondition that ensures the integration is installed. + fleetManaged := s.beat.Manager != nil && s.beat.Manager.Enabled() + if !fleetManaged && s.config.DataStreams.Enabled && s.config.DataStreams.WaitForIntegration { + if kibanaClient == nil && esOutputClient == nil { + return errors.New("cannot wait for integration without either Kibana or Elasticsearch config") + } + preconditions = append(preconditions, func(ctx context.Context) error { + return checkIntegrationInstalled(ctx, kibanaClient, esOutputClient, s.logger) }) } - bt.mutex.Lock() - if bt.stopped { - bt.mutex.Unlock() + if len(preconditions) == 0 { return nil } - bt.stopServer = stopServer - bt.mutex.Unlock() + check := func(ctx context.Context) error { + for _, pre := range preconditions { + if err := pre(ctx); err != nil { + return err + } + } + return nil + } + return waitReady(ctx, s.config.WaitReadyInterval, s.tracer, s.logger, check) +} - return runServer(ctx, ServerParams{ - Config: bt.config, - Logger: bt.logger, - Tracer: tracer, - Reporter: reporter, - }) +// newFinalBatchProcessor returns the final model.BatchProcessor that publishes events. +func (s *serverRunner) newFinalBatchProcessor(libbeatReporter publish.Reporter) model.BatchProcessor { + return &reporterBatchProcessor{libbeatReporter} +} + +func (s *serverRunner) wrapRunServerWithPreprocessors(runServer RunServerFunc) RunServerFunc { + processors := []model.BatchProcessor{ + modelprocessor.SetHostHostname{}, + modelprocessor.SetServiceNodeName{}, + modelprocessor.SetMetricsetName{}, + modelprocessor.SetGroupingKey{}, + modelprocessor.SetErrorMessage{}, + newObserverBatchProcessor(s.beat.Info), + model.ProcessBatchFunc(ecsVersionBatchProcessor), + modelprocessor.NewEventCounter(monitoring.Default.GetRegistry("apm-server")), + } + if s.config.DefaultServiceEnvironment != "" { + processors = append(processors, &modelprocessor.SetDefaultServiceEnvironment{ + DefaultServiceEnvironment: s.config.DefaultServiceEnvironment, + }) + } + if s.config.DataStreams.Enabled { + processors = append(processors, &modelprocessor.SetDataStream{ + Namespace: s.namespace, + }) + } + return WrapRunServerWithProcessors(runServer, processors...) 
+}
+
+// elasticsearchOutputConfig returns nil if the output is not elasticsearch
+func elasticsearchOutputConfig(b *beat.Beat) *common.Config {
+	if hasElasticsearchOutput(b) {
+		return b.Config.Output.Config()
+	}
+	return nil
 }
 
-func isElasticsearchOutput(b *beat.Beat) bool {
+func hasElasticsearchOutput(b *beat.Beat) bool {
 	return b.Config != nil && b.Config.Output.Name() == "elasticsearch"
 }
 
-func (bt *beater) registerPipelineCallback(b *beat.Beat) error {
-	overwrite := bt.config.Register.Ingest.Pipeline.ShouldOverwrite()
+// registerPipelineSetupCallback registers a callback which is invoked when
+// `setup --pipelines` is called, to either register pipelines or return
+// an error depending on the configuration.
+func (bt *beater) registerPipelineSetupCallback(b *beat.Beat) {
+	if !hasElasticsearchOutput(b) {
+		bt.logger.Info("Output is not Elasticsearch: pipeline registration disabled")
+		return
+	}
+
+	if bt.config.DataStreams.Enabled {
+		bt.logger.Info("Data streams enabled: pipeline registration disabled")
+		b.OverwritePipelinesCallback = func(esConfig *common.Config) error {
+			return errors.New("index pipeline setup must be performed externally when using data streams, by installing the 'apm' integration package")
+		}
+		return
+	}
+
+	if !bt.config.Register.Ingest.Pipeline.Enabled {
+		bt.logger.Info("Pipeline registration disabled")
+		return
+	}
+
+	bt.logger.Info("Registering pipeline callback")
+	overwrite := bt.config.Register.Ingest.Pipeline.Overwrite
 	path := bt.config.Register.Ingest.Pipeline.Path
 
 	// ensure setup cmd is working properly
 	b.OverwritePipelinesCallback = func(esConfig *common.Config) error {
-		conn, err := eslegclient.NewConnectedClient(esConfig)
+		conn, err := eslegclient.NewConnectedClient(esConfig, b.Info.Beat)
 		if err != nil {
 			return err
 		}
 		return pipeline.RegisterPipelines(conn, overwrite, path)
 	}
-
-	// ensure pipelines are registered when new ES connection is established.
-	_, err := esoutput.RegisterConnectCallback(func(conn *eslegclient.Connection) error {
+}
+
+// newPipelineElasticsearchConnectCallback returns an Elasticsearch connect
+// callback that ensures the configured pipeline is installed, if configured
+// to do so. If data streams are enabled, then pipeline registration is always
+// disabled.
+func newPipelineElasticsearchConnectCallback(cfg *config.Config) esoutput.ConnectCallback { + return func(conn *eslegclient.Connection) error { + if cfg.DataStreams.Enabled || !cfg.Register.Ingest.Pipeline.Enabled { + return nil + } + overwrite := cfg.Register.Ingest.Pipeline.Overwrite + path := cfg.Register.Ingest.Pipeline.Path return pipeline.RegisterPipelines(conn, overwrite, path) - }) - return err + } } -func (bt *beater) initTracing(b *beat.Beat) (*apm.Tracer, *tracerServer, error) { - var err error +func initTracing(b *beat.Beat, cfg *config.Config, logger *logp.Logger) (*apm.Tracer, *tracerServer, error) { tracer := b.Instrumentation.Tracer() listener := b.Instrumentation.Listener() - if !tracer.Active() && bt.config != nil { - tracer, listener, err = initLegacyTracer(b.Info, bt.config) + if !tracer.Active() && cfg != nil { + var err error + tracer, listener, err = initLegacyTracer(b.Info, cfg) if err != nil { return nil, nil, err } } - tracerServer := newTracerServer(bt.config, listener) + var tracerServer *tracerServer + if listener != nil { + var err error + tracerServer, err = newTracerServer(listener, logger) + if err != nil { + return nil, nil, err + } + } return tracer, tracerServer, nil } @@ -255,7 +687,7 @@ func (bt *beater) initTracing(b *beat.Beat) (*apm.Tracer, *tracerServer, error) // it does not instrument the beat output func initLegacyTracer(info beat.Info, cfg *config.Config) (*apm.Tracer, net.Listener, error) { selfInstrumentation := cfg.SelfInstrumentation - if selfInstrumentation == nil || !selfInstrumentation.IsEnabled() { + if !selfInstrumentation.Enabled { return apm.DefaultTracer, nil, nil } conf, err := common.NewConfigFrom(cfg.SelfInstrumentation) @@ -304,16 +736,7 @@ func runServerWithTracerServer(runServer RunServerFunc, tracerServer *tracerServ return func(ctx context.Context, args ServerParams) error { g, ctx := errgroup.WithContext(ctx) g.Go(func() error { - defer tracerServer.stop() - <-ctx.Done() - // Close the tracer now to prevent the server - // from waiting for more events during graceful - // shutdown. 
-			tracer.Close()
-			return nil
-		})
-		g.Go(func() error {
-			return tracerServer.serve(args.Reporter)
+			return tracerServer.serve(ctx, args.BatchProcessor)
 		})
 		g.Go(func() error {
 			return runServer(ctx, args)
@@ -322,42 +745,110 @@ func runServerWithTracerServer(runServer RunServerFunc, tracerServer *tracerServ
 	}
 }
 
-func newPublisher(b *beat.Beat, cfg *config.Config, tracer *apm.Tracer) (*publish.Publisher, error) {
-	transformConfig, err := newTransformConfig(b.Info, cfg)
+func newSourcemapStore(
+	beatInfo beat.Info,
+	cfg config.SourceMapping,
+	fleetCfg *config.Fleet,
+	newElasticsearchClient func(*elasticsearch.Config) (elasticsearch.Client, error),
+) (*sourcemap.Store, error) {
+	if fleetCfg != nil {
+		var (
+			c  = *http.DefaultClient
+			rt = http.DefaultTransport
+		)
+		var tlsConfig *tlscommon.TLSConfig
+		var err error
+		if fleetCfg.TLS.IsEnabled() {
+			if tlsConfig, err = tlscommon.LoadTLSConfig(fleetCfg.TLS); err != nil {
+				return nil, err
+			}
+		}
+
+		timeout := 30 * time.Second
+		dialer := transport.NetDialer(timeout)
+		tlsDialer := transport.TLSDialer(dialer, tlsConfig, timeout)
+
+		rt = &http.Transport{
+			Proxy:           http.ProxyFromEnvironment,
+			Dial:            dialer.Dial,
+			DialTLS:         tlsDialer.Dial,
+			TLSClientConfig: tlsConfig.ToConfig(),
+		}
+
+		c.Transport = apmhttp.WrapRoundTripper(rt)
+		return sourcemap.NewFleetStore(&c, fleetCfg, cfg.Metadata, cfg.Cache.Expiration)
+	}
+	c, err := newElasticsearchClient(cfg.ESConfig)
 	if err != nil {
 		return nil, err
 	}
-	return publish.NewPublisher(b.Publisher, tracer, &publish.PublisherConfig{
-		Info:            b.Info,
-		Pipeline:        cfg.Pipeline,
-		TransformConfig: transformConfig,
-	})
+	index := strings.ReplaceAll(cfg.IndexPattern, "%{[observer.version]}", beatInfo.Version)
+	return sourcemap.NewElasticsearchStore(c, index, cfg.Cache.Expiration)
 }
 
-func newTransformConfig(beatInfo beat.Info, cfg *config.Config) (*transform.Config, error) {
-	transformConfig := &transform.Config{
-		RUM: transform.RUMConfig{
-			LibraryPattern:      regexp.MustCompile(cfg.RumConfig.LibraryPattern),
-			ExcludeFromGrouping: regexp.MustCompile(cfg.RumConfig.ExcludeFromGrouping),
-		},
+// WrapRunServerWithProcessors wraps runServer such that args.BatchProcessor
+// is replaced with a chained processor: event batches are passed through the
+// given processors, in order, before reaching the original processor.
+func WrapRunServerWithProcessors(runServer RunServerFunc, processors ...model.BatchProcessor) RunServerFunc { + if len(processors) == 0 { + return runServer } - - if cfg.RumConfig.IsEnabled() && cfg.RumConfig.SourceMapping.IsEnabled() && cfg.RumConfig.SourceMapping.ESConfig != nil { - store, err := newSourcemapStore(beatInfo, cfg.RumConfig.SourceMapping) - if err != nil { - return nil, err - } - transformConfig.RUM.SourcemapStore = store + return func(ctx context.Context, args ServerParams) error { + processors := append(processors, args.BatchProcessor) + args.BatchProcessor = modelprocessor.Chained(processors) + return runServer(ctx, args) } +} - return transformConfig, nil +type disablePublisherTracingKey struct{} + +type reporterBatchProcessor struct { + reporter publish.Reporter } -func newSourcemapStore(beatInfo beat.Info, cfg *config.SourceMapping) (*sourcemap.Store, error) { - esClient, err := elasticsearch.NewClient(cfg.ESConfig) - if err != nil { - return nil, err +func (p *reporterBatchProcessor) ProcessBatch(ctx context.Context, batch *model.Batch) error { + disableTracing, _ := ctx.Value(disablePublisherTracingKey{}).(bool) + return p.reporter(ctx, publish.PendingReq{Transformable: batch, Trace: !disableTracing}) +} + +// augmentedReporter wraps publish.Reporter such that the events it reports have +// `observer` and `ecs.version` fields injected. +func augmentedReporter(reporter publish.Reporter, info beat.Info) publish.Reporter { + observerBatchProcessor := newObserverBatchProcessor(info) + return func(ctx context.Context, req publish.PendingReq) error { + orig := req.Transformable + req.Transformable = transformerFunc(func(ctx context.Context) []beat.Event { + // Merge common fields into each event. + events := orig.Transform(ctx) + batch := make(model.Batch, 1) + observerBatchProcessor(ctx, &batch) + ecsVersionBatchProcessor(ctx, &batch) + for _, event := range events { + event.Fields.Put("ecs.version", batch[0].ECSVersion) + event.Fields.DeepUpdate(common.MapStr{"observer": batch[0].Observer.Fields()}) + } + return events + }) + return reporter(ctx, req) } - index := strings.ReplaceAll(cfg.IndexPattern, "%{[observer.version]}", beatInfo.Version) - return sourcemap.NewStore(esClient, index, cfg.Cache.Expiration) +} + +type transformerFunc func(context.Context) []beat.Event + +func (f transformerFunc) Transform(ctx context.Context) []beat.Event { + return f(ctx) +} + +func newDropLogsBeatProcessor() (beat.ProcessorList, error) { + return processors.New(processors.PluginConfig{ + common.MustNewConfigFrom(map[string]interface{}{ + "drop_event": map[string]interface{}{ + "when": map[string]interface{}{ + "contains": map[string]interface{}{ + "processor.event": "log", + }, + }, + }, + }), + }) } diff --git a/beater/beater_test.go b/beater/beater_test.go index 4781ea4713e..d3a43965306 100644 --- a/beater/beater_test.go +++ b/beater/beater_test.go @@ -18,8 +18,11 @@ package beater import ( + "compress/zlib" "context" "errors" + "fmt" + "io/ioutil" "net" "net/http" "net/http/httptest" @@ -28,95 +31,202 @@ import ( "testing" "time" + "github.com/gofrs/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/elasticsearch" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" 
"github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/idxmgmt" + "github.com/elastic/beats/v7/libbeat/instrumentation" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/outputs" ) type testBeater struct { *beater + b *beat.Beat + logs *observer.ObservedLogs + runCh chan error listenAddr string baseURL string client *http.Client } +func setupServer(t *testing.T, cfg *common.Config, beatConfig *beat.BeatConfig, events chan beat.Event) (*testBeater, error) { + if testing.Short() { + t.Skip("skipping server test") + } + apmBeat, cfg := newBeat(t, cfg, beatConfig, events) + return setupBeater(t, apmBeat, cfg, beatConfig) +} + +func newBeat(t *testing.T, cfg *common.Config, beatConfig *beat.BeatConfig, events chan beat.Event) (*beat.Beat, *common.Config) { + info := beat.Info{ + Beat: "test-apm-server", + IndexPrefix: "test-apm-server", + Version: "1.2.3", // hard-coded to avoid changing approvals + ID: uuid.Must(uuid.FromString("fbba762a-14dd-412c-b7e9-b79f903eb492")), + } + + combinedConfig := common.MustNewConfigFrom(map[string]interface{}{ + "host": "localhost:0", + + // Enable instrumentation so the profile endpoint is + // available, but set the profiling interval to something + // long enough that it won't kick in. + "instrumentation": map[string]interface{}{ + "enabled": true, + "profiling": map[string]interface{}{ + "cpu": map[string]interface{}{ + "enabled": true, + "interval": "360s", + }, + }, + }, + }) + if cfg != nil { + require.NoError(t, cfg.Unpack(combinedConfig)) + } + + var pub beat.Pipeline + if events != nil { + // capture events using the supplied channel + pubClient := newChanClientWith(events) + pub = dummyPipeline(cfg, info, pubClient) + } else if beatConfig != nil && beatConfig.Output.Name() == "elasticsearch" { + // capture events using the configured elasticsearch output + supporter, err := idxmgmt.DefaultSupport(logp.NewLogger("beater_test"), info, nil) + require.NoError(t, err) + outputGroup, err := outputs.Load(supporter, info, nil, "elasticsearch", beatConfig.Output.Config()) + require.NoError(t, err) + pub = dummyPipeline(cfg, info, outputGroup.Clients...) 
+ } else { + // don't capture events + pub = dummyPipeline(cfg, info) + } + + instrumentation, err := instrumentation.New(combinedConfig, info.Beat, info.Version) + require.NoError(t, err) + return &beat.Beat{ + Publisher: pub, + Info: info, + Config: beatConfig, + Instrumentation: instrumentation, + }, combinedConfig +} + func setupBeater( t *testing.T, apmBeat *beat.Beat, ucfg *common.Config, beatConfig *beat.BeatConfig, ) (*testBeater, error) { + tb, err := newTestBeater(t, apmBeat, ucfg, beatConfig) + if err != nil { + return nil, err + } + tb.start() + + listenAddr, err := tb.waitListenAddr(10 * time.Second) + if err != nil { + return nil, err + } + tb.initClient(tb.config, listenAddr) + + res, err := tb.client.Get(tb.baseURL) + require.NoError(t, err) + defer res.Body.Close() + require.Equal(t, http.StatusOK, res.StatusCode) + return tb, nil +} + +func newTestBeater( + t *testing.T, + apmBeat *beat.Beat, + ucfg *common.Config, + beatConfig *beat.BeatConfig, +) (*testBeater, error) { + + core, observedLogs := observer.New(zapcore.DebugLevel) + logger := logp.NewLogger("", zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewTee(in, core) + })) - onboardingDocs := make(chan onboardingDoc, 1) createBeater := NewCreator(CreatorParams{ + Logger: logger, WrapRunServer: func(runServer RunServerFunc) RunServerFunc { - return func(ctx context.Context, args ServerParams) error { - // Wrap the reporter so we can intercept the - // onboarding doc, to extract the listen address. - origReporter := args.Reporter - args.Reporter = func(ctx context.Context, req publish.PendingReq) error { - for _, tf := range req.Transformables { - switch tf := tf.(type) { - case onboardingDoc: - select { - case <-ctx.Done(): - return ctx.Err() - case onboardingDocs <- tf: - } - - case *model.Transaction: - // Add a label to test that everything - // goes through the wrapped reporter. - if tf.Labels == nil { - labels := make(model.Labels) - tf.Labels = &labels - } - (*tf.Labels)["wrapped_reporter"] = true - } + var processor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + for i := range *batch { + event := &(*batch)[i] + if event.Processor != model.TransactionProcessor { + continue + } + // Add a label to test that everything + // goes through the wrapped reporter. + if event.Labels == nil { + event.Labels = common.MapStr{} } - return origReporter(ctx, req) + event.Labels["wrapped_reporter"] = true } - return runServer(ctx, args) + return nil } + return WrapRunServerWithProcessors(runServer, processor) }, }) - // create our beater beatBeater, err := createBeater(apmBeat, ucfg) if err != nil { return nil, err } require.NotNil(t, beatBeater) + t.Cleanup(func() { + beatBeater.Stop() + }) + + return &testBeater{ + beater: beatBeater.(*beater), + b: apmBeat, + logs: observedLogs, + runCh: make(chan error), + }, nil +} - errCh := make(chan error) +// start starts running a beater created with newTestBeater. 
+func (tb *testBeater) start() { go func() { - err := beatBeater.Run(apmBeat) - if err != nil { - errCh <- err - } + tb.runCh <- tb.beater.Run(tb.b) }() +} - tb := &testBeater{beater: beatBeater.(*beater)} - select { - case err := <-errCh: - return nil, err - case o := <-onboardingDocs: - tb.initClient(tb.config, o.listenAddr) - case <-time.After(time.Second * 10): - return nil, errors.New("timeout waiting for server to start listening") +func (tb *testBeater) waitListenAddr(timeout time.Duration) (string, error) { + deadline := time.After(timeout) + for { + for _, entry := range tb.logs.TakeAll() { + const prefix = "Listening on: " + if strings.HasPrefix(entry.Message, prefix) { + listenAddr := entry.Message[len(prefix):] + return listenAddr, nil + } + } + select { + case err := <-tb.runCh: + if err != nil { + return "", err + } + return "", errors.New("server exited cleanly without logging expected message") + case <-deadline: + return "", errors.New("timeout waiting for server to start listening") + case <-time.After(10 * time.Millisecond): + } } - - res, err := tb.client.Get(tb.baseURL) - require.NoError(t, err) - defer res.Body.Close() - require.Equal(t, http.StatusOK, res.StatusCode) - return tb, nil } func (tb *testBeater) initClient(cfg *config.Config, listenAddr string) { @@ -147,16 +257,18 @@ func TestTransformConfigIndex(t *testing.T) { defer srv.Close() cfg := config.DefaultConfig() - cfg.RumConfig.Enabled = newBool(true) + cfg.RumConfig.Enabled = true cfg.RumConfig.SourceMapping.ESConfig.Hosts = []string{srv.URL} if indexPattern != "" { cfg.RumConfig.SourceMapping.IndexPattern = indexPattern } - transformConfig, err := newTransformConfig(beat.Info{Version: "1.2.3"}, cfg) + store, err := newSourcemapStore( + beat.Info{Version: "1.2.3"}, cfg.RumConfig.SourceMapping, nil, + elasticsearch.NewClient, + ) require.NoError(t, err) - require.NotNil(t, transformConfig.RUM.SourcemapStore) - transformConfig.RUM.SourcemapStore.Added(context.Background(), "name", "version", "path") + store.NotifyAdded(context.Background(), "name", "version", "path") require.Len(t, requestPaths, 1) path := requestPaths[0] @@ -168,33 +280,66 @@ func TestTransformConfigIndex(t *testing.T) { t.Run("with-observer-version", func(t *testing.T) { test(t, "blah-%{[observer.version]}-blah", "blah-1.2.3-blah") }) } -func TestTransformConfig(t *testing.T) { - test := func(rumEnabled, sourcemapEnabled *bool, expectSourcemapStore bool) { - cfg := config.DefaultConfig() - cfg.RumConfig.Enabled = rumEnabled - cfg.RumConfig.SourceMapping.Enabled = sourcemapEnabled - transformConfig, err := newTransformConfig(beat.Info{Version: "1.2.3"}, cfg) - require.NoError(t, err) - if expectSourcemapStore { - assert.NotNil(t, transformConfig.RUM.SourcemapStore) - } else { - assert.Nil(t, transformConfig.RUM.SourcemapStore) - } - } +var validSourcemap, _ = ioutil.ReadFile("../testdata/sourcemap/bundle.js.map") - test(nil, nil, false) - test(nil, newBool(false), false) - test(nil, newBool(true), false) +func TestStoreUsesRUMElasticsearchConfig(t *testing.T) { + var called bool + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + w.Write(validSourcemap) + })) + defer ts.Close() - test(newBool(false), nil, false) - test(newBool(false), newBool(false), false) - test(newBool(false), newBool(true), false) + cfg := config.DefaultConfig() + cfg.RumConfig.Enabled = true + cfg.RumConfig.SourceMapping.Enabled = true + cfg.RumConfig.SourceMapping.ESConfig = elasticsearch.DefaultConfig() + 
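+	// Point the sourcemap store at the test server; Fetch below must go
+	// through the RUM Elasticsearch config for `called` to flip to true.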
+	cfg.RumConfig.SourceMapping.ESConfig.Hosts = []string{ts.URL}
 
-	test(newBool(true), nil, true) // sourcemap.enabled is true by default
-	test(newBool(true), newBool(false), false)
-	test(newBool(true), newBool(true), true)
+	store, err := newSourcemapStore(
+		beat.Info{Version: "1.2.3"}, cfg.RumConfig.SourceMapping, nil,
+		elasticsearch.NewClient,
+	)
+	require.NoError(t, err)
+	// Check that the provided RUM Elasticsearch config was used and
+	// Fetch() goes to the test server.
+	_, err = store.Fetch(context.Background(), "app", "1.0", "/bundle/path")
+	require.NoError(t, err)
+
+	assert.True(t, called)
 }
 
-func newBool(v bool) *bool {
-	return &v
+func TestFleetStoreUsed(t *testing.T) {
+	var called bool
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		called = true
+		wr := zlib.NewWriter(w)
+		defer wr.Close()
+		wr.Write([]byte(fmt.Sprintf(`{"sourceMap":%s}`, validSourcemap)))
+	}))
+	defer ts.Close()
+
+	cfg := config.DefaultConfig()
+	cfg.RumConfig.Enabled = true
+	cfg.RumConfig.SourceMapping.Enabled = true
+	cfg.RumConfig.SourceMapping.Metadata = []config.SourceMapMetadata{{
+		ServiceName:    "app",
+		ServiceVersion: "1.0",
+		BundleFilepath: "/bundle/path",
+		SourceMapURL:   "/my/path",
+	}}
+
+	fleetCfg := &config.Fleet{
+		Hosts:        []string{ts.URL[7:]},
+		Protocol:     "http",
+		AccessAPIKey: "my-key",
+		TLS:          nil,
+	}
+
+	store, err := newSourcemapStore(beat.Info{Version: "1.2.3"}, cfg.RumConfig.SourceMapping, fleetCfg, nil)
+	require.NoError(t, err)
+	_, err = store.Fetch(context.Background(), "app", "1.0", "/bundle/path")
+	require.NoError(t, err)
+
+	assert.True(t, called)
 }
diff --git a/beater/beatertest/monitoring.go b/beater/beatertest/monitoring.go
index 36ed5bfb08e..8d91b79bffb 100644
--- a/beater/beatertest/monitoring.go
+++ b/beater/beatertest/monitoring.go
@@ -26,17 +26,14 @@ import (
 )
 
 // CompareMonitoringInt matches expected with real monitoring counters and
-// returns false and an a string showind diffs if not matching
+// returns false and a string showing diffs if not matching.
+//
+// The caller is expected to call ClearRegistry before invoking some code
+// path that should update monitoring counters.
 func CompareMonitoringInt(
-	handler func(c *request.Context),
-	c *request.Context,
 	expected map[request.ResultID]int,
 	m map[request.ResultID]*monitoring.Int,
 ) (bool, string) {
-
-	ClearRegistry(m)
-	handler(c)
-
 	var result string
 	for _, id := range AllRequestResultIDs() {
 		monitoringIntVal := int64(0)
@@ -52,7 +49,6 @@ func CompareMonitoringInt(
 			result += fmt.Sprintf("[%s] Expected: %d, Received: %d", id, expectedVal, monitoringIntVal)
 		}
 	}
-
 	return len(result) == 0, result
 }
diff --git a/beater/beatertest/reporter.go b/beater/beatertest/reporter.go
deleted file mode 100644
index 8183e2a6619..00000000000
--- a/beater/beatertest/reporter.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package beatertest - -import ( - "context" - - "github.com/elastic/apm-server/publish" -) - -// NilReporter is a noop implementation of the reporter interface -func NilReporter(ctx context.Context, p publish.PendingReq) error { return nil } - -// ErrorReporterFn returns a function implementing the reporter interface, returning given error -func ErrorReporterFn(err error) func(ctx context.Context, p publish.PendingReq) error { - return func(ctx context.Context, p publish.PendingReq) error { - return err - } -} diff --git a/beater/checkintegration.go b/beater/checkintegration.go new file mode 100644 index 00000000000..a60e0f08029 --- /dev/null +++ b/beater/checkintegration.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/apm-server/kibana" + "github.com/elastic/go-elasticsearch/v7/esapi" +) + +// checkIntegrationInstalled checks if the APM integration is installed by querying Kibana +// and/or Elasticsearch, returning nil if and only if it is installed. +func checkIntegrationInstalled( + ctx context.Context, + kibanaClient kibana.Client, + esClient elasticsearch.Client, + logger *logp.Logger, +) error { + if kibanaClient != nil { + installed, err := checkIntegrationInstalledKibana(ctx, kibanaClient, logger) + if err != nil { + // We only return the Kibana error if we have no Elasticsearch client, + // as we may not have sufficient privileges to query the Fleet API. + if esClient == nil { + return fmt.Errorf("error querying Kibana for integration package status: %w", err) + } + } else if !installed { + // We were able to query Kibana, but the package is not yet installed. + // We should continue querying the package status via Kibana, as it is + // more authoritative than checking for index template installation. + return errors.New("integration package not yet installed") + } + // Fall through and query Elasticsearch (if we have a client). 
Kibana may prematurely + // report packages as installed: https://github.com/elastic/kibana/issues/108649 + } + if esClient != nil { + installed, err := checkIntegrationInstalledElasticsearch(ctx, esClient, logger) + if err != nil { + return fmt.Errorf("error querying Elasticsearch for integration index templates: %w", err) + } else if !installed { + return errors.New("integration index templates not installed") + } + } + return nil +} + +// checkIntegrationInstalledKibana checks if the APM integration package +// is installed by querying Kibana. +func checkIntegrationInstalledKibana(ctx context.Context, kibanaClient kibana.Client, logger *logp.Logger) (bool, error) { + resp, err := kibanaClient.Send(ctx, "GET", "/api/fleet/epm/packages/apm", nil, nil, nil) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := ioutil.ReadAll(resp.Body) + return false, fmt.Errorf("unexpected HTTP status: %s (%s)", resp.Status, bytes.TrimSpace(body)) + } + var result struct { + Response struct { + Status string `json:"status"` + } `json:"response"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return false, errors.Wrap(err, "error decoding integration package response") + } + logger.Infof("integration package status: %s", result.Response.Status) + return result.Response.Status == "installed", nil +} + +func checkIntegrationInstalledElasticsearch(ctx context.Context, esClient elasticsearch.Client, logger *logp.Logger) (bool, error) { + // TODO(axw) generate the list of expected index templates. + templates := []string{ + "traces-apm", + "traces-apm.sampled", + "metrics-apm.app", + "metrics-apm.internal", + "logs-apm.error", + } + // IndicesGetIndexTemplateRequest accepts a slice of template names, + // but the REST API expects just one index template name. Query them + // in parallel. + g, ctx := errgroup.WithContext(ctx) + for _, template := range templates { + template := template // copy for closure + g.Go(func() error { + req := esapi.IndicesGetIndexTemplateRequest{Name: []string{template}} + resp, err := req.Do(ctx, esClient) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.IsError() { + body, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf("unexpected HTTP status: %s (%s)", resp.Status(), bytes.TrimSpace(body)) + } + return nil + }) + } + err := g.Wait() + return err == nil, err +} diff --git a/beater/config/agentconfig.go b/beater/config/agentconfig.go new file mode 100644 index 00000000000..30bc9ca4a51 --- /dev/null +++ b/beater/config/agentconfig.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package config + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "strings" + "time" +) + +// KibanaAgentConfig holds remote agent config information +type KibanaAgentConfig struct { + Cache Cache `config:"cache"` +} + +// Cache holds config information about cache expiration +type Cache struct { + Expiration time.Duration `config:"expiration"` +} + +// defaultKibanaAgentConfig holds the default KibanaAgentConfig +func defaultKibanaAgentConfig() KibanaAgentConfig { + return KibanaAgentConfig{ + Cache: Cache{ + Expiration: 30 * time.Second, + }, + } +} + +// AgentConfig defines configuration for agents. +type AgentConfig struct { + Service Service `config:"service"` + AgentName string `config:"agent.name"` + Etag string `config:"etag"` + Config map[string]string +} + +func (s *AgentConfig) setup() error { + if s.Config == nil { + return errInvalidAgentConfigMissingConfig + } + + if s.Etag == "" { + m, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("error generating etag for %s: %v", s.Service, err) + } + s.Etag = fmt.Sprintf("%x", md5.Sum(m)) + } + return nil +} + +// Service defines a unique way of identifying a running agent. +type Service struct { + Name string `config:"name"` + Environment string `config:"environment"` +} + +// String implements the Stringer interface. +func (s *Service) String() string { + var name, env string + if s.Name != "" { + name = "service.name=" + s.Name + } + if s.Environment != "" { + env = "service.environment=" + s.Environment + } + return strings.Join([]string{name, env}, " ") +} diff --git a/beater/config/aggregation.go b/beater/config/aggregation.go index d674dedabd6..8d194f574c9 100644 --- a/beater/config/aggregation.go +++ b/beater/config/aggregation.go @@ -25,7 +25,6 @@ const ( defaultTransactionAggregationInterval = time.Minute defaultTransactionAggregationMaxGroups = 10000 defaultTransactionAggregationHDRHistogramSignificantFigures = 2 - defaultTransactionAggregationRUMUserAgentLRUSize = 5000 defaultServiceDestinationAggregationInterval = time.Minute defaultServiceDestinationAggregationMaxGroups = 10000 @@ -43,7 +42,6 @@ type TransactionAggregationConfig struct { Interval time.Duration `config:"interval" validate:"min=1"` MaxTransactionGroups int `config:"max_groups" validate:"min=1"` HDRHistogramSignificantFigures int `config:"hdrhistogram_significant_figures" validate:"min=1, max=5"` - RUMUserAgentLRUSize int `config:"rum.user_agent.lru_size" validate:"min=1"` } // ServiceDestinationAggregationConfig holds configuration related to span metrics aggregation for service maps. @@ -56,10 +54,10 @@ type ServiceDestinationAggregationConfig struct { func defaultAggregationConfig() AggregationConfig { return AggregationConfig{ Transactions: TransactionAggregationConfig{ + Enabled: true, Interval: defaultTransactionAggregationInterval, MaxTransactionGroups: defaultTransactionAggregationMaxGroups, HDRHistogramSignificantFigures: defaultTransactionAggregationHDRHistogramSignificantFigures, - RUMUserAgentLRUSize: defaultTransactionAggregationRUMUserAgentLRUSize, }, ServiceDestinations: ServiceDestinationAggregationConfig{ Enabled: true, diff --git a/beater/config/api_key.go b/beater/config/api_key.go deleted file mode 100644 index e278960bbdc..00000000000 --- a/beater/config/api_key.go +++ /dev/null @@ -1,74 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package config - -import ( - "github.com/pkg/errors" - - "github.com/elastic/beats/v7/libbeat/logp" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/elasticsearch" -) - -const apiKeyLimit = 100 - -// APIKeyConfig can be used for authorizing against the APM Server via API Keys. -type APIKeyConfig struct { - Enabled bool `config:"enabled"` - LimitPerMin int `config:"limit"` - ESConfig *elasticsearch.Config `config:"elasticsearch"` - - esConfigured bool -} - -// IsEnabled returns whether or not API Key authorization is enabled -func (c *APIKeyConfig) IsEnabled() bool { - return c != nil && c.Enabled -} - -func (c *APIKeyConfig) setup(log *logp.Logger, outputESCfg *common.Config) error { - if c == nil || !c.Enabled || c.esConfigured || outputESCfg == nil { - return nil - } - log.Info("Falling back to elasticsearch output for API Key usage") - if err := outputESCfg.Unpack(c.ESConfig); err != nil { - return errors.Wrap(err, "unpacking Elasticsearch config into API key config") - } - return nil - -} - -func defaultAPIKeyConfig() *APIKeyConfig { - return &APIKeyConfig{Enabled: false, LimitPerMin: apiKeyLimit, ESConfig: elasticsearch.DefaultConfig()} -} - -func (c *APIKeyConfig) Unpack(inp *common.Config) error { - cfg := tmpAPIKeyConfig(*defaultAPIKeyConfig()) - if err := inp.Unpack(&cfg); err != nil { - return errors.Wrap(err, "error unpacking api_key config") - } - *c = APIKeyConfig(cfg) - if inp.HasField("elasticsearch") { - c.esConfigured = true - } - return nil -} - -type tmpAPIKeyConfig APIKeyConfig diff --git a/beater/config/api_key_test.go b/beater/config/api_key_test.go deleted file mode 100644 index 7edb66b247e..00000000000 --- a/beater/config/api_key_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
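Both the removed APIKeyConfig.Unpack above (via the tmpAPIKeyConfig conversion) and its replacement APIKeyAgentAuth.Unpack in auth.go below lean on the same Go idiom: convert to a locally defined type with identical fields but no methods, so the decoder cannot recurse back into the custom unmarshaler, then record that the section was explicitly configured. A minimal sketch of the idiom, shown with encoding/json rather than go-ucfg so it runs standalone (the APIKey type here is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type APIKey struct {
	Enabled    bool `json:"enabled"`
	configured bool // set only when the section appears in the input
}

func (a *APIKey) UnmarshalJSON(data []byte) error {
	type plain APIKey // defined type: same fields, no methods, so no recursion
	if err := json.Unmarshal(data, (*plain)(a)); err != nil {
		return err
	}
	a.configured = true
	return nil
}

func main() {
	var k APIKey
	if err := json.Unmarshal([]byte(`{"enabled":true}`), &k); err != nil {
		panic(err)
	}
	fmt.Println(k.Enabled, k.configured) // true true
}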
- -package config - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - - "github.com/elastic/apm-server/elasticsearch" -) - -func TestAPIKeyConfig_IsEnabled(t *testing.T) { - assert.False(t, (&APIKeyConfig{}).IsEnabled()) - assert.False(t, defaultAPIKeyConfig().IsEnabled()) - assert.True(t, (&APIKeyConfig{Enabled: true}).IsEnabled()) -} - -func TestAPIKeyConfig_ESConfig(t *testing.T) { - for name, tc := range map[string]struct { - cfg *common.Config - esCfg *common.Config - - expectedConfig *APIKeyConfig - expectedErr error - }{ - "default": { - cfg: common.NewConfig(), - expectedConfig: defaultAPIKeyConfig(), - }, - "ES config missing": { - cfg: common.MustNewConfigFrom(`{"enabled": true}`), - expectedConfig: &APIKeyConfig{ - Enabled: true, - LimitPerMin: apiKeyLimit, - ESConfig: elasticsearch.DefaultConfig()}, - }, - "ES configured": { - cfg: common.MustNewConfigFrom(`{"enabled": true, "elasticsearch.timeout":"7s"}`), - esCfg: common.MustNewConfigFrom(`{"hosts":["186.0.0.168:9200"]}`), - expectedConfig: &APIKeyConfig{ - Enabled: true, - LimitPerMin: apiKeyLimit, - ESConfig: &elasticsearch.Config{ - Hosts: elasticsearch.Hosts{"localhost:9200"}, - Protocol: "http", - Timeout: 7 * time.Second}, - esConfigured: true}, - }, - "disabled with ES from output": { - cfg: common.NewConfig(), - esCfg: common.MustNewConfigFrom(`{"hosts":["192.0.0.168:9200"]}`), - expectedConfig: defaultAPIKeyConfig(), - }, - "ES from output": { - cfg: common.MustNewConfigFrom(`{"enabled": true, "limit": 20}`), - esCfg: common.MustNewConfigFrom(`{"hosts":["192.0.0.168:9200"],"username":"foo","password":"bar"}`), - expectedConfig: &APIKeyConfig{ - Enabled: true, - LimitPerMin: 20, - ESConfig: &elasticsearch.Config{ - Timeout: 5 * time.Second, - Username: "foo", - Password: "bar", - Protocol: "http", - Hosts: elasticsearch.Hosts{"192.0.0.168:9200"}}}, - }, - } { - t.Run(name, func(t *testing.T) { - var apiKeyConfig APIKeyConfig - require.NoError(t, tc.cfg.Unpack(&apiKeyConfig)) - err := apiKeyConfig.setup(logp.NewLogger("api_key"), tc.esCfg) - if tc.expectedErr == nil { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tc.expectedConfig, &apiKeyConfig) - - }) - } - -} diff --git a/beater/config/auth.go b/beater/config/auth.go new file mode 100644 index 00000000000..6bc308baf64 --- /dev/null +++ b/beater/config/auth.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
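The AgentConfig.setup method in agentconfig.go earlier in this diff derives a missing etag from the MD5 of the JSON-encoded config, so identical agent_config entries always hash to the same value. The derivation can be exercised in isolation; a small sketch (etagFor is a hypothetical helper, not part of the change):

package main

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
)

// etagFor mirrors the fallback in AgentConfig.setup: marshal, then MD5.
func etagFor(v interface{}) (string, error) {
	m, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", md5.Sum(m)), nil
}

func main() {
	// encoding/json sorts map keys, so the etag is stable across runs.
	etag, err := etagFor(map[string]string{"transaction_sample_rate": "0.5"})
	if err != nil {
		panic(err)
	}
	fmt.Println(etag)
}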
+ +package config + +import ( + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/elasticsearch" +) + +// AgentAuth holds config related to agent auth. +type AgentAuth struct { + Anonymous AnonymousAgentAuth `config:"anonymous"` + APIKey APIKeyAgentAuth `config:"api_key"` + SecretToken string `config:"secret_token"` +} + +func (a *AgentAuth) setAnonymousDefaults(logger *logp.Logger, rumEnabled bool) error { + if a.Anonymous.configured { + // Anonymous access explicitly configured. + return nil + } + if !a.APIKey.Enabled && a.SecretToken == "" { + // No auth is required. + return nil + } + if rumEnabled { + logger.Info("anonymous access enabled for RUM") + a.Anonymous.Enabled = true + } + return nil +} + +// APIKeyAgentAuth holds config related to API Key auth for agents. +type APIKeyAgentAuth struct { + Enabled bool `config:"enabled"` + LimitPerMin int `config:"limit"` + ESConfig *elasticsearch.Config `config:"elasticsearch"` + + configured bool // api_key explicitly defined + esConfigured bool // api_key.elasticsearch explicitly defined +} + +func (a *APIKeyAgentAuth) Unpack(in *common.Config) error { + type underlyingAPIKeyAgentAuth APIKeyAgentAuth + if err := in.Unpack((*underlyingAPIKeyAgentAuth)(a)); err != nil { + return errors.Wrap(err, "error unpacking api_key config") + } + a.configured = true + a.esConfigured = in.HasField("elasticsearch") + return nil +} + +func (a *APIKeyAgentAuth) setup(log *logp.Logger, outputESCfg *common.Config) error { + if !a.Enabled || a.esConfigured || outputESCfg == nil { + return nil + } + log.Info("Falling back to elasticsearch output for API Key usage") + if err := outputESCfg.Unpack(&a.ESConfig); err != nil { + return errors.Wrap(err, "unpacking Elasticsearch config into API key config") + } + return nil +} + +// AnonymousAgentAuth holds config related to anonymous access for agents. +// +// If RUM is enabled, and either secret_token or api_key auth is defined, +// then anonymous auth will be enabled for RUM by default. +type AnonymousAgentAuth struct { + Enabled bool `config:"enabled"` + AllowAgent []string `config:"allow_agent"` + AllowService []string `config:"allow_service"` + RateLimit RateLimit `config:"rate_limit"` + + configured bool // anon explicitly defined +} + +func (a *AnonymousAgentAuth) Unpack(in *common.Config) error { + type underlyingAnonymousAgentAuth AnonymousAgentAuth + if err := in.Unpack((*underlyingAnonymousAgentAuth)(a)); err != nil { + return errors.Wrap(err, "error unpacking anon config") + } + a.configured = true + return nil +} + +func defaultAgentAuth() AgentAuth { + return AgentAuth{ + Anonymous: defaultAnonymousAgentAuth(), + APIKey: defaultAPIKeyAgentAuth(), + } +} + +func defaultAnonymousAgentAuth() AnonymousAgentAuth { + return AnonymousAgentAuth{ + Enabled: false, + AllowAgent: []string{"rum-js", "js-base"}, + RateLimit: RateLimit{ + EventLimit: 300, + IPLimit: 1000, + }, + } +} + +func defaultAPIKeyAgentAuth() APIKeyAgentAuth { + return APIKeyAgentAuth{ + Enabled: false, + LimitPerMin: 100, + ESConfig: elasticsearch.DefaultConfig(), + } +} diff --git a/beater/config/auth_test.go b/beater/config/auth_test.go new file mode 100644 index 00000000000..c55c222c826 --- /dev/null +++ b/beater/config/auth_test.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestAPIKeyAgentAuth_ESConfig(t *testing.T) { + for name, tc := range map[string]struct { + cfg *common.Config + esCfg *common.Config + expectedConfig APIKeyAgentAuth + }{ + "default": { + cfg: nil, + expectedConfig: defaultAPIKeyAgentAuth(), + }, + "ES config missing": { + cfg: common.MustNewConfigFrom(`{"enabled": true}`), + expectedConfig: APIKeyAgentAuth{ + Enabled: true, + LimitPerMin: 100, + ESConfig: elasticsearch.DefaultConfig(), + configured: true, + }, + }, + "ES configured": { + cfg: common.MustNewConfigFrom(`{"enabled": true, "elasticsearch.timeout":"7s"}`), + esCfg: common.MustNewConfigFrom(`{"hosts":["186.0.0.168:9200"]}`), + expectedConfig: APIKeyAgentAuth{ + Enabled: true, + LimitPerMin: 100, + ESConfig: &elasticsearch.Config{ + Hosts: elasticsearch.Hosts{"localhost:9200"}, + Protocol: "http", + Timeout: 7 * time.Second, + MaxRetries: 3, + Backoff: elasticsearch.DefaultBackoffConfig, + }, + configured: true, + esConfigured: true, + }, + }, + "disabled with ES from output": { + cfg: nil, + esCfg: common.MustNewConfigFrom(`{"hosts":["192.0.0.168:9200"]}`), + expectedConfig: defaultAPIKeyAgentAuth(), + }, + "ES from output": { + cfg: common.MustNewConfigFrom(`{"enabled": true, "limit": 20}`), + esCfg: common.MustNewConfigFrom(`{"hosts":["192.0.0.168:9200"],"username":"foo","password":"bar"}`), + expectedConfig: APIKeyAgentAuth{ + Enabled: true, + LimitPerMin: 20, + ESConfig: &elasticsearch.Config{ + Timeout: 5 * time.Second, + Username: "foo", + Password: "bar", + Protocol: "http", + Hosts: elasticsearch.Hosts{"192.0.0.168:9200"}, + MaxRetries: 3, + Backoff: elasticsearch.DefaultBackoffConfig, + }, + configured: true, + }, + }, + } { + t.Run(name, func(t *testing.T) { + for _, key := range []string{"api_key", "auth.api_key"} { + input := common.NewConfig() + if tc.cfg != nil { + input.SetChild(key, -1, tc.cfg) + } + cfg, err := NewConfig(input, tc.esCfg) + require.NoError(t, err) + assert.Equal(t, tc.expectedConfig, cfg.AgentAuth.APIKey) + } + }) + } +} + +func TestAnonymousAgentAuth(t *testing.T) { + for name, tc := range map[string]struct { + cfg *common.Config + expectedConfig AnonymousAgentAuth + }{ + "default": { + cfg: common.NewConfig(), + expectedConfig: defaultAnonymousAgentAuth(), + }, + "allow_service": { + cfg: common.MustNewConfigFrom(`{"auth.anonymous.allow_service":["service-one"]}`), + expectedConfig: AnonymousAgentAuth{ + AllowAgent: []string{"rum-js", "js-base"}, + AllowService: []string{"service-one"}, + RateLimit: RateLimit{ + EventLimit: 300, + IPLimit: 1000, + }, + configured: true, + }, 
+ }, + "deprecated_rum_allow_service_names": { + cfg: common.MustNewConfigFrom(`{"rum.allow_service_names":["service-two"]}`), + expectedConfig: AnonymousAgentAuth{ + AllowAgent: []string{"rum-js", "js-base"}, + AllowService: []string{"service-two"}, + RateLimit: RateLimit{ + EventLimit: 300, + IPLimit: 1000, + }, + }, + }, + "deprecated_rum_event_rate": { + cfg: common.MustNewConfigFrom(`{"rum.event_rate.limit":1,"rum.event_rate.lru_size":2}`), + expectedConfig: AnonymousAgentAuth{ + AllowAgent: []string{"rum-js", "js-base"}, + RateLimit: RateLimit{ + EventLimit: 1, + IPLimit: 2, + }, + }, + }, + "deprecated_rum_allow_service_names_conflict": { + cfg: common.MustNewConfigFrom(`{"auth.anonymous.allow_service":["service-one"], "rum.allow_service_names":["service-two"]}`), + expectedConfig: AnonymousAgentAuth{ + AllowAgent: []string{"rum-js", "js-base"}, + AllowService: []string{"service-one"}, + RateLimit: RateLimit{ + EventLimit: 300, + IPLimit: 1000, + }, + configured: true, + }, + }, + } { + t.Run(name, func(t *testing.T) { + cfg, err := NewConfig(tc.cfg, nil) + require.NoError(t, err) + assert.Equal(t, tc.expectedConfig, cfg.AgentAuth.Anonymous) + }) + } +} + +func TestSecretTokenAuth(t *testing.T) { + for name, tc := range map[string]struct { + cfg *common.Config + expected string + }{ + "default": { + cfg: common.NewConfig(), + expected: "", + }, + "secret_token_auth": { + cfg: common.MustNewConfigFrom(`{"auth.secret_token":"token-one"}`), + expected: "token-one", + }, + "deprecated_secret_token": { + cfg: common.MustNewConfigFrom(`{"secret_token":"token-two"}`), + expected: "token-two", + }, + "deprecated_secret_token_conflict": { + cfg: common.MustNewConfigFrom(`{"auth.secret_token":"token-one","secret_token":"token-two"}`), + expected: "token-one", + }, + } { + t.Run(name, func(t *testing.T) { + cfg, err := NewConfig(tc.cfg, nil) + require.NoError(t, err) + assert.Equal(t, tc.expected, cfg.AgentAuth.SecretToken) + }) + } +} diff --git a/beater/config/config.go b/beater/config/config.go index 6b4f2a66333..bfe061215e3 100644 --- a/beater/config/config.go +++ b/beater/config/config.go @@ -19,14 +19,12 @@ package config import ( "net" - "strings" "time" "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" - "github.com/elastic/beats/v7/libbeat/kibana" "github.com/elastic/beats/v7/libbeat/logp" logs "github.com/elastic/apm-server/log" @@ -39,70 +37,51 @@ const ( msgInvalidConfigAgentCfg = "invalid value for `apm-server.agent.config.cache.expiration`, only accepting full seconds" ) -type KibanaConfig struct { - Enabled bool `config:"enabled"` - kibana.ClientConfig `config:",inline"` -} - -func (k *KibanaConfig) Unpack(cfg *common.Config) error { - if err := cfg.Unpack(&k.ClientConfig); err != nil { - return err - } - k.Enabled = cfg.Enabled() - k.Host = strings.TrimRight(k.Host, "/") - - return nil -} - -func defaultKibanaConfig() KibanaConfig { - return KibanaConfig{ - Enabled: false, - ClientConfig: kibana.DefaultClientConfig(), - } -} +var ( + errInvalidAgentConfigMissingConfig = errors.New("agent_config: no config set") +) // Config holds configuration information nested under the key `apm-server` type Config struct { - Host string `config:"host"` - MaxHeaderSize int `config:"max_header_size"` - IdleTimeout time.Duration `config:"idle_timeout"` - ReadTimeout time.Duration `config:"read_timeout"` - WriteTimeout time.Duration `config:"write_timeout"` - 
MaxEventSize int `config:"max_event_size"` - ShutdownTimeout time.Duration `config:"shutdown_timeout"` - TLS *tlscommon.ServerConfig `config:"ssl"` - MaxConnections int `config:"max_connections"` - Expvar *ExpvarConfig `config:"expvar"` - AugmentEnabled bool `config:"capture_personal_data"` - SelfInstrumentation *InstrumentationConfig `config:"instrumentation"` - RumConfig *RumConfig `config:"rum"` - Register *RegisterConfig `config:"register"` - Mode Mode `config:"mode"` - Kibana KibanaConfig `config:"kibana"` - AgentConfig *AgentConfig `config:"agent.config"` - SecretToken string `config:"secret_token"` - APIKeyConfig *APIKeyConfig `config:"api_key"` - JaegerConfig JaegerConfig `config:"jaeger"` - Aggregation AggregationConfig `config:"aggregation"` - Sampling SamplingConfig `config:"sampling"` + // Host holds the hostname or address that the server should bind to + // when listening for requests from agents. + Host string `config:"host"` + + // AgentAuth holds agent auth config. + AgentAuth AgentAuth `config:"auth"` + + MaxHeaderSize int `config:"max_header_size"` + IdleTimeout time.Duration `config:"idle_timeout"` + ReadTimeout time.Duration `config:"read_timeout"` + WriteTimeout time.Duration `config:"write_timeout"` + MaxEventSize int `config:"max_event_size"` + ShutdownTimeout time.Duration `config:"shutdown_timeout"` + TLS *tlscommon.ServerConfig `config:"ssl"` + MaxConnections int `config:"max_connections"` + ResponseHeaders map[string][]string `config:"response_headers"` + Expvar ExpvarConfig `config:"expvar"` + Pprof PprofConfig `config:"pprof"` + AugmentEnabled bool `config:"capture_personal_data"` + SelfInstrumentation InstrumentationConfig `config:"instrumentation"` + RumConfig RumConfig `config:"rum"` + Register RegisterConfig `config:"register"` + Kibana KibanaConfig `config:"kibana"` + KibanaAgentConfig KibanaAgentConfig `config:"agent.config"` + JaegerConfig JaegerConfig `config:"jaeger"` + Aggregation AggregationConfig `config:"aggregation"` + Sampling SamplingConfig `config:"sampling"` + DataStreams DataStreamsConfig `config:"data_streams"` + DefaultServiceEnvironment string `config:"default_service_environment"` + JavaAttacherConfig JavaAttacherConfig `config:"java_attacher"` Pipeline string -} -// ExpvarConfig holds config information about exposing expvar -type ExpvarConfig struct { - Enabled *bool `config:"enabled"` - URL string `config:"url"` -} + AgentConfigs []AgentConfig `config:"agent_config"` -// AgentConfig holds remote agent config information -type AgentConfig struct { - Cache *Cache `config:"cache"` -} - -// Cache holds config information about cache expiration -type Cache struct { - Expiration time.Duration `config:"expiration"` + // WaitReadyInterval holds the interval for checks when waiting for + // the integration package to be installed, and for checking the + // Elasticsearch license level. 
+ WaitReadyInterval time.Duration `config:"wait_ready_interval"` } // NewConfig creates a Config struct based on the default config and the given input params @@ -113,23 +92,29 @@ func NewConfig(ucfg *common.Config, outputESCfg *common.Config) (*Config, error) return nil, errors.Wrap(err, "Error processing configuration") } - if float64(int(c.AgentConfig.Cache.Expiration.Seconds())) != c.AgentConfig.Cache.Expiration.Seconds() { + if float64(int(c.KibanaAgentConfig.Cache.Expiration.Seconds())) != c.KibanaAgentConfig.Cache.Expiration.Seconds() { return nil, errors.New(msgInvalidConfigAgentCfg) } - if outputESCfg != nil && (outputESCfg.HasField("pipeline") || outputESCfg.HasField("pipelines")) { - c.Pipeline = "" + for i := range c.AgentConfigs { + if err := c.AgentConfigs[i].setup(); err != nil { + return nil, err + } + } + + if err := setDeprecatedConfig(c, ucfg, logger); err != nil { + return nil, err } - if err := c.RumConfig.setup(logger, outputESCfg); err != nil { + if err := c.RumConfig.setup(logger, c.DataStreams.Enabled, outputESCfg); err != nil { return nil, err } - if err := c.APIKeyConfig.setup(logger, outputESCfg); err != nil { + if err := c.AgentAuth.setAnonymousDefaults(logger, c.RumConfig.Enabled); err != nil { return nil, err } - if err := c.SelfInstrumentation.setup(logger); err != nil { + if err := c.AgentAuth.APIKey.setup(logger, outputESCfg); err != nil { return nil, err } @@ -137,6 +122,10 @@ func NewConfig(ucfg *common.Config, outputESCfg *common.Config) (*Config, error) return nil, err } + if err := c.Sampling.Tail.setup(logger, c.DataStreams.Enabled, outputESCfg); err != nil { + return nil, err + } + if !c.Sampling.KeepUnsampled && !c.Aggregation.Transactions.Enabled { // Unsampled transactions should only be dropped // when transaction aggregation is enabled in the @@ -149,12 +138,77 @@ func NewConfig(ucfg *common.Config, outputESCfg *common.Config) (*Config, error) "which will lead to incorrect metrics being reported in the APM UI", ) } + + if c.DataStreams.Enabled || (outputESCfg != nil && (outputESCfg.HasField("pipeline") || outputESCfg.HasField("pipelines"))) { + c.Pipeline = "" + } return c, nil } -// IsEnabled indicates whether expvar is enabled or not -func (c *ExpvarConfig) IsEnabled() bool { - return c != nil && (c.Enabled == nil || *c.Enabled) +// setDeprecatedConfig translates deprecated top-level config attributes to the +// current config structure. 
+func setDeprecatedConfig(out *Config, in *common.Config, logger *logp.Logger) error { + type deprecatedRUMEventRateConfig struct { + Limit *int `config:"limit"` + LruSize *int `config:"lru_size"` + } + type deprecatedRUMConfig struct { + EventRate *deprecatedRUMEventRateConfig `config:"event_rate"` + AllowServiceNames []string `config:"allow_service_names"` + } + var deprecatedConfig struct { + APIKey APIKeyAgentAuth `config:"api_key"` + RUM *deprecatedRUMConfig `config:"rum"` + SecretToken string `config:"secret_token"` + } + deprecatedConfig.APIKey = defaultAPIKeyAgentAuth() + if err := in.Unpack(&deprecatedConfig); err != nil { + return err + } + + warnIgnored := func(deprecated, replacement string) { + logger.Warnf("ignoring deprecated config %q as %q is defined", deprecated, replacement) + } + if deprecatedConfig.APIKey.configured { + // "apm-server.api_key" -> "apm-server.auth.api_key" + if out.AgentAuth.APIKey.configured { + warnIgnored("apm-server.api_key", "apm-server.auth.api_key") + } else { + out.AgentAuth.APIKey = deprecatedConfig.APIKey + } + } + if deprecatedConfig.RUM != nil { + // "apm-server.rum.event_rate" -> "apm-server.auth.anonymous.rate_limit" + if deprecatedConfig.RUM.EventRate != nil { + if out.AgentAuth.Anonymous.configured { + warnIgnored("apm-server.rum.event_rate", "apm-server.auth.anonymous") + } else { + if deprecatedConfig.RUM.EventRate.Limit != nil { + out.AgentAuth.Anonymous.RateLimit.EventLimit = *deprecatedConfig.RUM.EventRate.Limit + } + if deprecatedConfig.RUM.EventRate.LruSize != nil { + out.AgentAuth.Anonymous.RateLimit.IPLimit = *deprecatedConfig.RUM.EventRate.LruSize + } + } + } + // "apm-server.rum.allow_service_names" -> "apm-server.auth.anonymous.allow_service" + if len(deprecatedConfig.RUM.AllowServiceNames) > 0 { + if out.AgentAuth.Anonymous.configured { + warnIgnored("apm-server.rum.allow_service_names", "apm-server.auth.anonymous") + } else { + out.AgentAuth.Anonymous.AllowService = deprecatedConfig.RUM.AllowServiceNames + } + } + } + if deprecatedConfig.SecretToken != "" { + // "apm-server.secret_token" -> "apm-server.auth.secret_token" + if out.AgentAuth.SecretToken != "" { + warnIgnored("apm-server.secret_token", "apm-server.auth.secret_token") + } else { + out.AgentAuth.SecretToken = deprecatedConfig.SecretToken + } + } + return nil } // DefaultConfig returns a config with default settings for `apm-server` config options. 
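setDeprecatedConfig above applies one precedence rule throughout: a deprecated key takes effect only when its replacement is unset; when both are set, the deprecated key is ignored with a warning. A self-contained sketch of that rule for secret_token (resolveSecretToken is illustrative only; the real code unpacks both keys from the raw config):

package main

import "fmt"

// resolveSecretToken prefers the new key and warns when the deprecated one
// is shadowed, matching the behavior asserted by TestSecretTokenAuth in
// auth_test.go above.
func resolveSecretToken(newVal, deprecatedVal string, warn func(string)) string {
	if deprecatedVal == "" {
		return newVal
	}
	if newVal != "" {
		warn(`ignoring deprecated "apm-server.secret_token" as "apm-server.auth.secret_token" is defined`)
		return newVal
	}
	return deprecatedVal
}

func main() {
	warn := func(msg string) { fmt.Println("WARN:", msg) }
	fmt.Println(resolveSecretToken("token-one", "token-two", warn)) // token-one, plus a warning
	fmt.Println(resolveSecretToken("", "token-two", warn))          // token-two
}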
@@ -169,19 +223,23 @@ func DefaultConfig() *Config { MaxEventSize: 300 * 1024, // 300 kb ShutdownTimeout: 5 * time.Second, AugmentEnabled: true, - Expvar: &ExpvarConfig{ - Enabled: new(bool), + Expvar: ExpvarConfig{ + Enabled: false, URL: "/debug/vars", }, - RumConfig: defaultRum(), - Register: defaultRegisterConfig(true), - Mode: ModeProduction, - Kibana: defaultKibanaConfig(), - AgentConfig: &AgentConfig{Cache: &Cache{Expiration: 30 * time.Second}}, - Pipeline: defaultAPMPipeline, - APIKeyConfig: defaultAPIKeyConfig(), - JaegerConfig: defaultJaeger(), - Aggregation: defaultAggregationConfig(), - Sampling: defaultSamplingConfig(), + Pprof: PprofConfig{Enabled: false}, + SelfInstrumentation: defaultInstrumentationConfig(), + RumConfig: defaultRum(), + Register: defaultRegisterConfig(), + Kibana: defaultKibanaConfig(), + KibanaAgentConfig: defaultKibanaAgentConfig(), + Pipeline: defaultAPMPipeline, + JaegerConfig: defaultJaeger(), + Aggregation: defaultAggregationConfig(), + Sampling: defaultSamplingConfig(), + DataStreams: defaultDataStreamsConfig(), + AgentAuth: defaultAgentAuth(), + JavaAttacherConfig: defaultJavaAttacherConfig(), + WaitReadyInterval: 5 * time.Second, } } diff --git a/beater/config/config_test.go b/beater/config/config_test.go index 2303c719331..9fe29f88463 100644 --- a/beater/config/config_test.go +++ b/beater/config/config_test.go @@ -19,7 +19,6 @@ package config import ( "crypto/tls" - "fmt" "path/filepath" "testing" "time" @@ -39,16 +38,31 @@ var testdataCertificateConfig = tlscommon.CertificateConfig{ } func TestUnpackConfig(t *testing.T) { - falsy, truthy := false, true + // When unpacking libbeat/kibana.ClientConfig, proxy headers + // are set to nil rather than an empty map like in the default + // instantiated value. + defaultDecodedKibanaClientConfig := defaultKibanaConfig().ClientConfig + defaultDecodedKibanaClientConfig.Transport.Proxy.Headers = nil kibanaNoSlashConfig := DefaultConfig() + kibanaNoSlashConfig.Kibana.ClientConfig = defaultDecodedKibanaClientConfig kibanaNoSlashConfig.Kibana.Enabled = true kibanaNoSlashConfig.Kibana.Host = "kibanahost:5601/proxy" kibanaHeadersConfig := DefaultConfig() + kibanaHeadersConfig.Kibana.ClientConfig = defaultDecodedKibanaClientConfig kibanaHeadersConfig.Kibana.Enabled = true kibanaHeadersConfig.Kibana.Headers = map[string]string{"foo": "bar"} + responseHeadersConfig := DefaultConfig() + responseHeadersConfig.ResponseHeaders = map[string][]string{ + "k1": []string{"v1"}, + "k2": []string{"v2", "v3"}, + } + responseHeadersConfig.RumConfig.ResponseHeaders = map[string][]string{ + "k4": []string{"v4"}, + } + tests := map[string]struct { inpCfg map[string]interface{} outCfg *Config @@ -68,6 +82,10 @@ func TestUnpackConfig(t *testing.T) { "shutdown_timeout": 9 * time.Second, "capture_personal_data": true, "secret_token": "1234random", + "output": map[string]interface{}{ + "backoff.init": time.Second, + "backoff.max": time.Minute, + }, "ssl": map[string]interface{}{ "enabled": true, "key": "../../testdata/tls/key.pem", @@ -85,14 +103,16 @@ func TestUnpackConfig(t *testing.T) { "limit": 7200, "lru_size": 2000, }, - "allow_origins": []string{"example*"}, - "allow_headers": []string{"Authorization"}, + "allow_service_names": []string{"opbeans-rum"}, + "allow_origins": []string{"example*"}, + "allow_headers": []string{"Authorization"}, "source_mapping": map[string]interface{}{ "cache": map[string]interface{}{ "expiration": 8 * time.Minute, }, "index_pattern": "apm-test*", "elasticsearch.hosts": []string{"localhost:9201", 
"localhost:9202"}, + "timeout": "2s", }, "library_pattern": "^custom", "exclude_from_grouping": "^grouping", @@ -118,20 +138,16 @@ func TestUnpackConfig(t *testing.T) { }, "aggregation": map[string]interface{}{ "transactions": map[string]interface{}{ - "enabled": true, + "enabled": false, "interval": "1s", "max_groups": 123, "hdrhistogram_significant_figures": 1, - "rum": map[string]interface{}{ - "user_agent": map[string]interface{}{ - "lru_size": 123, - }, - }, }, "service_destinations": map[string]interface{}{ "max_groups": 456, }, }, + "default_service_environment": "overridden", }, outCfg: &Config{ Host: "localhost:3000", @@ -141,65 +157,105 @@ func TestUnpackConfig(t *testing.T) { ReadTimeout: 3000000000, WriteTimeout: 4000000000, ShutdownTimeout: 9000000000, - SecretToken: "1234random", + AgentAuth: AgentAuth{ + SecretToken: "1234random", + APIKey: APIKeyAgentAuth{ + Enabled: true, + LimitPerMin: 200, + ESConfig: &elasticsearch.Config{ + Hosts: elasticsearch.Hosts{"localhost:9201", "localhost:9202"}, + Protocol: "http", + Timeout: 5 * time.Second, + MaxRetries: 3, + Backoff: elasticsearch.DefaultBackoffConfig, + }, + configured: true, + esConfigured: true, + }, + Anonymous: AnonymousAgentAuth{ + Enabled: true, + AllowService: []string{"opbeans-rum"}, + AllowAgent: []string{"rum-js", "js-base"}, + RateLimit: RateLimit{ + EventLimit: 7200, + IPLimit: 2000, + }, + }, + }, TLS: &tlscommon.ServerConfig{ - Enabled: &truthy, + Enabled: newBool(true), Certificate: testdataCertificateConfig, ClientAuth: 4, CAs: []string{"../../testdata/tls/ca.crt.pem"}, }, AugmentEnabled: true, - Expvar: &ExpvarConfig{ - Enabled: &truthy, + Expvar: ExpvarConfig{ + Enabled: true, URL: "/debug/vars", }, - RumConfig: &RumConfig{ - Enabled: &truthy, - EventRate: &EventRate{ - Limit: 7200, - LruSize: 2000, + Pprof: PprofConfig{ + Enabled: false, + }, + SelfInstrumentation: InstrumentationConfig{ + Profiling: ProfilingConfig{ + CPU: CPUProfiling{ + Interval: 1 * time.Minute, + Duration: 10 * time.Second, + }, + Heap: HeapProfiling{ + Interval: 1 * time.Minute, + }, }, + }, + RumConfig: RumConfig{ + Enabled: true, AllowOrigins: []string{"example*"}, AllowHeaders: []string{"Authorization"}, - SourceMapping: &SourceMapping{ - Cache: &Cache{Expiration: 8 * time.Minute}, + SourceMapping: SourceMapping{ + Enabled: true, + Cache: Cache{Expiration: 8 * time.Minute}, IndexPattern: "apm-test*", ESConfig: &elasticsearch.Config{ - Hosts: elasticsearch.Hosts{"localhost:9201", "localhost:9202"}, - Protocol: "http", - Timeout: 5 * time.Second}, + Hosts: elasticsearch.Hosts{"localhost:9201", "localhost:9202"}, + Protocol: "http", + Timeout: 5 * time.Second, + MaxRetries: 3, + Backoff: elasticsearch.DefaultBackoffConfig, + }, + Metadata: []SourceMapMetadata{}, + Timeout: 2 * time.Second, esConfigured: true, }, LibraryPattern: "^custom", ExcludeFromGrouping: "^grouping", }, - Register: &RegisterConfig{ - Ingest: &IngestConfig{ - Pipeline: &PipelineConfig{ - Enabled: &truthy, - Overwrite: &falsy, + Register: RegisterConfig{ + Ingest: IngestConfig{ + Pipeline: PipelineConfig{ + Enabled: true, + Overwrite: false, Path: filepath.Join("tmp", "definition.json"), }, }, }, Kibana: KibanaConfig{ Enabled: true, - ClientConfig: defaultKibanaConfig().ClientConfig, + ClientConfig: defaultDecodedKibanaClientConfig, }, - AgentConfig: &AgentConfig{Cache: &Cache{Expiration: 2 * time.Minute}}, - Pipeline: defaultAPMPipeline, + KibanaAgentConfig: KibanaAgentConfig{Cache: Cache{Expiration: 2 * time.Minute}}, + Pipeline: defaultAPMPipeline, 
JaegerConfig: JaegerConfig{ GRPC: JaegerGRPCConfig{ Enabled: true, Host: "localhost:12345", TLS: func() *tls.Config { tlsServerConfig, err := tlscommon.LoadTLSServerConfig(&tlscommon.ServerConfig{ - Enabled: &truthy, + Enabled: newBool(true), Certificate: testdataCertificateConfig, ClientAuth: 4, CAs: []string{"../../testdata/tls/ca.crt.pem"}}) require.NoError(t, err) - return tlsServerConfig.BuildModuleConfig("localhost:12345") + return tlsServerConfig.BuildServerConfig("localhost:12345") }(), }, HTTP: JaegerHTTPConfig{ @@ -207,22 +263,12 @@ func TestUnpackConfig(t *testing.T) { Host: "localhost:6789", }, }, - APIKeyConfig: &APIKeyConfig{ - Enabled: true, - LimitPerMin: 200, - ESConfig: &elasticsearch.Config{ - Hosts: elasticsearch.Hosts{"localhost:9201", "localhost:9202"}, - Protocol: "http", - Timeout: 5 * time.Second}, - esConfigured: true, - }, Aggregation: AggregationConfig{ Transactions: TransactionAggregationConfig{ - Enabled: true, + Enabled: false, Interval: time.Second, MaxTransactionGroups: 123, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 123, }, ServiceDestinations: ServiceDestinationAggregationConfig{ Enabled: true, @@ -232,7 +278,22 @@ func TestUnpackConfig(t *testing.T) { }, Sampling: SamplingConfig{ KeepUnsampled: true, + Tail: TailSamplingConfig{ + Enabled: false, + ESConfig: elasticsearch.DefaultConfig(), + Interval: 1 * time.Minute, + IngestRateDecayFactor: 0.25, + StorageDir: "tail_sampling", + StorageGCInterval: 5 * time.Minute, + TTL: 30 * time.Minute, + }, + }, + DefaultServiceEnvironment: "overridden", + DataStreams: DataStreamsConfig{ + Enabled: false, + WaitForIntegration: true, }, + WaitReadyInterval: 5 * time.Second, }, }, "merge config with default": { @@ -248,9 +309,20 @@ func TestUnpackConfig(t *testing.T) { "enabled": true, "url": "/debug/vars", }, + "pprof": map[string]interface{}{ + "enabled": true, + }, "rum": map[string]interface{}{ "enabled": true, "source_mapping": map[string]interface{}{ + "metadata": []map[string]string{ + { + "service.name": "opbeans-rum", + "service.version": "1.2.3", + "bundle.filepath": "/test/e2e/general-usecase/bundle.js.map", + "sourcemap.url": "http://somewhere.com/bundle.js.map", + }, + }, "cache": map[string]interface{}{ "expiration": 7, }, @@ -264,12 +336,18 @@ func TestUnpackConfig(t *testing.T) { }, }, }, - "jaeger.grpc.enabled": true, - "api_key.enabled": true, - "aggregation.transactions.enabled": true, - "aggregation.transactions.rum.user_agent.lru_size": 123, - "aggregation.service_destinations.enabled": false, - "sampling.keep_unsampled": false, + "jaeger.grpc.enabled": true, + "api_key.enabled": true, + "aggregation.transactions.enabled": false, + "aggregation.service_destinations.enabled": false, + "sampling.keep_unsampled": false, + "sampling.tail": map[string]interface{}{ + "enabled": false, + "policies": []map[string]interface{}{{"sample_rate": 0.5}}, + "interval": "2m", + "ingest_rate_decay": 1.0, + }, + "data_streams.wait_for_integration": false, }, outCfg: &Config{ Host: "localhost:3000", @@ -279,57 +357,94 @@ func TestUnpackConfig(t *testing.T) { ReadTimeout: 30000000000, WriteTimeout: 30000000000, ShutdownTimeout: 5000000000, - SecretToken: "1234random", + AgentAuth: AgentAuth{ + SecretToken: "1234random", + APIKey: APIKeyAgentAuth{ + Enabled: true, + LimitPerMin: 100, + ESConfig: elasticsearch.DefaultConfig(), + configured: true, + }, + Anonymous: AnonymousAgentAuth{ + Enabled: true, + AllowAgent: []string{"rum-js", "js-base"}, + RateLimit: RateLimit{ + EventLimit: 300, + IPLimit: 1000, + 
}, + }, + }, TLS: &tlscommon.ServerConfig{ - Enabled: &truthy, + Enabled: newBool(true), Certificate: testdataCertificateConfig, ClientAuth: 0, }, AugmentEnabled: true, - Expvar: &ExpvarConfig{ - Enabled: &truthy, + Expvar: ExpvarConfig{ + Enabled: true, URL: "/debug/vars", }, - RumConfig: &RumConfig{ - Enabled: &truthy, - EventRate: &EventRate{ - Limit: 300, - LruSize: 1000, + Pprof: PprofConfig{ + Enabled: true, + }, + SelfInstrumentation: InstrumentationConfig{ + Profiling: ProfilingConfig{ + CPU: CPUProfiling{ + Interval: 1 * time.Minute, + Duration: 10 * time.Second, + }, + Heap: HeapProfiling{ + Interval: 1 * time.Minute, + }, }, + }, + RumConfig: RumConfig{ + Enabled: true, AllowOrigins: []string{"*"}, AllowHeaders: []string{}, - SourceMapping: &SourceMapping{ - Cache: &Cache{ + SourceMapping: SourceMapping{ + Enabled: true, + Cache: Cache{ Expiration: 7 * time.Second, }, IndexPattern: "apm-*-sourcemap*", ESConfig: elasticsearch.DefaultConfig(), + Metadata: []SourceMapMetadata{ + { + ServiceName: "opbeans-rum", + ServiceVersion: "1.2.3", + BundleFilepath: "/test/e2e/general-usecase/bundle.js.map", + SourceMapURL: "http://somewhere.com/bundle.js.map", + }, + }, + Timeout: 5 * time.Second, }, LibraryPattern: "rum", ExcludeFromGrouping: "^/webpack", }, - Register: &RegisterConfig{ - Ingest: &IngestConfig{ - Pipeline: &PipelineConfig{ - Enabled: &falsy, + Register: RegisterConfig{ + Ingest: IngestConfig{ + Pipeline: PipelineConfig{ + Enabled: false, Path: filepath.Join("ingest", "pipeline", "definition.json"), }, }, }, - Kibana: defaultKibanaConfig(), - AgentConfig: &AgentConfig{Cache: &Cache{Expiration: 30 * time.Second}}, - Pipeline: defaultAPMPipeline, + Kibana: defaultKibanaConfig(), + KibanaAgentConfig: KibanaAgentConfig{Cache: Cache{Expiration: 30 * time.Second}}, + Pipeline: defaultAPMPipeline, JaegerConfig: JaegerConfig{ GRPC: JaegerGRPCConfig{ Enabled: true, Host: "localhost:14250", TLS: func() *tls.Config { tlsServerConfig, err := tlscommon.LoadTLSServerConfig(&tlscommon.ServerConfig{ - Enabled: &truthy, + Enabled: newBool(true), Certificate: testdataCertificateConfig, - ClientAuth: 0}) + ClientAuth: 0, + }) require.NoError(t, err) - return tlsServerConfig.BuildModuleConfig("localhost:14250") + return tlsServerConfig.BuildServerConfig("localhost:14250") }(), }, HTTP: JaegerHTTPConfig{ @@ -337,14 +452,12 @@ func TestUnpackConfig(t *testing.T) { Host: "localhost:14268", }, }, - APIKeyConfig: &APIKeyConfig{Enabled: true, LimitPerMin: 100, ESConfig: elasticsearch.DefaultConfig()}, Aggregation: AggregationConfig{ Transactions: TransactionAggregationConfig{ - Enabled: true, + Enabled: false, Interval: time.Minute, MaxTransactionGroups: 10000, HDRHistogramSignificantFigures: 2, - RUMUserAgentLRUSize: 123, }, ServiceDestinations: ServiceDestinationAggregationConfig{ Enabled: false, @@ -354,7 +467,22 @@ func TestUnpackConfig(t *testing.T) { }, Sampling: SamplingConfig{ KeepUnsampled: false, + Tail: TailSamplingConfig{ + Enabled: false, + Policies: []TailSamplingPolicy{{SampleRate: 0.5}}, + ESConfig: elasticsearch.DefaultConfig(), + Interval: 2 * time.Minute, + IngestRateDecayFactor: 1.0, + StorageDir: "tail_sampling", + StorageGCInterval: 5 * time.Minute, + TTL: 30 * time.Minute, + }, + }, + DataStreams: DataStreamsConfig{ + Enabled: false, + WaitForIntegration: false, }, + WaitReadyInterval: 5 * time.Second, }, }, "kibana trailing slash": { @@ -377,6 +505,20 @@ func TestUnpackConfig(t *testing.T) { }, outCfg: kibanaHeadersConfig, }, + "response headers": { + inpCfg: 
map[string]interface{}{ + "response_headers": map[string]interface{}{ + "k1": "v1", + "k2": []string{"v2", "v3"}, + }, + "rum": map[string]interface{}{ + "response_headers": map[string]interface{}{ + "k4": []string{"v4"}, + }, + }, + }, + outCfg: responseHeadersConfig, + }, } for name, test := range tests { @@ -387,35 +529,21 @@ func TestUnpackConfig(t *testing.T) { cfg, err := NewConfig(inpCfg, nil) require.NoError(t, err) require.NotNil(t, cfg) + if test.outCfg.JaegerConfig.GRPC.TLS != nil || cfg.JaegerConfig.GRPC.TLS != nil { + // tlscommon captures closures for the following callbacks + // setting them to nil to skip these from comparison + cfg.JaegerConfig.GRPC.TLS.VerifyConnection = nil + test.outCfg.JaegerConfig.GRPC.TLS.VerifyConnection = nil + test.outCfg.JaegerConfig.GRPC.TLS.ClientCAs = nil + cfg.JaegerConfig.GRPC.TLS.ClientCAs = nil + } + assert.Equal(t, test.outCfg, cfg) }) } } -func TestPipeline(t *testing.T) { - truthy, falsy := true, false - cases := []struct { - c *PipelineConfig - enabled, overwrite bool - }{ - {c: nil, enabled: false, overwrite: false}, - {c: &PipelineConfig{}, enabled: true, overwrite: false}, //default values - {c: &PipelineConfig{Enabled: &falsy, Overwrite: &truthy}, - enabled: false, overwrite: true}, - {c: &PipelineConfig{Enabled: &truthy, Overwrite: &falsy}, - enabled: true, overwrite: false}, - } - - for idx, test := range cases { - assert.Equal(t, test.enabled, test.c.IsEnabled(), - fmt.Sprintf("<%v> IsEnabled() expected %v", idx, test.enabled)) - assert.Equal(t, test.overwrite, test.c.ShouldOverwrite(), - fmt.Sprintf("<%v> ShouldOverwrite() expected %v", idx, test.overwrite)) - } -} - func TestTLSSettings(t *testing.T) { - t.Run("ClientAuthentication", func(t *testing.T) { for name, tc := range map[string]struct { config map[string]interface{} @@ -476,8 +604,6 @@ func TestTLSSettings(t *testing.T) { }) t.Run("Enabled", func(t *testing.T) { - truthy := true - falsy := false for name, tc := range map[string]struct { tlsServerCfg *tlscommon.ServerConfig expected bool @@ -486,8 +612,8 @@ func TestTLSSettings(t *testing.T) { "SSL": {tlsServerCfg: &tlscommon.ServerConfig{Enabled: nil}, expected: true}, "WithCert": {tlsServerCfg: &tlscommon.ServerConfig{Certificate: tlscommon.CertificateConfig{Certificate: "Cert"}}, expected: true}, "WithCertAndKey": {tlsServerCfg: &tlscommon.ServerConfig{Certificate: tlscommon.CertificateConfig{Certificate: "Cert", Key: "key"}}, expected: true}, - "ConfiguredToFalse": {tlsServerCfg: &tlscommon.ServerConfig{Certificate: tlscommon.CertificateConfig{Certificate: "Cert", Key: "key"}, Enabled: &falsy}, expected: false}, - "ConfiguredToTrue": {tlsServerCfg: &tlscommon.ServerConfig{Enabled: &truthy}, expected: true}, + "ConfiguredToFalse": {tlsServerCfg: &tlscommon.ServerConfig{Certificate: tlscommon.CertificateConfig{Certificate: "Cert", Key: "key"}, Enabled: newBool(false)}, expected: false}, + "ConfiguredToTrue": {tlsServerCfg: &tlscommon.ServerConfig{Enabled: newBool(true)}, expected: true}, } { t.Run(name, func(t *testing.T) { b := tc.expected @@ -514,19 +640,33 @@ func TestAgentConfig(t *testing.T) { t.Run("Valid", func(t *testing.T) { cfg, err := NewConfig(common.MustNewConfigFrom(map[string]string{"agent.config.cache.expiration": "123000ms"}), nil) require.NoError(t, err) - assert.Equal(t, time.Second*123, cfg.AgentConfig.Cache.Expiration) + assert.Equal(t, time.Second*123, cfg.KibanaAgentConfig.Cache.Expiration) }) } +func TestAgentConfigs(t *testing.T) { + cfg, err := 
NewConfig(common.MustNewConfigFrom(`{"agent_config":[{"service.environment":"production","config":{"transaction_sample_rate":0.5}}]}`), nil) + require.NoError(t, err) + assert.NotNil(t, cfg) + assert.Len(t, cfg.AgentConfigs, 1) + assert.NotEmpty(t, cfg.AgentConfigs[0].Etag) +} + func TestNewConfig_ESConfig(t *testing.T) { - ucfg, err := common.NewConfigFrom(`{"rum.enabled":true,"api_key.enabled":true}`) + ucfg, err := common.NewConfigFrom(`{ + "rum.enabled":true, + "api_key.enabled":true, + "data_streams.enabled":true, + "sampling.tail.policies":[{"sample_rate": 0.5}], + }`) require.NoError(t, err) // no es config given cfg, err := NewConfig(ucfg, nil) require.NoError(t, err) assert.Equal(t, elasticsearch.DefaultConfig(), cfg.RumConfig.SourceMapping.ESConfig) - assert.Equal(t, elasticsearch.DefaultConfig(), cfg.APIKeyConfig.ESConfig) + assert.Equal(t, elasticsearch.DefaultConfig(), cfg.AgentAuth.APIKey.ESConfig) + assert.Equal(t, elasticsearch.DefaultConfig(), cfg.Sampling.Tail.ESConfig) // with es config outputESCfg := common.MustNewConfigFrom(`{"hosts":["192.0.0.168:9200"]}`) @@ -534,6 +674,12 @@ func TestNewConfig_ESConfig(t *testing.T) { require.NoError(t, err) assert.NotNil(t, cfg.RumConfig.SourceMapping.ESConfig) assert.Equal(t, []string{"192.0.0.168:9200"}, []string(cfg.RumConfig.SourceMapping.ESConfig.Hosts)) - assert.NotNil(t, cfg.APIKeyConfig.ESConfig) - assert.Equal(t, []string{"192.0.0.168:9200"}, []string(cfg.APIKeyConfig.ESConfig.Hosts)) + assert.NotNil(t, cfg.AgentAuth.APIKey.ESConfig) + assert.Equal(t, []string{"192.0.0.168:9200"}, []string(cfg.AgentAuth.APIKey.ESConfig.Hosts)) + assert.NotNil(t, cfg.Sampling.Tail.ESConfig) + assert.Equal(t, []string{"192.0.0.168:9200"}, []string(cfg.Sampling.Tail.ESConfig.Hosts)) +} + +func newBool(v bool) *bool { + return &v } diff --git a/beater/config/data_streams.go b/beater/config/data_streams.go new file mode 100644 index 00000000000..77121f769d6 --- /dev/null +++ b/beater/config/data_streams.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +// DataStreamsConfig holds data streams configuration. +type DataStreamsConfig struct { + Enabled bool `config:"enabled"` + + // WaitForIntegration controls whether APM Server waits for the Fleet + // integration package to be installed before indexing events. + // + // This config is ignored when running under Elastic Agent; it is intended + // for running APM Server standalone, relying on Fleet to install the integration + // for creating Elasticsearch index templates, ILM policies, and ingest pipelines. + // + // This configuration requires either a connection to Kibana or Elasticsearch. 
+ WaitForIntegration bool `config:"wait_for_integration"` +} + +func defaultDataStreamsConfig() DataStreamsConfig { + return DataStreamsConfig{ + Enabled: false, + WaitForIntegration: true, + } +} diff --git a/beater/config/data_streams_test.go b/beater/config/data_streams_test.go new file mode 100644 index 00000000000..ec2cca967ae --- /dev/null +++ b/beater/config/data_streams_test.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestDataStreamsPipeline(t *testing.T) { + cfg, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{"data_streams.enabled": true}), nil) + require.NoError(t, err) + assert.Equal(t, "", cfg.Pipeline) // enabling data streams disables use of the pipeline +} diff --git a/beater/config/expvar.go b/beater/config/expvar.go new file mode 100644 index 00000000000..107f162b9b2 --- /dev/null +++ b/beater/config/expvar.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
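The data streams changes above interact with pipeline selection at the end of NewConfig: enabling data streams, or configuring a pipeline on the Elasticsearch output, clears the default APM pipeline (this is what TestDataStreamsPipeline pins down). The predicate in isolation, as a rough sketch with an illustrative "apm" standing in for defaultAPMPipeline:

package main

import "fmt"

// effectivePipeline returns the pipeline the server should request, given
// that data streams ship with their own ingest pipelines and an explicit
// output pipeline always wins over the default.
func effectivePipeline(defaultPipeline string, dataStreamsEnabled, outputHasPipeline bool) string {
	if dataStreamsEnabled || outputHasPipeline {
		return ""
	}
	return defaultPipeline
}

func main() {
	fmt.Printf("%q\n", effectivePipeline("apm", true, false))  // "": data streams enabled
	fmt.Printf("%q\n", effectivePipeline("apm", false, true))  // "": output pipeline set
	fmt.Printf("%q\n", effectivePipeline("apm", false, false)) // "apm"
}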
+ +package config + +// ExpvarConfig holds config information about exposing expvar +type ExpvarConfig struct { + Enabled bool `config:"enabled"` + URL string `config:"url"` +} diff --git a/beater/config/helper.go b/beater/config/helper.go index 31b047db4ce..2bad3c76b68 100644 --- a/beater/config/helper.go +++ b/beater/config/helper.go @@ -20,34 +20,8 @@ package config import ( "fmt" "net/url" - "reflect" - "strconv" - - "github.com/elastic/go-ucfg" ) -func init() { - if err := ucfg.RegisterValidator("maxlen", func(v interface{}, param string) error { - if v == nil { - return nil - } - switch v := reflect.ValueOf(v); v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice: - maxlen, err := strconv.ParseInt(param, 0, 64) - if err != nil { - return err - } - - if length := int64(v.Len()); length > maxlen { - return fmt.Errorf("requires length (%d) <= %v", length, param) - } - } - return nil - }); err != nil { - panic(err) - } -} - type urls []*url.URL func (u *urls) Unpack(c interface{}) error { diff --git a/beater/config/instrumentation.go b/beater/config/instrumentation.go index e36b32c8272..598ac1c2c84 100644 --- a/beater/config/instrumentation.go +++ b/beater/config/instrumentation.go @@ -20,7 +20,6 @@ package config import ( "time" - "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/go-ucfg" ) @@ -32,8 +31,8 @@ const ( // InstrumentationConfig holds config information about self instrumenting the APM Server type InstrumentationConfig struct { - Enabled *bool `config:"enabled"` - Environment *string `config:"environment"` + Enabled bool `config:"enabled"` + Environment string `config:"environment"` Hosts urls `config:"hosts"` //TODO(simi): add `validate:"nonzero"` again once https://github.com/elastic/go-ucfg/issues/147 is fixed Profiling ProfilingConfig `config:"profiling"` APIKey string `config:"api_key"` @@ -49,29 +48,10 @@ func (c *InstrumentationConfig) Validate() error { return nil } -// IsEnabled indicates whether self instrumentation is enabled -func (c *InstrumentationConfig) IsEnabled() bool { - // self instrumentation is disabled by default. 
- return c != nil && c.Enabled != nil && *c.Enabled -} - -func (c *InstrumentationConfig) setup(log *logp.Logger) error { - if !c.IsEnabled() { - return nil - } - if err := c.Profiling.CPU.setup(log); err != nil { - return err - } - if err := c.Profiling.Heap.setup(log); err != nil { - return err - } - return nil -} - // ProfilingConfig holds config information about self profiling the APM Server type ProfilingConfig struct { - CPU *CPUProfiling `config:"cpu"` - Heap *HeapProfiling `config:"heap"` + CPU CPUProfiling `config:"cpu"` + Heap HeapProfiling `config:"heap"` } // CPUProfiling holds config information about CPU profiling of the APM Server @@ -81,41 +61,22 @@ type CPUProfiling struct { Duration time.Duration `config:"duration" validate:"positive"` } -// IsEnabled indicates whether CPU profiling is enabled or not -func (p *CPUProfiling) IsEnabled() bool { - return p != nil && p.Enabled -} - -func (p *CPUProfiling) setup(log *logp.Logger) error { - if !p.IsEnabled() { - return nil - } - if p.Interval <= 0 { - p.Interval = defaultCPUProfilingInterval - } - if p.Duration <= 0 { - p.Duration = defaultCPUProfilingDuration - } - return nil -} - // HeapProfiling holds config information about heap profiling of the APM Server type HeapProfiling struct { Enabled bool `config:"enabled"` Interval time.Duration `config:"interval" validate:"positive"` } -// IsEnabled indicates whether heap profiling is enabled or not -func (p *HeapProfiling) IsEnabled() bool { - return p != nil && p.Enabled -} - -func (p *HeapProfiling) setup(log *logp.Logger) error { - if !p.IsEnabled() { - return nil +func defaultInstrumentationConfig() InstrumentationConfig { + return InstrumentationConfig{ + Profiling: ProfilingConfig{ + CPU: CPUProfiling{ + Interval: defaultCPUProfilingInterval, + Duration: defaultCPUProfilingDuration, + }, + Heap: HeapProfiling{ + Interval: defaultHeapProfilingInterval, + }, + }, } - if p.Interval <= 0 { - p.Interval = defaultHeapProfilingInterval - } - return nil } diff --git a/beater/config/instrumentation_test.go b/beater/config/instrumentation_test.go index bf5ef612c37..3f8e13a452f 100644 --- a/beater/config/instrumentation_test.go +++ b/beater/config/instrumentation_test.go @@ -40,7 +40,7 @@ func TestNonzeroHosts(t *testing.T) { t.Run("Valid", func(t *testing.T) { cfg, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{"instrumentation.enabled": true}), nil) require.NoError(t, err) - assert.True(t, *cfg.SelfInstrumentation.Enabled) + assert.True(t, cfg.SelfInstrumentation.Enabled) assert.Empty(t, cfg.SelfInstrumentation.Hosts) }) } diff --git a/beater/config/integration.go b/beater/config/integration.go new file mode 100644 index 00000000000..9c6c61d509f --- /dev/null +++ b/beater/config/integration.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" +) + +func NewIntegrationConfig(rootConfig *common.Config) (*IntegrationConfig, error) { + config := &IntegrationConfig{ + DataStream: &DataStream{ + Namespace: "default", + }, + } + if err := rootConfig.Unpack(config); err != nil { + return nil, err + } + if config.APMServer == nil { + return nil, errors.New("'apm-server' not found in integration config") + } + return config, nil +} + +// IntegrationConfig that comes from Elastic Agent +type IntegrationConfig struct { + ID string `config:"id"` + Name string `config:"name"` + Revision int `config:"revision"` + Type string `config:"type"` + UseOutput string `config:"use_output"` + Meta *Meta `config:"meta"` + DataStream *DataStream `config:"data_stream"` + APMServer *common.Config `config:"apm-server"` + Fleet Fleet `config:"fleet"` +} + +type DataStream struct { + Namespace string `config:"namespace"` +} + +type Meta struct { + Package *Package `config:"package"` +} + +type Package struct { + Name string `config:"name"` + Version string `config:"version"` +} + +type Fleet struct { + Hosts []string `config:"hosts"` + Protocol string `config:"protocol"` + AccessAPIKey string `config:"access_api_key"` + TLS *tlscommon.Config `config:"ssl"` +} diff --git a/beater/config/integration_test.go b/beater/config/integration_test.go new file mode 100644 index 00000000000..5368caa964a --- /dev/null +++ b/beater/config/integration_test.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
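NewIntegrationConfig above seeds DataStream.Namespace with "default" before unpacking, so the namespace changes only when the Elastic Agent policy sets data_stream.namespace. A stdlib-only sketch of that defaulting, with plain map lookups standing in for common.Config.Unpack:

package main

import "fmt"

func namespaceFrom(policy map[string]interface{}) string {
	namespace := "default" // seeded before unpacking, as in NewIntegrationConfig
	if ds, ok := policy["data_stream"].(map[string]interface{}); ok {
		if ns, ok := ds["namespace"].(string); ok && ns != "" {
			namespace = ns
		}
	}
	return namespace
}

func main() {
	fmt.Println(namespaceFrom(map[string]interface{}{})) // default
	fmt.Println(namespaceFrom(map[string]interface{}{
		"data_stream": map[string]interface{}{"namespace": "prod"},
	})) // prod
}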
+ +package config_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestIntegrationConfigMissingAPMServer(t *testing.T) { + cfg, err := config.NewIntegrationConfig(common.NewConfig()) + assert.Error(t, err) + assert.Nil(t, cfg) + assert.EqualError(t, err, "'apm-server' not found in integration config") +} + +func TestIntegrationConfigValid(t *testing.T) { + cfg, err := config.NewIntegrationConfig(common.MustNewConfigFrom(map[string]interface{}{ + "apm-server": map[string]interface{}{}, + })) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.NotNil(t, cfg.APMServer) +} diff --git a/beater/config/jaeger.go b/beater/config/jaeger.go index 3253ed52fcd..0157c89c0fd 100644 --- a/beater/config/jaeger.go +++ b/beater/config/jaeger.go @@ -57,7 +57,7 @@ func (c *JaegerConfig) setup(cfg *Config) error { if err != nil { return err } - c.GRPC.TLS = tlsServerConfig.BuildModuleConfig(c.GRPC.Host) + c.GRPC.TLS = tlsServerConfig.BuildServerConfig(c.GRPC.Host) } return nil } diff --git a/beater/config/java_attacher.go b/beater/config/java_attacher.go new file mode 100644 index 00000000000..abd03e5ac94 --- /dev/null +++ b/beater/config/java_attacher.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import "fmt" + +// JavaAttacherConfig holds configuration information for running a java +// attacher jarfile. +type JavaAttacherConfig struct { + Enabled bool `config:"enabled"` + DiscoveryRules []map[string]string `config:"discovery-rules"` + Config map[string]string `config:"config"` + JavaBin string + DownloadAgentVersion string `config:"download-agent-version"` +} + +func (j JavaAttacherConfig) setup() error { + if !j.Enabled { + return nil + } + for _, rule := range j.DiscoveryRules { + if len(rule) != 1 { + return fmt.Errorf("unexpected discovery rule format: %v", rule) + } + for flag := range rule { + if _, ok := allowlist[flag]; !ok { + return fmt.Errorf("unrecognized discovery rule: %s", rule) + } + } + } + return nil +} + +var allowlist = map[string]struct{}{ + "include-all": {}, + "include-main": {}, + "include-vmarg": {}, + "include-user": {}, + "exclude-main": {}, + "exclude-vmarg": {}, + "exclude-user": {}, +} + +func defaultJavaAttacherConfig() JavaAttacherConfig { + return JavaAttacherConfig{Enabled: false} +} diff --git a/beater/config/java_attacher_test.go b/beater/config/java_attacher_test.go new file mode 100644 index 00000000000..5bce278c4d1 --- /dev/null +++ b/beater/config/java_attacher_test.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJavaAttacherConfig(t *testing.T) { + discoveryRules := []map[string]string{ + map[string]string{"include-main": "main.jar"}, + map[string]string{"include-vmarg": "elastic.apm.agent.attach=true"}, + map[string]string{"exclude-user": "root"}, + } + config := JavaAttacherConfig{ + Enabled: true, + DiscoveryRules: discoveryRules, + } + + assert.NoError(t, config.setup()) + + config.DiscoveryRules = append(discoveryRules, map[string]string{"include-pid": "1001"}) + assert.Error(t, config.setup()) +} diff --git a/beater/config/kibana.go b/beater/config/kibana.go new file mode 100644 index 00000000000..5724f518e22 --- /dev/null +++ b/beater/config/kibana.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +import ( + "strings" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/kibana" +) + +type KibanaConfig struct { + Enabled bool `config:"enabled"` + APIKey string `config:"api_key"` + kibana.ClientConfig `config:",inline"` +} + +func (k *KibanaConfig) Unpack(cfg *common.Config) error { + type kibanaConfig KibanaConfig + if err := cfg.Unpack((*kibanaConfig)(k)); err != nil { + return err + } + k.Enabled = cfg.Enabled() + k.Host = strings.TrimRight(k.Host, "/") + return nil +} + +func defaultKibanaConfig() KibanaConfig { + return KibanaConfig{ + Enabled: false, + ClientConfig: kibana.DefaultClientConfig(), + } +} diff --git a/beater/config/mode.go b/beater/config/mode.go deleted file mode 100644 index ff2a2f58a90..00000000000 --- a/beater/config/mode.go +++ /dev/null @@ -1,42 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package config - -import "strings" - -//Mode enumerates the APM Server env -type Mode uint8 - -const ( - // ModeProduction is the default mode - ModeProduction Mode = iota - - // ModeExperimental should only be used in development environments. It allows to circumvent some restrictions - // on the Intake API for faster development. - ModeExperimental -) - -// Unpack parses the given string into a Mode value -func (m *Mode) Unpack(s string) error { - if strings.ToLower(s) == "experimental" { - *m = ModeExperimental - return nil - } - *m = ModeProduction - return nil -} diff --git a/beater/config/pprof.go b/beater/config/pprof.go new file mode 100644 index 00000000000..e4944cb5a32 --- /dev/null +++ b/beater/config/pprof.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +// PprofConfig holds config information about exposing pprof +type PprofConfig struct { + Enabled bool `config:"enabled"` + BlockProfileRate int `config:"block_profile_rate"` + MemProfileRate int `config:"mem_profile_rate"` + MutexProfileRate int `config:"mutex_profile_rate"` +} diff --git a/beater/config/ratelimit.go b/beater/config/ratelimit.go new file mode 100644 index 00000000000..457022b5193 --- /dev/null +++ b/beater/config/ratelimit.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +// RateLimit holds configuration related to IP and event rate limiting. +type RateLimit struct { + // EventLimit holds the event rate limit per IP, measured in + // events per second. 
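+	//
+	// For example (illustrative values only), a fragment unpacked into
+	// RateLimit might look like:
+	//
+	//	rate_limit:
+	//	  event_limit: 300
+	//	  ip_limit: 1000
+	//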
+ EventLimit int `config:"event_limit"` + + // IPLimit holds the maximum number of client IPs for which we + // will maintain a distinct event rate limit. Once this has been + // reached, clients will begin sharing rate limiters. This is + // done to avoid DDoS attacks. + IPLimit int `config:"ip_limit"` +} diff --git a/beater/config/register.go b/beater/config/register.go index 990406cae02..e9d3a7423b6 100644 --- a/beater/config/register.go +++ b/beater/config/register.go @@ -29,38 +29,30 @@ const ( // RegisterConfig holds ingest config information type RegisterConfig struct { - Ingest *IngestConfig `config:"ingest"` + Ingest IngestConfig `config:"ingest"` } // IngestConfig holds config pipeline ingest information type IngestConfig struct { - Pipeline *PipelineConfig `config:"pipeline"` + Pipeline PipelineConfig `config:"pipeline"` } // PipelineConfig holds config information about registering ingest pipelines type PipelineConfig struct { - Enabled *bool `config:"enabled"` - Overwrite *bool `config:"overwrite"` + Enabled bool `config:"enabled"` + Overwrite bool `config:"overwrite"` Path string } -// IsEnabled indicates whether pipeline registration is enabled or not -func (c *PipelineConfig) IsEnabled() bool { - return c != nil && (c.Enabled == nil || *c.Enabled) -} - -// ShouldOverwrite indicates whether existing pipelines should be overwritten during registration process -func (c *PipelineConfig) ShouldOverwrite() bool { - return c != nil && (c.Overwrite != nil && *c.Overwrite) -} - -func defaultRegisterConfig(pipelineEnabled bool) *RegisterConfig { - return &RegisterConfig{ - Ingest: &IngestConfig{ - Pipeline: &PipelineConfig{ - Enabled: &pipelineEnabled, - Path: paths.Resolve(paths.Home, - filepath.Join("ingest", "pipeline", "definition.json")), - }}, +func defaultRegisterConfig() RegisterConfig { + return RegisterConfig{ + Ingest: IngestConfig{ + Pipeline: PipelineConfig{ + Enabled: true, + Path: paths.Resolve( + paths.Home, filepath.Join("ingest", "pipeline", "definition.json"), + ), + }, + }, } } diff --git a/beater/config/rum.go b/beater/config/rum.go index 4b74f3fb5ee..571945aecf6 100644 --- a/beater/config/rum.go +++ b/beater/config/rum.go @@ -31,53 +31,37 @@ import ( const ( allowAllOrigins = "*" - defaultEventRateLimit = 300 - defaultEventRateLRUSize = 1000 defaultExcludeFromGrouping = "^/webpack" defaultLibraryPattern = "node_modules|bower_components|~" defaultSourcemapCacheExpiration = 5 * time.Minute defaultSourcemapIndexPattern = "apm-*-sourcemap*" + defaultSourcemapTimeout = 5 * time.Second ) // RumConfig holds config information related to the RUM endpoint type RumConfig struct { - Enabled *bool `config:"enabled"` - EventRate *EventRate `config:"event_rate"` + Enabled bool `config:"enabled"` AllowOrigins []string `config:"allow_origins"` AllowHeaders []string `config:"allow_headers"` ResponseHeaders map[string][]string `config:"response_headers"` LibraryPattern string `config:"library_pattern"` ExcludeFromGrouping string `config:"exclude_from_grouping"` - SourceMapping *SourceMapping `config:"source_mapping"` + SourceMapping SourceMapping `config:"source_mapping"` } -// EventRate holds config information about event rate limiting -type EventRate struct { - Limit int `config:"limit"` - LruSize int `config:"lru_size"` -} - -// SourceMapping holds sourecemap config information +// SourceMapping holds sourcemap config information type SourceMapping struct { - Cache *Cache `config:"cache"` - Enabled *bool `config:"enabled"` + Cache Cache `config:"cache"` + Enabled bool 
`config:"enabled"`
 	IndexPattern string                `config:"index_pattern"`
 	ESConfig     *elasticsearch.Config `config:"elasticsearch"`
+	Metadata     []SourceMapMetadata   `config:"metadata"`
+	Timeout      time.Duration         `config:"timeout" validate:"positive"`
 
 	esConfigured bool
 }
 
-// IsEnabled indicates whether RUM endpoint is enabled or not
-func (c *RumConfig) IsEnabled() bool {
-	return c != nil && (c.Enabled != nil && *c.Enabled)
-}
-
-// IsEnabled indicates whether sourcemap handling is enabled or not
-func (s *SourceMapping) IsEnabled() bool {
-	return s == nil || s.Enabled == nil || *s.Enabled
-}
-
-func (c *RumConfig) setup(log *logp.Logger, outputESCfg *common.Config) error {
-	if !c.IsEnabled() {
+func (c *RumConfig) setup(log *logp.Logger, dataStreamsEnabled bool, outputESCfg *common.Config) error {
+	if !c.Enabled {
 		return nil
 	}
@@ -88,7 +72,12 @@ func (c *RumConfig) setup(log *logp.Logger, outputESCfg *common.Config) error {
 		return errors.Wrapf(err, "Invalid regex for `exclude_from_grouping`: ")
 	}
 
-	if c.SourceMapping == nil || c.SourceMapping.esConfigured {
+	if c.SourceMapping.esConfigured && len(c.SourceMapping.Metadata) > 0 {
+		return errors.New("configuring both source_mapping.elasticsearch and source_mapping.metadata is not allowed")
+	}
+
+	// No need to unpack the ESConfig if source map metadata exists.
+	if len(c.SourceMapping.Metadata) > 0 {
 		return nil
 	}
@@ -105,35 +94,27 @@ func (c *RumConfig) setup(log *logp.Logger, outputESCfg *common.Config) error {
 }
 
 func (s *SourceMapping) Unpack(inp *common.Config) error {
-	// this type is needed to avoid a custom Unpack method
-	type tmpSourceMapping SourceMapping
-
-	cfg := tmpSourceMapping(*defaultSourcemapping())
-	if err := inp.Unpack(&cfg); err != nil {
+	type underlyingSourceMapping SourceMapping
+	if err := inp.Unpack((*underlyingSourceMapping)(s)); err != nil {
 		return errors.Wrap(err, "error unpacking sourcemapping config")
 	}
-	*s = SourceMapping(cfg)
-
-	if inp.HasField("elasticsearch") {
-		s.esConfigured = true
-	}
+	s.esConfigured = inp.HasField("elasticsearch")
 	return nil
 }
 
-func defaultSourcemapping() *SourceMapping {
-	return &SourceMapping{
-		Cache:        &Cache{Expiration: defaultSourcemapCacheExpiration},
+func defaultSourcemapping() SourceMapping {
+	return SourceMapping{
+		Enabled:      true,
+		Cache:        Cache{Expiration: defaultSourcemapCacheExpiration},
 		IndexPattern: defaultSourcemapIndexPattern,
 		ESConfig:     elasticsearch.DefaultConfig(),
+		Metadata:     []SourceMapMetadata{},
+		Timeout:      defaultSourcemapTimeout,
 	}
 }
 
-func defaultRum() *RumConfig {
-	return &RumConfig{
-		EventRate: &EventRate{
-			Limit:   defaultEventRateLimit,
-			LruSize: defaultEventRateLRUSize,
-		},
+func defaultRum() RumConfig {
+	return RumConfig{
 		AllowOrigins:  []string{allowAllOrigins},
 		AllowHeaders:  []string{},
 		SourceMapping: defaultSourcemapping(),
diff --git a/beater/config/rum_test.go b/beater/config/rum_test.go
index bb098108679..269bb971739 100644
--- a/beater/config/rum_test.go
+++ b/beater/config/rum_test.go
@@ -20,21 +20,29 @@ package config
 import (
 	"testing"
 
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/apm-server/elasticsearch"
+	"github.com/elastic/beats/v7/libbeat/common"
+	"github.com/elastic/beats/v7/libbeat/logp"
+
 	"github.com/stretchr/testify/assert"
 )
 
-func TestIsRumEnabled(t *testing.T) {
-	truthy := true
-	for _, td := range []struct {
-		c       *Config
-		enabled bool
-	}{
-		{c: &Config{RumConfig: &RumConfig{Enabled: new(bool)}}, enabled: false},
-		{c: &Config{RumConfig: &RumConfig{Enabled: &truthy}}, enabled:
true}, - } { - assert.Equal(t, td.enabled, td.c.RumConfig.IsEnabled()) - - } +func TestRumSetup(t *testing.T) { + rum := defaultRum() + rum.SourceMapping.esConfigured = true + rum.Enabled = true + rum.SourceMapping.ESConfig = &elasticsearch.Config{APIKey: "id:apikey"} + esCfg := common.MustNewConfigFrom(map[string]interface{}{ + "hosts": []interface{}{"cloud:9200"}, + }) + + err := rum.setup(logp.NewLogger("test"), true, esCfg) + + require.NoError(t, err) + assert.Equal(t, elasticsearch.Hosts{"cloud:9200"}, rum.SourceMapping.ESConfig.Hosts) + assert.Equal(t, "id:apikey", rum.SourceMapping.ESConfig.APIKey) } func TestDefaultRum(t *testing.T) { diff --git a/beater/config/sampling.go b/beater/config/sampling.go index c415705f0a3..f236de738af 100644 --- a/beater/config/sampling.go +++ b/beater/config/sampling.go @@ -17,17 +17,131 @@ package config +import ( + "time" + + "github.com/pkg/errors" + + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" +) + // SamplingConfig holds configuration related to sampling. type SamplingConfig struct { // KeepUnsampled controls whether unsampled // transactions should be recorded. KeepUnsampled bool `config:"keep_unsampled"` + + // Tail holds tail-sampling configuration. + Tail TailSamplingConfig `config:"tail"` +} + +// TailSamplingConfig holds configuration related to tail-sampling. +type TailSamplingConfig struct { + Enabled bool `config:"enabled"` + + // Policies holds tail-sampling policies. + // + // Policies must include at least one policy that matches all traces, to ensure + // that dropping non-matching traces is intentional. + Policies []TailSamplingPolicy `config:"policies"` + + ESConfig *elasticsearch.Config `config:"elasticsearch"` + Interval time.Duration `config:"interval" validate:"min=1s"` + IngestRateDecayFactor float64 `config:"ingest_rate_decay" validate:"min=0, max=1"` + StorageDir string `config:"storage_dir"` + StorageGCInterval time.Duration `config:"storage_gc_interval" validate:"min=1s"` + TTL time.Duration `config:"ttl" validate:"min=1s"` + + esConfigured bool +} + +// TailSamplingPolicy holds a tail-sampling policy. +type TailSamplingPolicy struct { + // Service holds attributes of the service which this policy matches. + Service struct { + Name string `config:"name"` + Environment string `config:"environment"` + } `config:"service"` + + // Trace holds attributes of the trace which this policy matches. + Trace struct { + Name string `config:"name"` + Outcome string `config:"outcome"` + } `config:"trace"` + + // SampleRate holds the sample rate applied for this policy. 
+ SampleRate float64 `config:"sample_rate" validate:"min=0, max=1"` +} + +func (c *TailSamplingConfig) Unpack(in *common.Config) error { + type tailSamplingConfig TailSamplingConfig + cfg := tailSamplingConfig(defaultTailSamplingConfig()) + if err := in.Unpack(&cfg); err != nil { + return errors.Wrap(err, "error unpacking tail sampling config") + } + cfg.Enabled = in.Enabled() + *c = TailSamplingConfig(cfg) + c.esConfigured = in.HasField("elasticsearch") + return errors.Wrap(c.Validate(), "invalid tail sampling config") +} + +func (c *TailSamplingConfig) Validate() error { + if !c.Enabled { + return nil + } + if len(c.Policies) == 0 { + return errors.New("no policies specified") + } + var anyDefaultPolicy bool + for _, policy := range c.Policies { + if policy == (TailSamplingPolicy{SampleRate: policy.SampleRate}) { + // We have at least one default policy. + anyDefaultPolicy = true + break + } + } + if !anyDefaultPolicy { + return errors.New("no default (empty criteria) policy specified") + } + return nil +} + +func (c *TailSamplingConfig) setup(log *logp.Logger, dataStreamsEnabled bool, outputESCfg *common.Config) error { + if !c.Enabled { + return nil + } + if !dataStreamsEnabled { + return errors.New("tail-sampling requires data streams to be enabled") + } + if !c.esConfigured && outputESCfg != nil { + log.Info("Falling back to elasticsearch output for tail-sampling") + if err := outputESCfg.Unpack(&c.ESConfig); err != nil { + return errors.Wrap(err, "error unpacking output.elasticsearch config for tail sampling") + } + } + return nil } func defaultSamplingConfig() SamplingConfig { + tail := defaultTailSamplingConfig() return SamplingConfig{ // In a future major release we will set this to // false, and then later remove the option. KeepUnsampled: true, + Tail: tail, + } +} + +func defaultTailSamplingConfig() TailSamplingConfig { + return TailSamplingConfig{ + Enabled: false, + ESConfig: elasticsearch.DefaultConfig(), + Interval: 1 * time.Minute, + IngestRateDecayFactor: 0.25, + StorageDir: "tail_sampling", + StorageGCInterval: 5 * time.Minute, + TTL: 30 * time.Minute, } } diff --git a/beater/config/sampling_test.go b/beater/config/sampling_test.go new file mode 100644 index 00000000000..0b22ae10c81 --- /dev/null +++ b/beater/config/sampling_test.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
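+
+// The tests below exercise tail-sampling configuration of roughly this shape
+// (a sketch with illustrative values):
+//
+//	data_streams.enabled: true
+//	sampling.tail:
+//	  policies:
+//	    - service.name: foo
+//	      sample_rate: 0.5
+//	    - sample_rate: 0.5   # default (empty criteria) policy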
+ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestSamplingPoliciesValidation(t *testing.T) { + t.Run("MinimallyValid", func(t *testing.T) { + _, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{ + "data_streams.enabled": true, + "sampling.tail.policies": []map[string]interface{}{{ + "sample_rate": 0.5, + }}, + }), nil) + assert.NoError(t, err) + }) + t.Run("NoPolicies", func(t *testing.T) { + _, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{ + "data_streams.enabled": true, + "sampling.tail.enabled": true, + }), nil) + assert.EqualError(t, err, "Error processing configuration: invalid tail sampling config: no policies specified accessing 'sampling.tail'") + }) + t.Run("NoDefaultPolicies", func(t *testing.T) { + _, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{ + "data_streams.enabled": true, + "sampling.tail.policies": []map[string]interface{}{{ + "service.name": "foo", + "sample_rate": 0.5, + }}, + }), nil) + assert.EqualError(t, err, "Error processing configuration: invalid tail sampling config: no default (empty criteria) policy specified accessing 'sampling.tail'") + }) + t.Run("DataStreamsDisabled", func(t *testing.T) { + _, err := NewConfig(common.MustNewConfigFrom(map[string]interface{}{ + "sampling.tail.enabled": true, + "sampling.tail.policies": []map[string]interface{}{{ + "sample_rate": 0.5, + }}, + }), nil) + assert.EqualError(t, err, "tail-sampling requires data streams to be enabled") + }) +} diff --git a/beater/config/sourcemapping.go b/beater/config/sourcemapping.go new file mode 100644 index 00000000000..ab98cf44426 --- /dev/null +++ b/beater/config/sourcemapping.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package config + +// SourceMapMetadata holds source map configuration information. 
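+// It is unpacked from entries like the following under the RUM
+// source_mapping.metadata setting (a sketch; the values are invented):
+//
+//	- service.name: opbeans-frontend
+//	  service.version: "1.0.0"
+//	  bundle.filepath: /app/bundle.js
+//	  sourcemap.url: http://localhost:8000/bundle.js.map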
+type SourceMapMetadata struct { + ServiceName string `config:"service.name"` + ServiceVersion string `config:"service.version"` + BundleFilepath string `config:"bundle.filepath"` + SourceMapURL string `config:"sourcemap.url"` +} diff --git a/beater/http.go b/beater/http.go index 6ef8fcaa444..84f2abae133 100644 --- a/beater/http.go +++ b/beater/http.go @@ -19,45 +19,51 @@ package beater import ( "context" + "log" "net" "net/http" "net/url" + "strings" - "go.elastic.co/apm" - "go.elastic.co/apm/module/apmhttp" + "github.com/libp2p/go-reuseport" + "go.uber.org/zap" "golang.org/x/net/netutil" - "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" - "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/apm-server/beater/api" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/publish" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/gmux" ) type httpServer struct { *http.Server - cfg *config.Config - logger *logp.Logger - reporter publish.Reporter + cfg *config.Config + logger *logp.Logger + reporter publish.Reporter + grpcListener net.Listener + httpListener net.Listener } -func newHTTPServer(logger *logp.Logger, cfg *config.Config, tracer *apm.Tracer, reporter publish.Reporter) (*httpServer, error) { - mux, err := api.NewMux(cfg, reporter) - if err != nil { - return nil, err - } +func newHTTPServer( + logger *logp.Logger, + info beat.Info, + cfg *config.Config, + handler http.Handler, + reporter publish.Reporter, + listener net.Listener, +) (*httpServer, error) { server := &http.Server{ - Addr: cfg.Host, - Handler: apmhttp.Wrap(mux, - apmhttp.WithServerRequestIgnorer(doNotTrace), - apmhttp.WithTracer(tracer), - ), + Addr: cfg.Host, + Handler: handler, IdleTimeout: cfg.IdleTimeout, ReadTimeout: cfg.ReadTimeout, WriteTimeout: cfg.WriteTimeout, MaxHeaderBytes: cfg.MaxHeaderSize, + ErrorLog: newErrorLog(logger), } if cfg.TLS.IsEnabled() { @@ -65,25 +71,23 @@ func newHTTPServer(logger *logp.Logger, cfg *config.Config, tracer *apm.Tracer, if err != nil { return nil, err } - server.TLSConfig = tlsServerConfig.BuildModuleConfig("") + server.TLSConfig = tlsServerConfig.BuildServerConfig("") } - return &httpServer{server, cfg, logger, reporter}, nil -} -func (h *httpServer) start() error { - lis, err := h.listen() + // Configure the server with gmux. The returned net.Listener will receive + // gRPC connections, while all other requests will be handled by s.Handler. + // + // grpcListener is closed when the HTTP server is shutdown. 
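+	//
+	// gmux routes HTTP/2 requests with a gRPC content-type to grpcListener;
+	// a *grpc.Server constructed elsewhere can then serve them, as in this
+	// sketch (grpcServer is assumed to be created by the caller):
+	//
+	//	go grpcServer.Serve(grpcListener)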
+ grpcListener, err := gmux.ConfigureServer(server, nil) if err != nil { - return err - } - addr := lis.Addr() - if addr.Network() == "tcp" { - h.logger.Infof("Listening on: %s", addr) - } else { - h.logger.Infof("Listening on: %s:%s", addr.Network(), addr.String()) + return nil, err } - switch h.cfg.RumConfig.IsEnabled() { - case true: + return &httpServer{server, cfg, logger, reporter, grpcListener, listener}, nil +} + +func (h *httpServer) start() error { + if h.cfg.RumConfig.Enabled { h.logger.Info("RUM endpoints enabled!") for _, s := range h.cfg.RumConfig.AllowOrigins { if s == "*" { @@ -91,28 +95,28 @@ func (h *httpServer) start() error { break } } - case false: + } else { h.logger.Info("RUM endpoints disabled.") } - if h.cfg.MaxConnections > 0 { - lis = netutil.LimitListener(lis, h.cfg.MaxConnections) - h.logger.Infof("Connection limit set to: %d", h.cfg.MaxConnections) + if !h.cfg.DataStreams.Enabled { + // Create the "onboarding" document, which contains the server's + // listening address. We only do this if data streams are not enabled, + // as onboarding documents are incompatible with data streams. + // Onboarding documents should be replaced by Fleet status later. + notifyListening(context.Background(), h.httpListener.Addr(), h.reporter) } - // Create the "onboarding" document, which contains the server's listening address. - notifyListening(context.Background(), addr, h.reporter) - - if h.TLSConfig != nil { + if h.cfg.TLS.IsEnabled() { h.logger.Info("SSL enabled.") - return h.ServeTLS(lis, "", "") + return h.ServeTLS(h.httpListener, "", "") } - if h.cfg.SecretToken != "" { + if h.cfg.AgentAuth.SecretToken != "" { h.logger.Warn("Secret token is set, but SSL is not enabled.") } h.logger.Info("SSL disabled.") - return h.Serve(lis) + return h.Serve(h.httpListener) } func (h *httpServer) stop() { @@ -126,23 +130,59 @@ func (h *httpServer) stop() { } // listen starts the listener for bt.config.Host. -func (h *httpServer) listen() (net.Listener, error) { - if url, err := url.Parse(h.cfg.Host); err == nil && url.Scheme == "unix" { - return net.Listen("unix", url.Path) +func listen(cfg *config.Config, logger *logp.Logger) (net.Listener, error) { + var listener net.Listener + url, err := url.Parse(cfg.Host) + if err == nil && url.Scheme == "unix" { + // SO_REUSEPORT does not support unix sockets + listener, err = net.Listen("unix", url.Path) + } else { + addr := cfg.Host + if _, _, err := net.SplitHostPort(addr); err != nil { + // Tack on a port if SplitHostPort fails on what should be a + // tcp network address. If splitting failed because there were + // already too many colons, one more won't change that. + addr = net.JoinHostPort(addr, config.DefaultPort) + } + listener, err = reuseport.Listen("tcp", addr) + } + if err != nil { + return nil, err } - const network = "tcp" - addr := h.cfg.Host - if _, _, err := net.SplitHostPort(addr); err != nil { - // Tack on a port if SplitHostPort fails on what should be a - // tcp network address. If splitting failed because there were - // already too many colons, one more won't change that. 
- addr = net.JoinHostPort(addr, config.DefaultPort) + addr := listener.Addr() + if network := addr.Network(); network == "tcp" { + logger.Infof("Listening on: %s", addr) + } else { + logger.Infof("Listening on: %s:%s", network, addr.String()) } - return net.Listen(network, addr) + if cfg.MaxConnections > 0 { + logger.Infof("Connection limit set to: %d", cfg.MaxConnections) + listener = netutil.LimitListener(listener, cfg.MaxConnections) + } + return listener, nil } func doNotTrace(req *http.Request) bool { // Don't trace root url (healthcheck) requests. return req.URL.Path == api.RootPath } + +// newErrorLog returns a standard library log.Logger that sends +// logs to logger with error level. +func newErrorLog(logger *logp.Logger) *log.Logger { + logger = logger.Named("http") + logger = logger.WithOptions(zap.AddCallerSkip(3)) + w := errorLogWriter{logger} + return log.New(w, "", 0) +} + +type errorLogWriter struct { + logger *logp.Logger +} + +func (w errorLogWriter) Write(p []byte) (int, error) { + message := strings.TrimSpace(string(p)) + w.logger.Error(message) + return len(p), nil +} diff --git a/beater/integration_test.go b/beater/integration_test.go index b6f73e9e32a..70ee77211a2 100644 --- a/beater/integration_test.go +++ b/beater/integration_test.go @@ -21,6 +21,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "mime/multipart" "net/http" "net/textproto" @@ -37,7 +38,6 @@ import ( "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/api" "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/tests/loader" ) func collectEvents(events <-chan beat.Event, timeout time.Duration) []beat.Event { @@ -149,7 +149,7 @@ func TestPublishIntegration(t *testing.T) { require.NoError(t, err) defer apm.Stop() - b, err := loader.LoadDataAsBytes(filepath.Join("../testdata/intake-v2/", tc.payload)) + b, err := ioutil.ReadFile(filepath.Join("../testdata/intake-v2", tc.payload)) require.NoError(t, err) docs := testPublishIntake(t, apm, events, bytes.NewReader(b)) approvaltest.ApproveEventDocs(t, "test_approved_es_documents/TestPublishIntegration"+tc.name, docs) @@ -204,11 +204,11 @@ func TestPublishIntegrationProfile(t *testing.T) { var metadata io.Reader if tc.metadata != "" { - b, err := loader.LoadDataAsBytes(filepath.Join("../testdata/profile/", tc.metadata)) + b, err := ioutil.ReadFile(filepath.Join("../testdata/profile", tc.metadata)) require.NoError(t, err) metadata = bytes.NewReader(b) } - profileBytes, err := loader.LoadDataAsBytes(filepath.Join("../testdata/profile/", tc.profile)) + profileBytes, err := ioutil.ReadFile(filepath.Join("../testdata/profile", tc.profile)) require.NoError(t, err) docs := testPublishProfile(t, apm, events, metadata, bytes.NewReader(profileBytes)) diff --git a/beater/interceptors/auth.go b/beater/interceptors/auth.go new file mode 100644 index 00000000000..16baec27262 --- /dev/null +++ b/beater/interceptors/auth.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "errors" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/headers" +) + +// Authenticator provides an interface for authenticating a client. +type Authenticator interface { + Authenticate(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) +} + +// MethodAuthenticator is a function type for authenticating a gRPC method call. +// This is used to authenticate gRPC method calls by extracting authentication tokens +// from incoming context metadata or from the request payload. +type MethodAuthenticator func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) + +// Auth returns a grpc.UnaryServerInterceptor that ensures method calls are +// authenticated before passing on to the next handler. +// +// Authentication is performed using a MethodAuthenticator from the combined +// map parameters, keyed on the full gRPC method name (info.FullMethod). +// If there is no handler defined for the method, authentication fails. +func Auth(methodHandlers ...map[string]MethodAuthenticator) grpc.UnaryServerInterceptor { + combinedMethodHandlers := make(map[string]MethodAuthenticator) + for _, methodHandlers := range methodHandlers { + for method, handler := range methodHandlers { + combinedMethodHandlers[method] = handler + } + } + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + authenticator, ok := combinedMethodHandlers[info.FullMethod] + if !ok { + return nil, status.Errorf(codes.Unauthenticated, "no auth method defined for %q", info.FullMethod) + } + details, authz, err := authenticator(ctx, req) + if err != nil { + if errors.Is(err, auth.ErrAuthFailed) { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + return nil, err + } + ctx = ContextWithAuthenticationDetails(ctx, details) + ctx = auth.ContextWithAuthorizer(ctx, authz) + resp, err := handler(ctx, req) + if errors.Is(err, auth.ErrUnauthorized) { + // Processors may indicate that a request is unauthorized by returning auth.ErrUnauthorized. + err = status.Error(codes.PermissionDenied, err.Error()) + } + return resp, err + } +} + +// MetadataMethodAuthenticator returns a MethodAuthenticator that extracts +// authentication parameters from the "authorization" metadata in ctx, +// calling authenticator.Authenticate. 
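+//
+// For example, a client would typically supply a secret token or API key as
+// follows (illustrative; the accepted schemes are defined by
+// auth.ParseAuthorizationHeader):
+//
+//	authorization: Bearer <secret-token>
+//	authorization: ApiKey <base64(id:api_key)>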
+func MetadataMethodAuthenticator(authenticator Authenticator) MethodAuthenticator {
+	return func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) {
+		var authHeader string
+		if md, ok := metadata.FromIncomingContext(ctx); ok {
+			if values := md.Get(headers.Authorization); len(values) > 0 {
+				authHeader = values[0]
+			}
+		}
+		kind, token := auth.ParseAuthorizationHeader(authHeader)
+		return authenticator.Authenticate(ctx, kind, token)
+	}
+}
+
+type authenticationDetailsKey struct{}
+
+// ContextWithAuthenticationDetails returns a copy of ctx with details attached.
+func ContextWithAuthenticationDetails(ctx context.Context, details auth.AuthenticationDetails) context.Context {
+	return context.WithValue(ctx, authenticationDetailsKey{}, details)
+}
+
+// AuthenticationDetailsFromContext returns the authentication details extracted by the Auth interceptor, if any.
+func AuthenticationDetailsFromContext(ctx context.Context) (auth.AuthenticationDetails, bool) {
+	details, ok := ctx.Value(authenticationDetailsKey{}).(auth.AuthenticationDetails)
+	return details, ok
+}
diff --git a/beater/interceptors/auth_test.go b/beater/interceptors/auth_test.go
new file mode 100644
index 00000000000..0dbbcaa6e17
--- /dev/null
+++ b/beater/interceptors/auth_test.go
@@ -0,0 +1,150 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
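+
+// A minimal sketch of wiring Auth into a gRPC server; the method name and
+// authenticator below are hypothetical:
+//
+//	srv := grpc.NewServer(grpc.UnaryInterceptor(interceptors.Auth(
+//		map[string]interceptors.MethodAuthenticator{
+//			"/test.Service/Method": interceptors.MetadataMethodAuthenticator(authenticator),
+//		},
+//	)))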
+ +package interceptors_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/interceptors" +) + +func TestAuth(t *testing.T) { + type contextKey struct{} + origContext := context.WithValue(context.Background(), contextKey{}, 123) + origReq := "grpc_request" + origResp := "grpc_response" + origErr := errors.New("handler error") + + authenticated := authenticatorFunc(func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + assert.Equal(t, 123, ctx.Value(contextKey{})) + var authz authorizerFunc = func(context.Context, auth.Action, auth.Resource) error { return nil } + return auth.AuthenticationDetails{Method: auth.MethodSecretToken}, authz, nil + }) + authFailed := authenticatorFunc(func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return auth.AuthenticationDetails{}, nil, auth.ErrAuthFailed + }) + authError := authenticatorFunc(func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return auth.AuthenticationDetails{}, nil, errors.New("error occurred while authenticating") + }) + + makeMethodAuthenticator := func(authenticator interceptors.Authenticator) interceptors.MethodAuthenticator { + return func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) { + require.Equal(t, origReq, req) + return authenticator.Authenticate(ctx, "", "") + } + } + + interceptor := interceptors.Auth( + map[string]interceptors.MethodAuthenticator{ + "authenticated": makeMethodAuthenticator(authenticated), + }, + map[string]interceptors.MethodAuthenticator{ + "auth_failed": makeMethodAuthenticator(authFailed), + "auth_error": makeMethodAuthenticator(authError), + }, + ) + + type test struct { + method string + expectResp interface{} + expectErr error + } + for _, test := range []test{{ + method: "authenticated", + expectResp: origResp, + expectErr: origErr, + }, { + method: "auth_failed", + expectErr: status.Error(codes.Unauthenticated, auth.ErrAuthFailed.Error()), + }, { + method: "auth_error", + expectErr: errors.New("error occurred while authenticating"), + }} { + t.Run(test.method, func(t *testing.T) { + next := func(ctx context.Context, req interface{}) (interface{}, error) { + return origResp, origErr + } + resp, err := interceptor(origContext, origReq, &grpc.UnaryServerInfo{FullMethod: test.method}, next) + assert.Equal(t, test.expectErr, err) + assert.Equal(t, test.expectResp, resp) + }) + } +} + +func TestAuthUnauthorized(t *testing.T) { + var authorizer authorizerFunc = func(context.Context, auth.Action, auth.Resource) error { + return fmt.Errorf("%w: none shall pass", auth.ErrUnauthorized) + } + interceptor := interceptors.Auth( + map[string]interceptors.MethodAuthenticator{ + "method": interceptors.MethodAuthenticator( + func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) { + return auth.AuthenticationDetails{}, authorizer, nil + }, + ), + }, + ) + next := func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, auth.Authorize(ctx, auth.ActionEventIngest, auth.Resource{}) + } + _, err := interceptor(context.Background(), 
nil, &grpc.UnaryServerInfo{FullMethod: "method"}, next) + assert.Equal(t, status.Error(codes.PermissionDenied, "unauthorized: none shall pass"), err) +} + +func TestMetadataMethodAuthenticator(t *testing.T) { + expectDetails := auth.AuthenticationDetails{ + Method: auth.MethodSecretToken, + } + var expectAuthz struct { + auth.Authorizer + } + var authenticator authenticatorFunc = func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return expectDetails, expectAuthz, nil + } + methodAuthenticator := interceptors.MetadataMethodAuthenticator(authenticator) + + ctx := context.Background() + ctx = metadata.NewIncomingContext(ctx, metadata.Pairs("authorization", "Bearer abc123")) + details, authz, err := methodAuthenticator(ctx, nil) + assert.NoError(t, err) + assert.Equal(t, expectDetails, details) + assert.Exactly(t, expectAuthz, authz) +} + +type authenticatorFunc func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) + +func (f authenticatorFunc) Authenticate(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return f(ctx, kind, token) +} + +type authorizerFunc func(ctx context.Context, action auth.Action, resource auth.Resource) error + +func (f authorizerFunc) Authorize(ctx context.Context, action auth.Action, resource auth.Resource) error { + return f(ctx, action, resource) +} diff --git a/beater/interceptors/logging.go b/beater/interceptors/logging.go new file mode 100644 index 00000000000..6cca489028b --- /dev/null +++ b/beater/interceptors/logging.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/status" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +// Logging intercepts a gRPC request and provides logging processing. The +// returned function implements grpc.UnaryServerInterceptor. +// +// Logging should be added after ClientMetadata to include `source.address` +// in log records. +func Logging(logger *logp.Logger) grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + // Shadow the logger param to ensure we don't update the + // closure variable, and interfere with logging of other + // requests. 
+ logger := logger + + start := time.Now() + if metadata, ok := ClientMetadataFromContext(ctx); ok { + if metadata.SourceAddr != nil { + logger = logger.With("source.address", metadata.SourceAddr.String()) + } + } + + resp, err := handler(ctx, req) + res, _ := status.FromError(err) + logger = logger.With( + "grpc.request.method", info.FullMethod, + "event.duration", time.Since(start), + "grpc.response.status_code", res.Code(), + ) + + if err != nil { + logger.With("error.message", res.Message()).Error(logp.Error(err)) + } else { + logger.Info(res.Message()) + } + return resp, err + } +} diff --git a/beater/interceptors/logging_test.go b/beater/interceptors/logging_test.go new file mode 100644 index 00000000000..5be3a89149a --- /dev/null +++ b/beater/interceptors/logging_test.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/logp/configure" +) + +func TestLogging(t *testing.T) { + methodName := "test_method_name" + info := &grpc.UnaryServerInfo{ + FullMethod: methodName, + } + ctx := ContextWithClientMetadata(context.Background(), ClientMetadataValues{ + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("1.2.3.4"), + Port: 4321, + }, + }) + + requiredKeys := []string{ + "source.address", + "grpc.request.method", + "event.duration", + "grpc.response.status_code", + } + + for _, tc := range []struct { + statusCode codes.Code + f func(ctx context.Context, req interface{}) (interface{}, error) + }{ + { + statusCode: codes.Internal, + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, status.New(codes.Internal, "internal server error").Err() + }, + }, + { + statusCode: codes.OK, + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + }, + }, + } { + configure.Logging( + "APM Server test", + common.MustNewConfigFrom(`{"ecs":true}`), + ) + require.NoError(t, logp.DevelopmentSetup(logp.ToObserverOutput())) + logger := logp.NewLogger("interceptor.logging.test") + + i := Logging(logger) + _, err := i(ctx, nil, info, tc.f) + entries := logp.ObserverLogs().TakeAll() + assert.Len(t, entries, 1) + entry := entries[0] + + fields := entry.ContextMap() + if tc.statusCode != codes.OK { + assert.Error(t, err) + assert.Equal(t, zapcore.ErrorLevel, entry.Entry.Level) + assert.Equal(t, "internal server error", fields["error.message"]) + } else { + 
assert.NoError(t, err) + assert.Equal(t, zapcore.InfoLevel, entry.Entry.Level) + assert.NotContains(t, fields, "error.message") + } + for _, k := range requiredKeys { + assert.Contains(t, fields, k) + } + assert.Equal(t, methodName, fields["grpc.request.method"]) + assert.Equal(t, "1.2.3.4:4321", fields["source.address"]) + assert.Equal(t, tc.statusCode.String(), fields["grpc.response.status_code"]) + } +} diff --git a/beater/interceptors/metadata.go b/beater/interceptors/metadata.go new file mode 100644 index 00000000000..024793219d8 --- /dev/null +++ b/beater/interceptors/metadata.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + + "github.com/elastic/apm-server/utility" +) + +// ClientMetadata returns an interceptor that intercepts unary gRPC requests, +// extracts metadata relating to the gRPC client, and adds it to the context. +// +// Metadata can be extracted from context using ClientMetadataFromContext. +func ClientMetadata() grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + var values ClientMetadataValues + if p, ok := peer.FromContext(ctx); ok { + values.SourceAddr = p.Addr + if addr, ok := p.Addr.(*net.TCPAddr); ok { + values.ClientIP = addr.IP + } + } + if md, ok := metadata.FromIncomingContext(ctx); ok { + if ua := md["user-agent"]; len(ua) > 0 { + values.UserAgent = ua[0] + } + // Account for `forwarded`, `x-real-ip`, `x-forwarded-for` headers + if ip := utility.ExtractIPFromHeader(http.Header(md)); ip != nil { + values.ClientIP = ip + } + } + ctx = context.WithValue(ctx, clientMetadataKey{}, values) + return handler(ctx, req) + } +} + +type clientMetadataKey struct{} + +// ContextWithClientMetadata returns a copy of ctx with values. +func ContextWithClientMetadata(ctx context.Context, values ClientMetadataValues) context.Context { + return context.WithValue(ctx, clientMetadataKey{}, values) +} + +// ClientMetadataFromContext returns client metadata extracted by the ClientMetadata interceptor. +func ClientMetadataFromContext(ctx context.Context) (ClientMetadataValues, bool) { + values, ok := ctx.Value(clientMetadataKey{}).(ClientMetadataValues) + return values, ok +} + +// ClientMetadataValues holds metadata relating to the gRPC client that initiated the request being handled. +type ClientMetadataValues struct { + // SourceAddr holds the address of the (source) network peer, if known. 
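+	// For TCP peers this is typically a *net.TCPAddr, e.g. 1.2.3.4:56837
+	// (as exercised in metadata_test.go).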
+ SourceAddr net.Addr + + // ClientIP holds the IP address of the originating gRPC client, if known, + // as recorded in Forwarded, X-Forwarded-For, etc. + // + // For requests without one of the forwarded headers, this will have the + // same value as the IP in SourceAddr. + ClientIP net.IP + + // UserAgent holds the User-Agent for the gRPC client, if known. + UserAgent string +} diff --git a/beater/interceptors/metadata_test.go b/beater/interceptors/metadata_test.go new file mode 100644 index 00000000000..3135597afb3 --- /dev/null +++ b/beater/interceptors/metadata_test.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +func TestClientMetadata(t *testing.T) { + tcpAddr := &net.TCPAddr{ + IP: net.ParseIP("1.2.3.4"), + Port: 56837, + } + udpAddr := &net.UDPAddr{ + IP: net.ParseIP("1.1.1.1"), + Port: 1111, + } + + interceptor := ClientMetadata() + + for _, test := range []struct { + peer *peer.Peer + metadata metadata.MD + expected ClientMetadataValues + }{{ + expected: ClientMetadataValues{}, + }, { + peer: &peer.Peer{Addr: udpAddr}, + expected: ClientMetadataValues{ + SourceAddr: udpAddr, + }, + }, { + peer: &peer.Peer{Addr: tcpAddr}, + expected: ClientMetadataValues{ + SourceAddr: tcpAddr, + ClientIP: tcpAddr.IP, + }, + }, { + peer: &peer.Peer{Addr: tcpAddr}, + metadata: metadata.Pairs("X-Real-Ip", "5.6.7.8"), + expected: ClientMetadataValues{ + SourceAddr: tcpAddr, + ClientIP: net.ParseIP("5.6.7.8"), + }, + }, { + metadata: metadata.Pairs("User-Agent", "User-Agent"), + expected: ClientMetadataValues{UserAgent: "User-Agent"}, + }} { + ctx := context.Background() + if test.peer != nil { + ctx = peer.NewContext(ctx, test.peer) + } + if test.metadata != nil { + ctx = metadata.NewIncomingContext(ctx, test.metadata) + } + var got ClientMetadataValues + var ok bool + interceptor(ctx, nil, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + got, ok = ClientMetadataFromContext(ctx) + return nil, nil + }) + assert.True(t, ok) + assert.Equal(t, test.expected, got) + } +} diff --git a/beater/interceptors/metrics.go b/beater/interceptors/metrics.go new file mode 100644 index 00000000000..fb84fe841c7 --- /dev/null +++ b/beater/interceptors/metrics.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package interceptors
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/elastic/apm-server/beater/request"
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/libbeat/monitoring"
+)
+
+// Metrics returns a grpc.UnaryServerInterceptor that increments metrics
+// for gRPC method calls. The full gRPC method name is used to look up a
+// monitoring map in the given maps; if a method appears in more than one
+// map, the last one wins.
+func Metrics(
+	logger *logp.Logger,
+	methodMetrics ...map[string]map[request.ResultID]*monitoring.Int,
+) grpc.UnaryServerInterceptor {
+
+	allMethodMetrics := make(map[string]map[request.ResultID]*monitoring.Int)
+	for _, methodMetrics := range methodMetrics {
+		for method, metrics := range methodMetrics {
+			allMethodMetrics[method] = metrics
+		}
+	}
+
+	return func(
+		ctx context.Context,
+		req interface{},
+		info *grpc.UnaryServerInfo,
+		handler grpc.UnaryHandler,
+	) (interface{}, error) {
+		m, ok := allMethodMetrics[info.FullMethod]
+		if !ok {
+			logger.With(
+				"grpc.request.method", info.FullMethod,
+			).Error("metrics registry missing")
+			return handler(ctx, req)
+		}
+
+		m[request.IDRequestCount].Inc()
+		defer m[request.IDResponseCount].Inc()
+
+		resp, err := handler(ctx, req)
+
+		responseID := request.IDResponseValidCount
+		if err != nil {
+			responseID = request.IDResponseErrorsCount
+			if s, ok := status.FromError(err); ok {
+				switch s.Code() {
+				case codes.Unauthenticated:
+					m[request.IDResponseErrorsUnauthorized].Inc()
+				case codes.DeadlineExceeded:
+					m[request.IDResponseErrorsTimeout].Inc()
+				case codes.ResourceExhausted:
+					m[request.IDResponseErrorsRateLimit].Inc()
+				}
+			} else {
+				// Non-status errors (plain Go errors) are counted as
+				// internal errors, as asserted in metrics_test.go.
+				m[request.IDResponseErrorsInternal].Inc()
+			}
+		}
+
+		m[responseID].Inc()
+
+		return resp, err
+	}
+}
diff --git a/beater/interceptors/metrics_test.go b/beater/interceptors/metrics_test.go
new file mode 100644
index 00000000000..33414013a42
--- /dev/null
+++ b/beater/interceptors/metrics_test.go
@@ -0,0 +1,154 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
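+
+// A rough sketch of how a metrics map is wired up, mirroring the test below
+// (the method name is arbitrary):
+//
+//	registry := monitoring.NewRegistry()
+//	metricsMap := request.MonitoringMapForRegistry(registry, monitoringKeys)
+//	interceptor := Metrics(logger, map[string]map[request.ResultID]*monitoring.Int{
+//		"test_method_name": metricsMap,
+//	})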
+ +package interceptors + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/beatertest" + "github.com/elastic/apm-server/beater/request" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" +) + +var monitoringKeys = append( + request.DefaultResultIDs, + request.IDResponseErrorsRateLimit, + request.IDResponseErrorsTimeout, + request.IDResponseErrorsUnauthorized, +) + +func TestMetrics(t *testing.T) { + registry := monitoring.NewRegistry() + + monitoringMap := request.MonitoringMapForRegistry(registry, monitoringKeys) + methodName := "test_method_name" + logger := logp.NewLogger("interceptor.metrics.test") + + testMap := map[string]map[request.ResultID]*monitoring.Int{ + methodName: monitoringMap, + } + i := Metrics(logger, testMap) + + ctx := context.Background() + info := &grpc.UnaryServerInfo{ + FullMethod: methodName, + } + + for _, tc := range []struct { + f func(ctx context.Context, req interface{}) (interface{}, error) + monitoringInt map[request.ResultID]int64 + }{ + { + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, errors.New("error") + }, + monitoringInt: map[request.ResultID]int64{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseValidCount: 0, + request.IDResponseErrorsCount: 1, + request.IDResponseErrorsInternal: 1, + request.IDResponseErrorsRateLimit: 0, + request.IDResponseErrorsTimeout: 0, + request.IDResponseErrorsUnauthorized: 0, + }, + }, + { + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, status.Error(codes.Unauthenticated, "error") + }, + monitoringInt: map[request.ResultID]int64{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseValidCount: 0, + request.IDResponseErrorsCount: 1, + request.IDResponseErrorsInternal: 0, + request.IDResponseErrorsRateLimit: 0, + request.IDResponseErrorsTimeout: 0, + request.IDResponseErrorsUnauthorized: 1, + }, + }, + { + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, status.Error(codes.DeadlineExceeded, "request timed out") + }, + monitoringInt: map[request.ResultID]int64{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseValidCount: 0, + request.IDResponseErrorsCount: 1, + request.IDResponseErrorsInternal: 0, + request.IDResponseErrorsRateLimit: 0, + request.IDResponseErrorsTimeout: 1, + request.IDResponseErrorsUnauthorized: 0, + }, + }, + { + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, status.Error(codes.ResourceExhausted, "rate limit exceeded") + }, + monitoringInt: map[request.ResultID]int64{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseValidCount: 0, + request.IDResponseErrorsCount: 1, + request.IDResponseErrorsInternal: 0, + request.IDResponseErrorsRateLimit: 1, + request.IDResponseErrorsTimeout: 0, + request.IDResponseErrorsUnauthorized: 0, + }, + }, + { + f: func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + }, + monitoringInt: map[request.ResultID]int64{ + request.IDRequestCount: 1, + request.IDResponseCount: 1, + request.IDResponseValidCount: 1, + request.IDResponseErrorsCount: 0, + request.IDResponseErrorsInternal: 0, + request.IDResponseErrorsRateLimit: 0, + 
request.IDResponseErrorsTimeout:      0,
+				request.IDResponseErrorsUnauthorized: 0,
+			},
+		},
+	} {
+		i(ctx, nil, info, tc.f)
+		assertMonitoring(t, tc.monitoringInt, monitoringMap)
+		beatertest.ClearRegistry(monitoringMap)
+	}
+}
+
+func assertMonitoring(t *testing.T, expected map[request.ResultID]int64, actual map[request.ResultID]*monitoring.Int) {
+	for _, k := range monitoringKeys {
+		if val, ok := expected[k]; ok {
+			assert.Equalf(t, val, actual[k].Get(), "%s mismatch", k)
+		} else {
+			assert.Zerof(t, actual[k].Get(), "%s mismatch", k)
+		}
+	}
+}
diff --git a/beater/interceptors/ratelimit.go b/beater/interceptors/ratelimit.go
new file mode 100644
index 00000000000..58a6620ae1c
--- /dev/null
+++ b/beater/interceptors/ratelimit.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package interceptors
+
+import (
+	"context"
+	"errors"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/elastic/apm-server/beater/ratelimit"
+)
+
+// AnonymousRateLimit returns a grpc.UnaryServerInterceptor that adds a rate limiter
+// to the context of anonymous requests. It must be wrapped by the ClientMetadata and
+// Auth interceptors, as it requires the client's IP address and authentication details.
+func AnonymousRateLimit(store *ratelimit.Store) grpc.UnaryServerInterceptor {
+	return func(
+		ctx context.Context,
+		req interface{},
+		info *grpc.UnaryServerInfo,
+		handler grpc.UnaryHandler,
+	) (interface{}, error) {
+		details, ok := AuthenticationDetailsFromContext(ctx)
+		if !ok {
+			return nil, errors.New("authentication details not found in context")
+		}
+		if details.Method == "" {
+			clientMetadata, ok := ClientMetadataFromContext(ctx)
+			if !ok {
+				return nil, errors.New("client metadata not found in context")
+			}
+			limiter := store.ForIP(clientMetadata.ClientIP)
+			if !limiter.Allow() {
+				return nil, status.Error(
+					codes.ResourceExhausted,
+					ratelimit.ErrRateLimitExceeded.Error(),
+				)
+			}
+			ctx = ratelimit.ContextWithLimiter(ctx, limiter)
+		}
+		result, err := handler(ctx, req)
+		if errors.Is(err, ratelimit.ErrRateLimitExceeded) {
+			err = status.Error(codes.ResourceExhausted, err.Error())
+		}
+		return result, err
+	}
+}
diff --git a/beater/interceptors/ratelimit_test.go b/beater/interceptors/ratelimit_test.go
new file mode 100644
index 00000000000..9e184371f2c
--- /dev/null
+++ b/beater/interceptors/ratelimit_test.go
@@ -0,0 +1,116 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors_test + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/interceptors" + "github.com/elastic/apm-server/beater/ratelimit" +) + +func TestAnonymousRateLimit(t *testing.T) { + type test struct { + burst int + anonymous bool + + expectErr error + expectAllow bool + } + for _, test := range []test{{ + burst: 0, + anonymous: false, + expectErr: nil, + }, { + burst: 0, + anonymous: true, + expectErr: status.Error(codes.ResourceExhausted, "rate limit exceeded"), + }, { + burst: 1, + anonymous: true, + expectErr: nil, + expectAllow: false, + }, { + burst: 2, + anonymous: true, + expectErr: nil, + expectAllow: true, + }} { + store, _ := ratelimit.NewStore(1, 1, test.burst) + interceptor := interceptors.AnonymousRateLimit(store) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + limiter, ok := ratelimit.FromContext(ctx) + if test.anonymous { + require.True(t, ok) + assert.Equal(t, test.expectAllow, limiter.Allow()) + } else { + require.False(t, ok) + } + return "response", nil + } + + ctx := interceptors.ContextWithClientMetadata(context.Background(), + interceptors.ClientMetadataValues{ + ClientIP: net.ParseIP("10.2.3.4"), + }, + ) + details := auth.AuthenticationDetails{} + if !test.anonymous { + details.Method = "none" + } + ctx = interceptors.ContextWithAuthenticationDetails(ctx, details) + resp, err := interceptor(ctx, "request", &grpc.UnaryServerInfo{}, handler) + if test.expectErr != nil { + assert.Nil(t, resp) + assert.Equal(t, test.expectErr, err) + } else { + assert.NoError(t, err) + assert.Equal(t, "response", resp) + } + } +} + +func TestAnonymousRateLimitForIP(t *testing.T) { + store, _ := ratelimit.NewStore(2, 1, 1) + interceptor := interceptors.AnonymousRateLimit(store) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { return nil, nil } + + requestWithIP := func(ip string) error { + ctx := interceptors.ContextWithClientMetadata(context.Background(), + interceptors.ClientMetadataValues{ClientIP: net.ParseIP(ip)}, + ) + ctx = interceptors.ContextWithAuthenticationDetails(ctx, auth.AuthenticationDetails{}) + _, err := interceptor(ctx, "request", &grpc.UnaryServerInfo{}, handler) + return err + } + assert.NoError(t, requestWithIP("10.1.1.1")) + assert.Equal(t, status.Error(codes.ResourceExhausted, "rate limit exceeded"), requestWithIP("10.1.1.1")) + assert.NoError(t, requestWithIP("10.1.1.2")) + + // ratelimit.Store size is 2: the 3rd IP reuses an existing (depleted) rate limiter. 
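+	// Because the store hands back an existing limiter instead of allocating
+	// a fresh one once full, a flood of previously unseen client IPs cannot
+	// reset a depleted rate limit.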
+ assert.Equal(t, status.Error(codes.ResourceExhausted, "rate limit exceeded"), requestWithIP("10.1.1.3")) +} diff --git a/beater/interceptors/timeout.go b/beater/interceptors/timeout.go new file mode 100644 index 00000000000..be5f9ef8508 --- /dev/null +++ b/beater/interceptors/timeout.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package interceptors + +import ( + "context" + "errors" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Timeout returns a grpc.UnaryServerInterceptor that intercepts +// context.Canceled and context.DeadlineExceeded errors, and +// updates the response to indicate that the request timed out. +// This could be caused by either a client timeout or server timeout. +func Timeout() grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + resp, err := handler(ctx, req) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if s, ok := status.FromError(err); !ok || s.Code() == codes.OK { + err = status.Error(codes.DeadlineExceeded, "request timed out") + } + } + return resp, err + } +} diff --git a/beater/interceptors/timeout_test.go b/beater/interceptors/timeout_test.go new file mode 100644 index 00000000000..03312f234b3 --- /dev/null +++ b/beater/interceptors/timeout_test.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
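+
+// Illustrative usage (an assumption, mirroring beater/jaeger/server.go): the
+// interceptor is installed in a server's chain so that handler context errors
+// surface as gRPC DeadlineExceeded statuses, e.g.
+//
+//	grpc.NewServer(grpc.ChainUnaryInterceptor(interceptors.Timeout()))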
+ +package interceptors_test + +import ( + "context" + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/interceptors" +) + +func TestTimeout(t *testing.T) { + for _, tc := range []struct { + err error + timeout bool + }{{ + err: nil, + }, { + err: errors.New("not timeout"), + }, { + err: context.Canceled, + timeout: true, + }, { + err: context.DeadlineExceeded, + timeout: true, + }, { + err: fmt.Errorf("wrapped: %w", context.Canceled), + timeout: true, + }, { + err: errors.Wrap(context.DeadlineExceeded, "also wrapped"), + timeout: true, + }} { + interceptor := interceptors.Timeout() + resp, err := interceptor(context.Background(), "request_arg", &grpc.UnaryServerInfo{}, + func(context.Context, interface{}) (interface{}, error) { return 123, tc.err }, + ) + if tc.err == nil { + assert.NoError(t, err) + } else if !tc.timeout { + assert.Equal(t, tc.err, err) + } else { + s, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.DeadlineExceeded, s.Code()) + assert.Equal(t, "request timed out", s.Message()) + } + assert.Equal(t, 123, resp) // always returned unchanged by interceptor + } +} diff --git a/beater/jaeger/common.go b/beater/jaeger/common.go index 66655a68dcd..6f4d3aef7ba 100644 --- a/beater/jaeger/common.go +++ b/beater/jaeger/common.go @@ -21,29 +21,14 @@ import ( "context" "github.com/jaegertracing/jaeger/model" - "github.com/open-telemetry/opentelemetry-collector/consumer" - trjaeger "github.com/open-telemetry/opentelemetry-collector/translator/trace/jaeger" - "github.com/pkg/errors" + jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "go.opentelemetry.io/collector/consumer" "github.com/elastic/beats/v7/libbeat/monitoring" - "github.com/elastic/apm-server/beater/authorization" "github.com/elastic/apm-server/beater/request" ) -const ( - collectorType = "jaeger" -) - -var ( - monitoringKeys = []request.ResultID{ - request.IDRequestCount, request.IDResponseCount, request.IDResponseErrorsCount, - request.IDResponseValidCount, request.IDEventReceivedCount, request.IDEventDroppedCount, - } - - errNotAuthorized = errors.New("not authorized") -) - type monitoringMap map[request.ResultID]*monitoring.Int func (m monitoringMap) inc(id request.ResultID) { @@ -61,46 +46,11 @@ func (m monitoringMap) add(id request.ResultID, n int64) { func consumeBatch( ctx context.Context, batch model.Batch, - consumer consumer.TraceConsumer, + consumer consumer.Traces, requestMetrics monitoringMap, ) error { spanCount := int64(len(batch.Spans)) requestMetrics.add(request.IDEventReceivedCount, spanCount) - traceData, err := trjaeger.ProtoBatchToOCProto(batch) - if err != nil { - requestMetrics.add(request.IDEventDroppedCount, spanCount) - return err - } - traceData.SourceFormat = collectorType - return consumer.ConsumeTraceData(ctx, traceData) -} - -type authFunc func(context.Context, model.Batch) error - -func noAuth(context.Context, model.Batch) error { - return nil -} - -func makeAuthFunc(authTag string, authHandler *authorization.Handler) authFunc { - return func(ctx context.Context, batch model.Batch) error { - var kind, token string - for i, kv := range batch.Process.GetTags() { - if kv.Key != authTag { - continue - } - // Remove 
the auth tag. - batch.Process.Tags = append(batch.Process.Tags[:i], batch.Process.Tags[i+1:]...) - kind, token = authorization.ParseAuthorizationHeader(kv.VStr) - break - } - auth := authHandler.AuthorizationFor(kind, token) - authorized, err := auth.AuthorizedFor(ctx, authorization.ResourceInternal) - if !authorized { - if err != nil { - return errors.Wrap(err, errNotAuthorized.Error()) - } - return errNotAuthorized - } - return nil - } + traces := jaegertranslator.ProtoBatchToInternalTraces(batch) + return consumer.ConsumeTraces(ctx, traces) } diff --git a/beater/jaeger/grpc.go b/beater/jaeger/grpc.go index 4710d14ac7b..1ed8d281dda 100644 --- a/beater/jaeger/grpc.go +++ b/beater/jaeger/grpc.go @@ -22,32 +22,57 @@ import ( "errors" "fmt" "strconv" + "strings" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/open-telemetry/opentelemetry-collector/consumer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "go.opentelemetry.io/collector/consumer" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/apm-server/agentcfg" + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/interceptors" "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/kibana" "github.com/elastic/apm-server/processor/otel" ) var ( gRPCCollectorRegistry = monitoring.Default.NewRegistry("apm-server.jaeger.grpc.collect") - gRPCCollectorMonitoringMap monitoringMap = request.MonitoringMapForRegistry(gRPCCollectorRegistry, monitoringKeys) + gRPCCollectorMonitoringMap monitoringMap = request.MonitoringMapForRegistry( + gRPCCollectorRegistry, append(request.DefaultResultIDs, + request.IDResponseErrorsRateLimit, + request.IDResponseErrorsTimeout, + request.IDResponseErrorsUnauthorized, + ), + ) + + // RegistryMonitoringMaps provides mappings from the fully qualified gRPC + // method name to its respective monitoring map. + RegistryMonitoringMaps = map[string]map[request.ResultID]*monitoring.Int{ + postSpansFullMethod: gRPCCollectorMonitoringMap, + getSamplingStrategyFullMethod: gRPCSamplingMonitoringMap, + } +) + +const ( + postSpansFullMethod = "/jaeger.api_v2.CollectorService/PostSpans" + getSamplingStrategyFullMethod = "/jaeger.api_v2.SamplingManager/GetSamplingStrategy" ) +// MethodAuthenticators returns a map of all supported Jaeger/gRPC methods to authorization handlers. +func MethodAuthenticators(authenticator *auth.Authenticator, authTag string) map[string]interceptors.MethodAuthenticator { + return map[string]interceptors.MethodAuthenticator{ + postSpansFullMethod: postSpansMethodAuthenticator(authenticator, authTag), + getSamplingStrategyFullMethod: getSamplingStrategyMethodAuthenticator(authenticator), + } +} + // grpcCollector implements Jaeger api_v2 protocol for receiving tracing data type grpcCollector struct { - log *logp.Logger - auth authFunc - consumer consumer.TraceConsumer + consumer consumer.Traces } // PostSpans implements the api_v2/collector.proto. 
It converts spans received via Jaeger Proto batch to open-telemetry @@ -55,23 +80,13 @@ type grpcCollector struct { // The implementation of the protobuf contract is based on the open-telemetry implementation at // https://github.com/open-telemetry/opentelemetry-collector/tree/master/receiver/jaegerreceiver func (c *grpcCollector) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) (*api_v2.PostSpansResponse, error) { - gRPCCollectorMonitoringMap.inc(request.IDRequestCount) - defer gRPCCollectorMonitoringMap.inc(request.IDResponseCount) - if err := c.postSpans(ctx, r.Batch); err != nil { - gRPCCollectorMonitoringMap.inc(request.IDResponseErrorsCount) - c.log.With(logp.Error(err)).Error("error gRPC PostSpans") return nil, err } - gRPCCollectorMonitoringMap.inc(request.IDResponseValidCount) return &api_v2.PostSpansResponse{}, nil } func (c *grpcCollector) postSpans(ctx context.Context, batch model.Batch) error { - if err := c.auth(ctx, batch); err != nil { - gRPCCollectorMonitoringMap.inc(request.IDResponseErrorsUnauthorized) - return status.Error(codes.Unauthenticated, err.Error()) - } return consumeBatch(ctx, batch, c.consumer, gRPCCollectorMonitoringMap) } @@ -83,9 +98,8 @@ var ( ) type grpcSampler struct { - log *logp.Logger - client kibana.Client - fetcher *agentcfg.Fetcher + logger *logp.Logger + fetcher agentcfg.Fetcher } // GetSamplingStrategy implements the api_v2/sampling.proto. @@ -95,30 +109,42 @@ func (s *grpcSampler) GetSamplingStrategy( ctx context.Context, params *api_v2.SamplingStrategyParameters) (*api_v2.SamplingStrategyResponse, error) { - gRPCSamplingMonitoringMap.inc(request.IDRequestCount) - defer gRPCSamplingMonitoringMap.inc(request.IDResponseCount) - if err := s.validateKibanaClient(ctx); err != nil { - gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsCount) - // do not return full error details since this is part of an unprotected endpoint response - s.log.With(logp.Error(err)).Error("Configured Kibana client does not support agent remote configuration") - return nil, errors.New("agent remote configuration not supported, check server logs for more details") - } samplingRate, err := s.fetchSamplingRate(ctx, params.ServiceName) if err != nil { - gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsCount) + var verr *agentcfg.ValidationError + if errors.As(err, &verr) { + if err := checkValidationError(verr); err != nil { + // do not return full error details since this is part of an unprotected endpoint response + s.logger.With(logp.Error(err)).Error("Configured Kibana client does not support agent remote configuration") + return nil, errors.New("agent remote configuration not supported, check server logs for more details") + } + } + // do not return full error details since this is part of an unprotected endpoint response - s.log.With(logp.Error(err)).Error("No valid sampling rate fetched from Kibana.") + s.logger.With(logp.Error(err)).Error("No valid sampling rate fetched from Kibana.") return nil, errors.New("no sampling rate available, check server logs for more details") } - gRPCSamplingMonitoringMap.inc(request.IDResponseValidCount) + return &api_v2.SamplingStrategyResponse{ StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC, - ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{SamplingRate: samplingRate}}, nil + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{SamplingRate: samplingRate}, + }, nil } func (s *grpcSampler) fetchSamplingRate(ctx context.Context, service string) (float64, error) { - query := 
agentcfg.Query{Service: agentcfg.Service{Name: service}, - InsecureAgents: jaegerAgentPrefixes, MarkAsAppliedByAgent: newBool(true)} + // Only service, and not agent, is known for config queries. + // For anonymous/untrusted agents, we filter the results using + // query.InsecureAgents below. + authResource := auth.Resource{ServiceName: service} + if err := auth.Authorize(ctx, auth.ActionAgentConfig, authResource); err != nil { + return 0, err + } + + query := agentcfg.Query{ + Service: agentcfg.Service{Name: service}, + InsecureAgents: jaegerAgentPrefixes, + MarkAsAppliedByAgent: true, + } result, err := s.fetcher.Fetch(ctx, query) if err != nil { gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsServiceUnavailable) @@ -137,23 +163,60 @@ func (s *grpcSampler) fetchSamplingRate(ctx context.Context, service string) (fl return 0, fmt.Errorf("no sampling rate found for %v", service) } -func (s *grpcSampler) validateKibanaClient(ctx context.Context) error { - if s.client == nil { +func checkValidationError(err *agentcfg.ValidationError) error { + body := err.Body() + switch { + case strings.HasPrefix(body, agentcfg.ErrMsgKibanaDisabled): gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsServiceUnavailable) return errors.New("jaeger remote sampling endpoint is disabled, " + "configure the `apm-server.kibana` section in apm-server.yml to enable it") - } - supported, err := s.client.SupportsVersion(ctx, agentcfg.KibanaMinVersion, true) - if err != nil { + case strings.HasPrefix(body, agentcfg.ErrMsgNoKibanaConnection): gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsServiceUnavailable) return fmt.Errorf("error checking kibana version: %w", err) - } - if !supported { + case strings.HasPrefix(body, agentcfg.ErrMsgKibanaVersionNotCompatible): gRPCSamplingMonitoringMap.inc(request.IDResponseErrorsServiceUnavailable) - return fmt.Errorf("not supported by used Kibana version, min required Kibana version: %v", - agentcfg.KibanaMinVersion) + return fmt.Errorf( + "not supported by used Kibana version, min required Kibana version: %v", + agentcfg.KibanaMinVersion, + ) + default: + return nil + } +} + +func postSpansMethodAuthenticator(authenticator *auth.Authenticator, authTag string) interceptors.MethodAuthenticator { + return func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) { + postSpansRequest := req.(*api_v2.PostSpansRequest) + batch := &postSpansRequest.Batch + var kind, token string + for i, kv := range batch.Process.GetTags() { + if kv.Key != authTag { + continue + } + // Remove the auth tag. + batch.Process.Tags = append(batch.Process.Tags[:i], batch.Process.Tags[i+1:]...) + kind, token = auth.ParseAuthorizationHeader(kv.VStr) + break + } + return authenticator.Authenticate(ctx, kind, token) } - return nil } -func newBool(b bool) *bool { return &b } +func getSamplingStrategyMethodAuthenticator(authenticator *auth.Authenticator) interceptors.MethodAuthenticator { + // Sampling strategy queries are always unauthenticated. We still consult + // the authenticator in case auth isn't required, in which case we should + // not rate limit the request. 
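+	// The primary authenticator is tried first with empty credentials; only
+	// when it fails with auth.ErrAuthFailed is the request treated as
+	// anonymous (and therefore subject to rate limiting) via the fallback
+	// authenticator below.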
+ anonymousAuthenticator, err := auth.NewAuthenticator(config.AgentAuth{ + Anonymous: config.AnonymousAgentAuth{Enabled: true}, + }) + if err != nil { + panic(err) + } + return func(ctx context.Context, req interface{}) (auth.AuthenticationDetails, auth.Authorizer, error) { + details, authz, err := authenticator.Authenticate(ctx, "", "") + if !errors.Is(err, auth.ErrAuthFailed) { + return details, authz, err + } + return anonymousAuthenticator.Authenticate(ctx, "", "") + } +} diff --git a/beater/jaeger/grpc_test.go b/beater/jaeger/grpc_test.go index 472003e822c..60f5d352283 100644 --- a/beater/jaeger/grpc_test.go +++ b/beater/jaeger/grpc_test.go @@ -24,133 +24,98 @@ import ( "testing" "time" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata" - "github.com/open-telemetry/opentelemetry-collector/translator/trace/jaeger" + jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/pdata" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/apm-server/agentcfg" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/tests" + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/kibana/kibanatest" ) func TestGRPCCollector_PostSpans(t *testing.T) { for name, tc := range map[string]testGRPCCollector{ "empty request": { request: &api_v2.PostSpansRequest{}, - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseValidCount: 1, - }, - }, - "successful request": { - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseValidCount: 1, - request.IDEventReceivedCount: 2, - }, }, + "successful request": {}, "failing request": { consumerErr: errors.New("consumer failed"), - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDEventReceivedCount: 2, - }, - }, - "auth fails": { - authError: errors.New("oh noes"), - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDResponseErrorsUnauthorized: 1, - }, + expectedErr: errors.New("consumer failed"), }, } { t.Run(name, func(t *testing.T) { tc.setup(t) - var expectedErr error - if tc.authError != nil { - expectedErr = status.Error(codes.Unauthenticated, tc.authError.Error()) - } else { - expectedErr = tc.consumerErr - } resp, err := tc.collector.PostSpans(context.Background(), tc.request) - if expectedErr != nil { + if tc.expectedErr != nil { require.Nil(t, resp) require.Error(t, err) - assert.Equal(t, expectedErr, err) + assert.Equal(t, tc.expectedErr, err) } else { require.NotNil(t, resp) require.NoError(t, err) } - assertMonitoring(t, tc.monitoringInt, 
gRPCCollectorMonitoringMap) }) } } type testGRPCCollector struct { request *api_v2.PostSpansRequest - authError error + consumer tracesConsumerFunc consumerErr error collector *grpcCollector - monitoringInt map[request.ResultID]int64 + expectedErr error } func (tc *testGRPCCollector) setup(t *testing.T) { beatertest.ClearRegistry(gRPCCollectorMonitoringMap) if tc.request == nil { - td := consumerdata.TraceData{Spans: []*v1.Span{ - {TraceId: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - SpanId: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}, - {TraceId: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - SpanId: []byte{0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}}} - batch, err := jaeger.OCProtoToJaegerProto(td) + traces := pdata.NewTraces() + resourceSpans := traces.ResourceSpans().AppendEmpty() + spans := resourceSpans.InstrumentationLibrarySpans().AppendEmpty() + span0 := spans.Spans().AppendEmpty() + span0.SetTraceID(pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})) + span0.SetSpanID(pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF})) + span1 := spans.Spans().AppendEmpty() + span1.SetTraceID(pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})) + span1.SetSpanID(pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF})) + + batches, err := jaegertranslator.InternalTracesToJaegerProto(traces) require.NoError(t, err) - require.NotNil(t, batch) - tc.request = &api_v2.PostSpansRequest{Batch: *batch} + require.Len(t, batches, 1) + tc.request = &api_v2.PostSpansRequest{Batch: *batches[0]} } - tc.collector = &grpcCollector{logp.NewLogger("gRPC"), authFunc(func(context.Context, model.Batch) error { - return tc.authError - }), traceConsumerFunc(func(ctx context.Context, td consumerdata.TraceData) error { - return tc.consumerErr - })} -} - -func assertMonitoring(t *testing.T, expected map[request.ResultID]int64, actual monitoringMap) { - for _, k := range monitoringKeys { - if val, ok := expected[k]; ok { - assert.Equalf(t, val, actual[k].Get(), "%s mismatch", k) - } else { - assert.Zerof(t, actual[k].Get(), "%s mismatch", k) + if tc.consumer == nil { + tc.consumer = func(ctx context.Context, td pdata.Traces) error { + return tc.consumerErr } } + tc.collector = &grpcCollector{tc.consumer} } -type traceConsumerFunc func(ctx context.Context, td consumerdata.TraceData) error +type tracesConsumerFunc func(ctx context.Context, td pdata.Traces) error -func (f traceConsumerFunc) ConsumeTraceData(ctx context.Context, td consumerdata.TraceData) error { +func (f tracesConsumerFunc) Capabilities() consumer.Capabilities { + return consumer.Capabilities{} +} + +func (f tracesConsumerFunc) ConsumeTraces(ctx context.Context, td pdata.Traces) error { return f(ctx, td) } -func nopConsumer() traceConsumerFunc { - return func(context.Context, consumerdata.TraceData) error { return nil } +func nopConsumer() tracesConsumerFunc { + return func(context.Context, pdata.Traces) error { return nil } } func TestGRPCSampler_GetSamplingStrategy(t *testing.T) { @@ -164,11 +129,7 @@ func TestGRPCSampler_GetSamplingStrategy(t *testing.T) { "settings": map[string]interface{}{}}}, expectedErrMsg: "no sampling rate available", expectedLogMsg: "No valid sampling rate fetched", - monitoringInt: map[request.ResultID]int64{ - 
request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDResponseErrorsNotFound: 1}}, + }, "invalidSamplingRate": { kibanaBody: map[string]interface{}{ "_id": "1", @@ -177,26 +138,27 @@ func TestGRPCSampler_GetSamplingStrategy(t *testing.T) { agentcfg.TransactionSamplingRateKey: "foo"}}}, expectedErrMsg: "no sampling rate available", expectedLogMsg: "No valid sampling rate fetched", - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDResponseErrorsInternal: 1}}, + }, "unsupportedVersion": { kibanaVersion: common.MustNewVersion("7.4.0"), expectedErrMsg: "agent remote configuration not supported", expectedLogMsg: "Kibana client does not support", - monitoringInt: map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseErrorsCount: 1, - request.IDResponseErrorsServiceUnavailable: 1}}, + }, } { t.Run(name, func(t *testing.T) { require.NoError(t, logp.DevelopmentSetup(logp.ToObserverOutput())) tc.setup() params := &api_v2.SamplingStrategyParameters{ServiceName: "serviceA"} - resp, err := tc.sampler.GetSamplingStrategy(context.Background(), params) + + authenticator, err := auth.NewAuthenticator(config.AgentAuth{ + Anonymous: config.AnonymousAgentAuth{Enabled: true}, + }) + require.NoError(t, err) + ctx := context.Background() + _, authz, err := authenticator.Authenticate(ctx, "", "") + require.NoError(t, err) + ctx = auth.ContextWithAuthorizer(ctx, authz) + resp, err := tc.sampler.GetSamplingStrategy(ctx, params) // assert sampling response if tc.expectedErrMsg != "" { @@ -218,9 +180,6 @@ func TestGRPCSampler_GetSamplingStrategy(t *testing.T) { assert.Nil(t, resp.OperationSampling) assert.Nil(t, resp.RateLimitingSampling) } - - // assert monitoring counters - assertMonitoring(t, tc.monitoringInt, gRPCSamplingMonitoringMap) }) } } @@ -234,7 +193,6 @@ type testGRPCSampler struct { expectedErrMsg string expectedLogMsg string expectedSamplingRate float64 - monitoringInt map[request.ResultID]int64 } func (tc *testGRPCSampler) setup() { @@ -254,15 +212,8 @@ func (tc *testGRPCSampler) setup() { if tc.kibanaVersion == nil { tc.kibanaVersion = common.MustNewVersion("7.7.0") } - client := tests.MockKibana(tc.kibanaCode, tc.kibanaBody, *tc.kibanaVersion, true) - fetcher := agentcfg.NewFetcher(client, time.Second) - tc.sampler = &grpcSampler{logp.L(), client, fetcher} + client := kibanatest.MockKibana(tc.kibanaCode, tc.kibanaBody, *tc.kibanaVersion, true) + fetcher := agentcfg.NewKibanaFetcher(client, time.Second) + tc.sampler = &grpcSampler{logp.L(), fetcher} beatertest.ClearRegistry(gRPCSamplingMonitoringMap) - if tc.monitoringInt == nil { - tc.monitoringInt = map[request.ResultID]int64{ - request.IDRequestCount: 1, - request.IDResponseCount: 1, - request.IDResponseValidCount: 1, - } - } } diff --git a/beater/jaeger/http.go b/beater/jaeger/http.go index 0119fb3bc90..72948a757df 100644 --- a/beater/jaeger/http.go +++ b/beater/jaeger/http.go @@ -28,7 +28,7 @@ import ( "github.com/jaegertracing/jaeger/model" converter "github.com/jaegertracing/jaeger/model/converter/thrift/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" - "github.com/open-telemetry/opentelemetry-collector/consumer" + "go.opentelemetry.io/collector/consumer" "github.com/elastic/beats/v7/libbeat/monitoring" @@ -42,11 +42,12 @@ const ( var ( httpRegistry = 
monitoring.Default.NewRegistry("apm-server.jaeger.http") + monitoringKeys = append(request.DefaultResultIDs, request.IDEventReceivedCount) httpMonitoringMap = request.MonitoringMapForRegistry(httpRegistry, monitoringKeys) ) // newHTTPMux returns a new http.ServeMux which accepts Thrift-encoded spans. -func newHTTPMux(consumer consumer.TraceConsumer) (*http.ServeMux, error) { +func newHTTPMux(consumer consumer.Traces) (*http.ServeMux, error) { handler, err := middleware.Wrap( newHTTPHandler(consumer), middleware.LogMiddleware(), @@ -65,10 +66,10 @@ func newHTTPMux(consumer consumer.TraceConsumer) (*http.ServeMux, error) { } type httpHandler struct { - consumer consumer.TraceConsumer + consumer consumer.Traces } -func newHTTPHandler(consumer consumer.TraceConsumer) request.Handler { +func newHTTPHandler(consumer consumer.Traces) request.Handler { h := &httpHandler{consumer} return h.handle } @@ -109,8 +110,8 @@ func (h *httpHandler) handleTraces(c *request.Context) { var batch jaeger.Batch transport := thrift.NewStreamTransport(c.Request.Body, ioutil.Discard) - protocol := thrift.NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - if err := batch.Read(protocol); err != nil { + protocol := thrift.NewTBinaryProtocolFactoryConf(nil).GetProtocol(transport) + if err := batch.Read(c.Request.Context(), protocol); err != nil { c.Result.SetWithError(request.IDResponseErrorsDecode, err) return } diff --git a/beater/jaeger/http_test.go b/beater/jaeger/http_test.go index 6fceb4ce7b1..aeb9aeb001a 100644 --- a/beater/jaeger/http_test.go +++ b/beater/jaeger/http_test.go @@ -29,9 +29,9 @@ import ( "github.com/apache/thrift/lib/go/thrift" jaegerthrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger" - "github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/pdata" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/request" @@ -89,7 +89,7 @@ func testHTTPMux(t *testing.T, test httpMuxTest) { beatertest.ClearRegistry(httpMonitoringMap) var consumed bool - mux, err := newHTTPMux(traceConsumerFunc(func(ctx context.Context, td consumerdata.TraceData) error { + mux, err := newHTTPMux(tracesConsumerFunc(func(ctx context.Context, _ pdata.Traces) error { consumed = true return test.consumerError })) @@ -106,6 +106,16 @@ func testHTTPMux(t *testing.T, test httpMuxTest) { assertMonitoring(t, test.expectedMonitoringMap, httpMonitoringMap) } +func assertMonitoring(t *testing.T, expected map[request.ResultID]int64, actual monitoringMap) { + for _, k := range monitoringKeys { + if val, ok := expected[k]; ok { + assert.Equalf(t, val, actual[k].Get(), "%s mismatch", k) + } else { + assert.Zerof(t, actual[k].Get(), "%s mismatch", k) + } + } +} + func TestHTTPHandler_UnknownRoute(t *testing.T) { c, recorder := newRequestContext("POST", "/foo", nil) newHTTPHandler(nopConsumer())(c) @@ -147,7 +157,7 @@ func TestHTTPMux_InvalidBody(t *testing.T) { } func TestHTTPMux_ConsumerError(t *testing.T) { - var consumer traceConsumerFunc = func(ctx context.Context, td consumerdata.TraceData) error { + var consumer tracesConsumerFunc = func(ctx context.Context, _ pdata.Traces) error { return errors.New("bauch tut weh") } c, recorder := newRequestContext("POST", "/api/traces", encodeThriftSpans(&jaegerthrift.Span{})) @@ -171,9 +181,9 @@ func encodeThriftSpans(spans ...*jaegerthrift.Span) io.Reader { } func 
encodeThriftBatch(batch *jaegerthrift.Batch) io.Reader {
-	transport := thrift.NewTMemoryBuffer()
-	if err := batch.Write(thrift.NewTBinaryProtocolTransport(transport)); err != nil {
+	buffer := thrift.NewTMemoryBuffer()
+	if err := batch.Write(context.Background(), thrift.NewTBinaryProtocolConf(buffer, nil)); err != nil {
 		panic(err)
 	}
-	return bytes.NewReader(transport.Buffer.Bytes())
+	return bytes.NewReader(buffer.Bytes())
 }
diff --git a/beater/jaeger/server.go b/beater/jaeger/server.go
index 370f429dcd8..7c7ff412e45 100644
--- a/beater/jaeger/server.go
+++ b/beater/jaeger/server.go
@@ -23,6 +23,7 @@ import (
 	"net/http"
 
 	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
+	"github.com/libp2p/go-reuseport"
 	"go.elastic.co/apm"
 	"go.elastic.co/apm/module/apmgrpc"
 	"go.elastic.co/apm/module/apmhttp"
@@ -33,14 +34,25 @@ import (
 	"github.com/elastic/beats/v7/libbeat/logp"
 
 	"github.com/elastic/apm-server/agentcfg"
-	"github.com/elastic/apm-server/beater/authorization"
+	"github.com/elastic/apm-server/beater/auth"
 	"github.com/elastic/apm-server/beater/config"
-	"github.com/elastic/apm-server/kibana"
-	processor "github.com/elastic/apm-server/processor/otel"
-	"github.com/elastic/apm-server/publish"
+	"github.com/elastic/apm-server/beater/interceptors"
+	logs "github.com/elastic/apm-server/log"
+	"github.com/elastic/apm-server/model"
+	"github.com/elastic/apm-server/processor/otel"
 )
 
+// ElasticAuthTag is the name of the agent tag that will be used for auth.
+// The tag value should be "Bearer <token>".
+//
+// This is only relevant to the gmuxed gRPC server.
+const ElasticAuthTag = "elastic-apm-auth"
+
 // Server manages Jaeger gRPC and HTTP servers, providing methods for starting and stopping them.
+//
+// NOTE(axw) the standalone Jaeger gRPC and HTTP servers provided by this package are deprecated,
+// and will be removed in a future release. Jaeger gRPC is now served on the primary APM Server
+// port, muxed with Elastic APM HTTP traffic.
 type Server struct {
 	logger *logp.Logger
 	grpc   struct {
@@ -54,59 +66,60 @@ type Server struct {
 }
 
 // NewServer creates a new Server.
-func NewServer(logger *logp.Logger, cfg *config.Config, tracer *apm.Tracer, reporter publish.Reporter) (*Server, error) {
+func NewServer(
+	logger *logp.Logger,
+	cfg *config.Config,
+	tracer *apm.Tracer,
+	processor model.BatchProcessor,
+	fetcher agentcfg.Fetcher,
+) (*Server, error) {
 	if !cfg.JaegerConfig.GRPC.Enabled && !cfg.JaegerConfig.HTTP.Enabled {
 		return nil, nil
 	}
-	traceConsumer := &processor.Consumer{Reporter: reporter}
+	traceConsumer := &otel.Consumer{Processor: processor}
 
 	srv := &Server{logger: logger}
 	if cfg.JaegerConfig.GRPC.Enabled {
-		// By default auth is not required for Jaeger - users must explicitly specify which tag to use.
-		auth := noAuth
+		var agentAuth config.AgentAuth
 		if cfg.JaegerConfig.GRPC.AuthTag != "" {
-			// TODO(axw) share auth builder with beater/api.
-			authBuilder, err := authorization.NewBuilder(cfg)
-			if err != nil {
-				return nil, err
-			}
-			auth = makeAuthFunc(
-				cfg.JaegerConfig.GRPC.AuthTag,
-				authBuilder.ForPrivilege(authorization.PrivilegeEventWrite.Action),
-			)
+			// By default auth is not required for Jaeger - users
+			// must explicitly specify which tag to use.
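+			// When a tag is configured, the server's agent auth
+			// settings (e.g. the secret token) are enforced for
+			// Jaeger agents as well.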
+ agentAuth = cfg.AgentAuth + } + authenticator, err := auth.NewAuthenticator(agentAuth) + if err != nil { + return nil, err + } + + logger = logger.Named(logs.Jaeger) + grpcInterceptors := []grpc.UnaryServerInterceptor{ + apmgrpc.NewUnaryServerInterceptor( + apmgrpc.WithRecovery(), + apmgrpc.WithTracer(tracer), + ), + interceptors.Logging(logger), + interceptors.Metrics(logger, RegistryMonitoringMaps), + interceptors.Timeout(), + interceptors.Auth(MethodAuthenticators(authenticator, cfg.JaegerConfig.GRPC.AuthTag)), } // TODO(axw) should the listener respect cfg.MaxConnections? - grpcListener, err := net.Listen("tcp", cfg.JaegerConfig.GRPC.Host) + grpcListener, err := reuseport.Listen("tcp", cfg.JaegerConfig.GRPC.Host) if err != nil { return nil, err } - grpcOptions := []grpc.ServerOption{grpc.UnaryInterceptor(apmgrpc.NewUnaryServerInterceptor( - apmgrpc.WithRecovery(), - apmgrpc.WithTracer(tracer))), - } + grpcOptions := []grpc.ServerOption{grpc.ChainUnaryInterceptor(grpcInterceptors...)} if cfg.JaegerConfig.GRPC.TLS != nil { creds := credentials.NewTLS(cfg.JaegerConfig.GRPC.TLS) grpcOptions = append(grpcOptions, grpc.Creds(creds)) } srv.grpc.server = grpc.NewServer(grpcOptions...) srv.grpc.listener = grpcListener - - api_v2.RegisterCollectorServiceServer(srv.grpc.server, - &grpcCollector{logger, auth, traceConsumer}) - - var client kibana.Client - var fetcher *agentcfg.Fetcher - if cfg.Kibana.Enabled { - client = kibana.NewConnectingClient(&cfg.Kibana.ClientConfig) - fetcher = agentcfg.NewFetcher(client, cfg.AgentConfig.Cache.Expiration) - } - api_v2.RegisterSamplingManagerServer(srv.grpc.server, - &grpcSampler{logger, client, fetcher}) + RegisterGRPCServices(srv.grpc.server, logger, processor, fetcher) } if cfg.JaegerConfig.HTTP.Enabled { // TODO(axw) should the listener respect cfg.MaxConnections? - httpListener, err := net.Listen("tcp", cfg.JaegerConfig.HTTP.Host) + httpListener, err := reuseport.Listen("tcp", cfg.JaegerConfig.HTTP.Host) if err != nil { return nil, err } @@ -126,6 +139,18 @@ func NewServer(logger *logp.Logger, cfg *config.Config, tracer *apm.Tracer, repo return srv, nil } +// RegisterGRPCServices registers Jaeger gRPC services with srv. +func RegisterGRPCServices( + srv *grpc.Server, + logger *logp.Logger, + processor model.BatchProcessor, + fetcher agentcfg.Fetcher, +) { + traceConsumer := &otel.Consumer{Processor: processor} + api_v2.RegisterCollectorServiceServer(srv, &grpcCollector{traceConsumer}) + api_v2.RegisterSamplingManagerServer(srv, &grpcSampler{logger, fetcher}) +} + // Serve accepts gRPC and HTTP connections, and handles Jaeger requests. 
// // Serve blocks until Stop is called, or if either of the gRPC or HTTP diff --git a/beater/jaeger/server_test.go b/beater/jaeger/server_test.go index 1673bd008f3..c2e2ed9025e 100644 --- a/beater/jaeger/server_test.go +++ b/beater/jaeger/server_test.go @@ -44,11 +44,11 @@ import ( "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/apm-server/agentcfg" "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" + "github.com/elastic/apm-server/model" ) func TestApprovals(t *testing.T) { @@ -161,7 +161,7 @@ func TestServerIntegration(t *testing.T) { if err != nil { panic(err) } - return serverConfig.BuildModuleConfig("") + return serverConfig.BuildServerConfig("") }(), }, }, @@ -190,7 +190,7 @@ func TestServerIntegration(t *testing.T) { if err != nil { panic(err) } - return serverConfig.BuildModuleConfig("") + return serverConfig.BuildServerConfig("") }(), }, }, @@ -215,7 +215,7 @@ func TestServerIntegration(t *testing.T) { if err != nil { panic(err) } - return serverConfig.BuildModuleConfig("") + return serverConfig.BuildServerConfig("") }(), }, }, @@ -231,7 +231,7 @@ func TestServerIntegration(t *testing.T) { "secret token set but no auth_tag": { cfg: func() *config.Config { cfg := config.DefaultConfig() - cfg.SecretToken = "hunter2" + cfg.AgentAuth.SecretToken = "hunter2" cfg.JaegerConfig.GRPC.Enabled = true cfg.JaegerConfig.GRPC.Host = "localhost:0" cfg.JaegerConfig.HTTP.Enabled = true @@ -243,7 +243,7 @@ func TestServerIntegration(t *testing.T) { "secret token and auth_tag set, but no auth_tag sent by agent": { cfg: func() *config.Config { cfg := config.DefaultConfig() - cfg.SecretToken = "hunter2" + cfg.AgentAuth.SecretToken = "hunter2" cfg.JaegerConfig.GRPC.Enabled = true cfg.JaegerConfig.GRPC.Host = "localhost:0" cfg.JaegerConfig.GRPC.AuthTag = "authorization" @@ -255,7 +255,7 @@ func TestServerIntegration(t *testing.T) { "secret token and auth_tag set, auth_tag sent by agent": { cfg: func() *config.Config { cfg := config.DefaultConfig() - cfg.SecretToken = "hunter2" + cfg.AgentAuth.SecretToken = "hunter2" cfg.JaegerConfig.GRPC.Enabled = true cfg.JaegerConfig.GRPC.Host = "localhost:0" cfg.JaegerConfig.GRPC.AuthTag = "authorization" @@ -357,16 +357,15 @@ type testcase struct { } func (tc *testcase) setup(t *testing.T) { - reporter := func(ctx context.Context, req publish.PendingReq) error { - for _, transformable := range req.Transformables { - tc.events = append(tc.events, transformable.Transform(ctx, &transform.Config{})...) - } + var batchProcessor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + tc.events = append(tc.events, batch.Transform(ctx)...) 
return nil } var err error tc.tracer = apmtest.NewRecordingTracer() - tc.server, err = NewServer(logp.NewLogger("jaeger"), tc.cfg, tc.tracer.Tracer, reporter) + f := agentcfg.NewFetcher(tc.cfg) + tc.server, err = NewServer(logp.NewLogger("jaeger"), tc.cfg, tc.tracer.Tracer, batchProcessor, f) require.NoError(t, err) if tc.server == nil { return diff --git a/beater/java_attacher/java_attacher.go b/beater/java_attacher/java_attacher.go new file mode 100644 index 00000000000..7edf3e6284a --- /dev/null +++ b/beater/java_attacher/java_attacher.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package javaattacher + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/beater/config" +) + +type JavaAttacher struct { + cfg config.JavaAttacherConfig + logger *logp.Logger +} + +func New(cfg config.JavaAttacherConfig) (JavaAttacher, error) { + if cfg.JavaBin == "" { + if jh := os.Getenv("JAVA_HOME"); jh != "" { + cfg.JavaBin = filepath.Join(jh, "/bin/java") + } else { + bin, err := exec.LookPath("java") + if err != nil { + return JavaAttacher{}, fmt.Errorf("no java binary found: %v", err) + } + cfg.JavaBin = bin + } + } else { + // Ensure we're using the correct separators for the system + // running apm-server + cfg.JavaBin = filepath.FromSlash(cfg.JavaBin) + } + logger := logp.NewLogger("java-attacher") + return JavaAttacher{ + cfg: cfg, + logger: logger, + }, nil +} + +// javaAttacher is bundled by the server +// TODO: Figure out the real path +var javaAttacher = filepath.FromSlash("/bin/apm-agent-attach-cli-1.24.0-slim.jar") + +func (j JavaAttacher) Run(ctx context.Context) error { + cmd := j.build(ctx) + j.logger.Debugf("starting java attacher with command: %s", strings.Join(cmd.Args, " ")) + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + + if err := cmd.Start(); err != nil { + return err + } + + donec := make(chan struct{}) + defer close(donec) + go func() { + scanner := bufio.NewScanner(stdout) + b := struct { + LogLevel string `json:"log.level"` + Message string `json:"message"` + }{} + for scanner.Scan() { + select { + case <-ctx.Done(): + return + case <-donec: + return + default: + } + if err := json.Unmarshal(scanner.Bytes(), &b); err != nil { + j.logger.Errorf("error unmarshaling attacher logs: %v", err) + continue + } + switch b.LogLevel { + case "FATAL", "ERROR": + j.logger.Error(b.Message) + case "WARN": + j.logger.Warn(b.Message) + case "INFO": + j.logger.Info(b.Message) + case "DEBUG", "TRACE": + j.logger.Debug(b.Message) + default: + j.logger.Errorf("unrecognized java-attacher log.level: %s", b.LogLevel) + } + } + if err := scanner.Err(); 
err != nil { + j.logger.Errorf("error scanning attacher logs: %v", err) + } + }() + + return cmd.Wait() +} + +func (j JavaAttacher) build(ctx context.Context) *exec.Cmd { + args := append([]string{"-jar", javaAttacher}, j.formatArgs()...) + return exec.CommandContext(ctx, j.cfg.JavaBin, args...) +} + +func (j JavaAttacher) formatArgs() []string { + args := []string{"--continuous", "--log-level", "debug"} + + if j.cfg.DownloadAgentVersion != "" { + args = append(args, "--download-agent-version", j.cfg.DownloadAgentVersion) + } + + for _, flag := range j.cfg.DiscoveryRules { + for name, value := range flag { + args = append(args, makeArg("--"+name, value)) + } + } + cfg := make([]string, 0, len(j.cfg.Config)) + for k, v := range j.cfg.Config { + cfg = append(cfg, "--config "+k+"="+v) + } + // we want a predictable order for testing + sort.Strings(cfg) + + return append(args, cfg...) +} + +func makeArg(flagName string, args ...string) string { + return flagName + " " + strings.Join(args, " ") +} diff --git a/beater/java_attacher/java_attacher_test.go b/beater/java_attacher/java_attacher_test.go new file mode 100644 index 00000000000..fb29d814dca --- /dev/null +++ b/beater/java_attacher/java_attacher_test.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
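+
+// Illustrative usage (an assumption, not part of this change): the attacher
+// is constructed from config and run with a cancellable context, e.g.
+//
+//	attacher, err := javaattacher.New(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	go func() { _ = attacher.Run(ctx) }()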
+ +package javaattacher + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/config" +) + +func TestNew(t *testing.T) { + cfg := config.JavaAttacherConfig{JavaBin: ""} + jh := os.Getenv("JAVA_HOME") + os.Setenv("JAVA_HOME", "/usr/local") + defer func() { + // reset JAVA_HOME + os.Setenv("JAVA_HOME", jh) + }() + + attacher, err := New(cfg) + require.NoError(t, err) + + javapath := filepath.FromSlash("/usr/local/bin/java") + assert.Equal(t, javapath, attacher.cfg.JavaBin) + + cfg.JavaBin = "/home/user/bin/java" + attacher, err = New(cfg) + require.NoError(t, err) + + javapath = filepath.FromSlash("/home/user/bin/java") + assert.Equal(t, javapath, attacher.cfg.JavaBin) +} + +func TestBuild(t *testing.T) { + args := []map[string]string{ + {"exclude-user": "root"}, + {"include-main": "MyApplication"}, + {"include-main": "my-application.jar"}, + {"include-vmarg": "elastic.apm.agent.attach=true"}, + } + cfg := config.JavaAttacherConfig{ + Enabled: true, + DiscoveryRules: args, + Config: map[string]string{ + "service_name": "my-cool-service", + "server_url": "http://localhost:8200", + }, + JavaBin: "/usr/bin/java", + DownloadAgentVersion: "1.25.0", + } + + attacher, err := New(cfg) + require.NoError(t, err) + + cmd := attacher.build(context.Background()) + + want := filepath.FromSlash("/usr/bin/java -jar /bin/apm-agent-attach-cli-1.24.0-slim.jar") + + " --continuous --log-level debug --download-agent-version 1.25.0 --exclude-user root --include-main MyApplication " + + "--include-main my-application.jar --include-vmarg elastic.apm.agent.attach=true " + + "--config server_url=http://localhost:8200 --config service_name=my-cool-service" + + cmdArgs := strings.Join(cmd.Args, " ") + assert.Equal(t, want, cmdArgs) +} diff --git a/beater/middleware/auth_middleware.go b/beater/middleware/auth_middleware.go new file mode 100644 index 00000000000..75fa09e6627 --- /dev/null +++ b/beater/middleware/auth_middleware.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package middleware + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/beater/request" +) + +// Authenticator provides an interface for authenticating a client. +type Authenticator interface { + Authenticate(ctx context.Context, kind, value string) (auth.AuthenticationDetails, auth.Authorizer, error) +} + +// AuthMiddleware returns a Middleware to authenticate clients. +// +// If required is true, then the middleware will prevent unauthenticated +// requests. 
Otherwise the request.Context's Authentication will be set, +// and in the case of unauthenticated requests, the Authentication field +// will have the zero value and the context will be populated with an +// auth.Authorizer that denies all actions and resources. +func AuthMiddleware(authenticator Authenticator, required bool) Middleware { + return func(h request.Handler) (request.Handler, error) { + return func(c *request.Context) { + header := c.Request.Header.Get(headers.Authorization) + kind, token := auth.ParseAuthorizationHeader(header) + details, authorizer, err := authenticator.Authenticate(c.Request.Context(), kind, token) + if err != nil { + if errors.Is(err, auth.ErrAuthFailed) { + if !required { + details = auth.AuthenticationDetails{} + authorizer = denyAll{} + } else { + id := request.IDResponseErrorsUnauthorized + status := request.MapResultIDToStatus[id] + status.Keyword = err.Error() + c.Result.Set(id, status.Code, status.Keyword, nil, nil) + c.Write() + return + } + } else { + c.Result.SetDefault(request.IDResponseErrorsServiceUnavailable) + c.Result.Err = err + c.Write() + return + } + } + c.Authentication = details + c.Request = c.Request.WithContext(auth.ContextWithAuthorizer(c.Request.Context(), authorizer)) + h(c) + + // Processors may indicate that a request is unauthorized by returning auth.ErrUnauthorized. + if errors.Is(c.Result.Err, auth.ErrUnauthorized) { + switch c.Result.ID { + case request.IDUnset, request.IDResponseErrorsInternal: + id := request.IDResponseErrorsForbidden + status := request.MapResultIDToStatus[id] + c.Result.Set(id, status.Code, c.Result.Keyword, c.Result.Body, c.Result.Err) + } + } + }, nil + } +} + +type denyAll struct{} + +func (denyAll) Authorize(context.Context, auth.Action, auth.Resource) error { + return auth.ErrUnauthorized +} diff --git a/beater/middleware/auth_middleware_test.go b/beater/middleware/auth_middleware_test.go new file mode 100644 index 00000000000..9f0b48c0769 --- /dev/null +++ b/beater/middleware/auth_middleware_test.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
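A minimal sketch of an Authenticator that AuthMiddleware above could wrap, using only identifiers visible in this patch (auth.ErrAuthFailed, auth.MethodSecretToken, auth.Action, auth.Resource); it is an illustration, not code from the repository:

package example

import (
	"context"
	"fmt"

	"github.com/elastic/apm-server/beater/auth"
)

// fixedTokenAuthenticator is a hypothetical Authenticator accepting a single
// shared secret presented as "Authorization: Bearer <token>".
type fixedTokenAuthenticator struct {
	token string
}

func (a fixedTokenAuthenticator) Authenticate(ctx context.Context, kind, value string) (auth.AuthenticationDetails, auth.Authorizer, error) {
	if kind == "Bearer" && value == a.token {
		return auth.AuthenticationDetails{Method: auth.MethodSecretToken}, allowAll{}, nil
	}
	// Wrapping auth.ErrAuthFailed is what lets AuthMiddleware answer with
	// 401 (when auth is required) rather than 503.
	return auth.AuthenticationDetails{}, nil, fmt.Errorf("%w: invalid or missing token", auth.ErrAuthFailed)
}

// allowAll is the mirror image of denyAll above: it authorizes every action.
type allowAll struct{}

func (allowAll) Authorize(context.Context, auth.Action, auth.Resource) error { return nil }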
+ +package middleware + +import ( + "context" + "fmt" + "net/http" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/beatertest" + "github.com/elastic/apm-server/beater/headers" + "github.com/elastic/apm-server/beater/request" +) + +func TestAuthMiddleware(t *testing.T) { + + for name, tc := range map[string]struct { + authHeader string + authRequired bool + authError error + + expectKind string + expectToken string + expectStatus int + expectBody string + expectAuthentication auth.AuthenticationDetails + }{ + "authenticated": { + authRequired: true, + authHeader: "Bearer abc123", + expectKind: "Bearer", + expectToken: "abc123", + expectStatus: http.StatusAccepted, + expectAuthentication: auth.AuthenticationDetails{ + Method: auth.MethodSecretToken, + }, + }, + "auth_failed_required": { + authRequired: true, + authError: fmt.Errorf("%w: nope", auth.ErrAuthFailed), + expectStatus: http.StatusUnauthorized, + expectBody: beatertest.ResultErrWrap("authentication failed: nope"), + }, + "auth_failed_optional": { + authRequired: false, + authError: fmt.Errorf("%w: nope", auth.ErrAuthFailed), + expectStatus: http.StatusAccepted, + }, + } { + t.Run(name, func(t *testing.T) { + c, rec := beatertest.DefaultContextWithResponseRecorder() + if tc.authHeader != "" { + c.Request.Header.Set(headers.Authorization, tc.authHeader) + } + var authenticator authenticatorFunc = func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + assert.Equal(t, tc.expectKind, kind) + assert.Equal(t, tc.expectToken, token) + return auth.AuthenticationDetails{Method: auth.MethodSecretToken}, denyAll{}, tc.authError + } + m := AuthMiddleware(authenticator, tc.authRequired) + Apply(m, beatertest.Handler202)(c) + assert.Equal(t, tc.expectStatus, rec.Code) + assert.Equal(t, tc.expectBody, rec.Body.String()) + assert.Equal(t, tc.expectAuthentication, c.Authentication) + }) + } +} + +func TestAuthMiddlewareError(t *testing.T) { + var authenticator authenticatorFunc = func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return auth.AuthenticationDetails{}, nil, errors.New("internal details should not be leaked") + } + for _, required := range []bool{false, true} { + c, rec := beatertest.DefaultContextWithResponseRecorder() + m := AuthMiddleware(authenticator, required) + Apply(m, beatertest.Handler202)(c) + assert.Equal(t, http.StatusServiceUnavailable, rec.Code) + assert.Equal(t, `{"error":"service unavailable"}`+"\n", rec.Body.String()) + assert.EqualError(t, c.Result.Err, "internal details should not be leaked") + assert.Zero(t, c.Authentication) + } +} + +func TestAuthUnauthorized(t *testing.T) { + var authorizer authorizerFunc = func(context.Context, auth.Action, auth.Resource) error { + return fmt.Errorf("%w: none shall pass", auth.ErrUnauthorized) + } + var authenticator authenticatorFunc = func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return auth.AuthenticationDetails{}, authorizer, nil + } + next := func(c *request.Context) { + c.Result.Err = auth.Authorize(c.Request.Context(), auth.ActionEventIngest, auth.Resource{}) + } + c, _ := beatertest.DefaultContextWithResponseRecorder() + m := AuthMiddleware(authenticator, true) + Apply(m, next)(c) + + assert.Equal(t, request.IDResponseErrorsForbidden, c.Result.ID) + 
assert.Equal(t, "unauthorized: none shall pass", c.Result.Body) +} + +type authenticatorFunc func(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) + +func (f authenticatorFunc) Authenticate(ctx context.Context, kind, token string) (auth.AuthenticationDetails, auth.Authorizer, error) { + return f(ctx, kind, token) +} + +type authorizerFunc func(ctx context.Context, action auth.Action, resource auth.Resource) error + +func (f authorizerFunc) Authorize(ctx context.Context, action auth.Action, resource auth.Resource) error { + return f(ctx, action, resource) +} diff --git a/beater/middleware/authorization_middleware.go b/beater/middleware/authorization_middleware.go deleted file mode 100644 index b14c82293ba..00000000000 --- a/beater/middleware/authorization_middleware.go +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package middleware - -import ( - "github.com/elastic/apm-server/beater/authorization" - "github.com/elastic/apm-server/beater/headers" - "github.com/elastic/apm-server/beater/request" -) - -// AuthorizationMiddleware returns a Middleware to only let authorized requests pass through -func AuthorizationMiddleware(auth *authorization.Handler, apply bool) Middleware { - return func(h request.Handler) (request.Handler, error) { - return func(c *request.Context) { - header := c.Request.Header.Get(headers.Authorization) - c.Authorization = auth.AuthorizationFor(authorization.ParseAuthorizationHeader(header)) - - if apply { - authorized, err := c.Authorization.AuthorizedFor(c.Request.Context(), authorization.ResourceInternal) - if !authorized { - c.Result.SetDeniedAuthorization(err) - c.Write() - return - } - } - - h(c) - }, nil - } -} diff --git a/beater/middleware/authorization_middleware_test.go b/beater/middleware/authorization_middleware_test.go deleted file mode 100644 index 54cfee9148d..00000000000 --- a/beater/middleware/authorization_middleware_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package middleware - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/beater/authorization" - "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/beater/headers" - "github.com/elastic/apm-server/beater/request" -) - -func TestAuthorizationMiddleware(t *testing.T) { - - for name, tc := range map[string]struct { - header string - allowedWhenSecured bool - }{ - "no header": {}, - "invalid header": {header: "Foo Bar"}, - "invalid token": {header: "Bearer Bar"}, - "bearer": {header: "Bearer foo", allowedWhenSecured: true}, - } { - setup := func(token string) (*authorization.Handler, *request.Context, *httptest.ResponseRecorder) { - c, rec := beatertest.DefaultContextWithResponseRecorder() - if tc.header != "" { - c.Request.Header.Set(headers.Authorization, tc.header) - } - builder, err := authorization.NewBuilder(&config.Config{SecretToken: token}) - require.NoError(t, err) - return builder.ForAnyOfPrivileges(authorization.ActionAny), c, rec - } - - t.Run(name+"secured apply", func(t *testing.T) { - handler, c, rec := setup("foo") - m := AuthorizationMiddleware(handler, true) - Apply(m, beatertest.Handler202)(c) - if tc.allowedWhenSecured { - require.Equal(t, http.StatusAccepted, rec.Code) - } else { - require.Equal(t, http.StatusUnauthorized, rec.Code) - } - }) - - t.Run(name+"secured", func(t *testing.T) { - handler, c, rec := setup("foo") - m := AuthorizationMiddleware(handler, false) - Apply(m, beatertest.Handler202)(c) - require.Equal(t, http.StatusAccepted, rec.Code) - }) - - t.Run(name+"unsecured apply", func(t *testing.T) { - handler, c, rec := setup("") - m := AuthorizationMiddleware(handler, true) - Apply(m, beatertest.Handler202)(c) - require.Equal(t, http.StatusAccepted, rec.Code) - assert.Equal(t, authorization.AllowAuth{}, c.Authorization) - }) - - t.Run(name+"unsecured", func(t *testing.T) { - handler, c, rec := setup("") - m := AuthorizationMiddleware(handler, false) - Apply(m, beatertest.Handler202)(c) - require.Equal(t, http.StatusAccepted, rec.Code) - assert.Equal(t, authorization.AllowAuth{}, c.Authorization) - - }) - } -} diff --git a/beater/middleware/log_middleware.go b/beater/middleware/log_middleware.go index 7394de1f4bb..9d465a74289 100644 --- a/beater/middleware/log_middleware.go +++ b/beater/middleware/log_middleware.go @@ -34,73 +34,76 @@ import ( // LogMiddleware returns a middleware taking care of logging processing a request in the middleware and the request handler func LogMiddleware() Middleware { - logger := logp.NewLogger(logs.Request) return func(h request.Handler) (request.Handler, error) { - return func(c *request.Context) { - var reqID, transactionID, traceID string start := time.Now() - tx := apm.TransactionFromContext(c.Request.Context()) - if tx != nil { - // This request is being traced, grab its IDs to add to logs. 
- traceContext := tx.TraceContext() - transactionID = traceContext.Span.String() - traceID = traceContext.Trace.String() - reqID = transactionID - } else { - uuid, err := uuid.NewV4() - if err != nil { - id := request.IDResponseErrorsInternal - logger.Errorw(request.MapResultIDToStatus[id].Keyword, "error", err) - c.Result.SetWithError(id, err) - c.Write() - return - } - reqID = uuid.String() - } - - reqLogger := logger.With( - "request_id", reqID, - "method", c.Request.Method, - "URL", c.Request.URL, - "content_length", c.Request.ContentLength, - "remote_address", utility.RemoteAddr(c.Request), - "user-agent", c.Request.Header.Get(headers.UserAgent)) - - if traceID != "" { - reqLogger = reqLogger.With( - "trace.id", traceID, - "transaction.id", transactionID, - ) + c.Logger = loggerWithRequestContext(c) + var err error + if c.Logger, err = loggerWithTraceContext(c); err != nil { + id := request.IDResponseErrorsInternal + c.Logger.Error(request.MapResultIDToStatus[id].Keyword, logp.Error(err)) + c.Result.SetWithError(id, err) + c.Write() + return } - - c.Logger = reqLogger h(c) - reqLogger = reqLogger.With("event.duration", time.Since(start)) - + c.Logger = c.Logger.With("event.duration", time.Since(start)) if c.MultipleWriteAttempts() { - reqLogger.Warn("multiple write attempts") + c.Logger.Warn("multiple write attempts") } - keyword := c.Result.Keyword if keyword == "" { keyword = "handled request" } - - keysAndValues := []interface{}{"response_code", c.Result.StatusCode} - if c.Result.Err != nil { - keysAndValues = append(keysAndValues, "error", c.Result.Err.Error()) - } - if c.Result.Stacktrace != "" { - keysAndValues = append(keysAndValues, "stacktrace", c.Result.Stacktrace) - } - + c.Logger = loggerWithResult(c) if c.Result.Failure() { - reqLogger.Errorw(keyword, keysAndValues...) - } else { - reqLogger.Infow(keyword, keysAndValues...) + c.Logger.Error(keyword) + return } - + c.Logger.Info(keyword) }, nil } } + +func loggerWithRequestContext(c *request.Context) *logp.Logger { + logger := logp.NewLogger(logs.Request).With( + "url.original", c.Request.URL.String(), + "http.request.method", c.Request.Method, + "user_agent.original", c.Request.Header.Get(headers.UserAgent), + "source.address", utility.RemoteAddr(c.Request)) + if c.Request.ContentLength != -1 { + logger = logger.With("http.request.body.bytes", c.Request.ContentLength) + } + return logger +} + +func loggerWithTraceContext(c *request.Context) (*logp.Logger, error) { + tx := apm.TransactionFromContext(c.Request.Context()) + if tx == nil { + uuid, err := uuid.NewV4() + if err != nil { + return c.Logger, err + } + return c.Logger.With("http.request.id", uuid.String()), nil + } + // This request is being traced, grab its IDs to add to logs. 
+ traceContext := tx.TraceContext() + transactionID := traceContext.Span.String() + return c.Logger.With( + "trace.id", traceContext.Trace.String(), + "transaction.id", transactionID, + "http.request.id", transactionID, + ), nil +} + +func loggerWithResult(c *request.Context) *logp.Logger { + logger := c.Logger.With( + "http.response.status_code", c.Result.StatusCode) + if c.Result.Err != nil { + logger = logger.With("error.message", c.Result.Err.Error()) + } + if c.Result.Stacktrace != "" { + logger = logger.With("error.stack_trace", c.Result.Stacktrace) + } + return logger +} diff --git a/beater/middleware/log_middleware_test.go b/beater/middleware/log_middleware_test.go index 64133ad8142..ffbaec40db3 100644 --- a/beater/middleware/log_middleware_test.go +++ b/beater/middleware/log_middleware_test.go @@ -21,7 +21,6 @@ import ( "net/http" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" @@ -29,7 +28,9 @@ import ( "go.elastic.co/apm" "go.elastic.co/apm/apmtest" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/logp/configure" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/headers" @@ -38,17 +39,14 @@ import ( ) func TestLogMiddleware(t *testing.T) { - err := logp.DevelopmentSetup(logp.ToObserverOutput()) - require.NoError(t, err) testCases := []struct { name, message string level zapcore.Level handler request.Handler code int - error error - stacktrace bool traced bool + ecsKeys []string }{ { name: "Accepted", @@ -56,6 +54,7 @@ func TestLogMiddleware(t *testing.T) { level: zapcore.InfoLevel, handler: beatertest.Handler202, code: http.StatusAccepted, + ecsKeys: []string{"url.original"}, }, { name: "Traced", @@ -63,6 +62,7 @@ func TestLogMiddleware(t *testing.T) { level: zapcore.InfoLevel, handler: beatertest.Handler202, code: http.StatusAccepted, + ecsKeys: []string{"url.original", "trace.id", "transaction.id"}, traced: true, }, { @@ -71,16 +71,15 @@ func TestLogMiddleware(t *testing.T) { level: zapcore.ErrorLevel, handler: beatertest.Handler403, code: http.StatusForbidden, - error: errors.New("forbidden request"), + ecsKeys: []string{"url.original", "error.message"}, }, { - name: "Panic", - message: "internal error", - level: zapcore.ErrorLevel, - handler: Apply(RecoverPanicMiddleware(), beatertest.HandlerPanic), - code: http.StatusInternalServerError, - error: errors.New("panic on Handle"), - stacktrace: true, + name: "Panic", + message: "internal error", + level: zapcore.ErrorLevel, + handler: Apply(RecoverPanicMiddleware(), beatertest.HandlerPanic), + code: http.StatusInternalServerError, + ecsKeys: []string{"url.original", "error.message", "error.stack_trace"}, }, { name: "Error without keyword", @@ -90,12 +89,19 @@ func TestLogMiddleware(t *testing.T) { c.Result.StatusCode = http.StatusForbidden c.Write() }, - code: http.StatusForbidden, + code: http.StatusForbidden, + ecsKeys: []string{"url.original"}, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + // log setup + configure.Logging("APM Server test", + common.MustNewConfigFrom(`{"ecs":true}`)) + require.NoError(t, logp.DevelopmentSetup(logp.ToObserverOutput())) + + // prepare and record request c, rec := beatertest.DefaultContextWithResponseRecorder() c.Request.Header.Set(headers.UserAgent, tc.name) if tc.traced { @@ -105,39 +111,27 @@ func 
TestLogMiddleware(t *testing.T) { } Apply(LogMiddleware(), tc.handler)(c) + // check log lines assert.Equal(t, tc.code, rec.Code) - for i, entry := range logp.ObserverLogs().TakeAll() { - // expect only one log entry per request - assert.Equal(t, i, 0) - assert.Equal(t, logs.Request, entry.LoggerName) - assert.Equal(t, tc.level, entry.Level) - assert.Equal(t, tc.message, entry.Message) + entries := logp.ObserverLogs().TakeAll() + require.Equal(t, 1, len(entries)) + entry := entries[0] + assert.Equal(t, logs.Request, entry.LoggerName) + assert.Equal(t, tc.level, entry.Level) + assert.Equal(t, tc.message, entry.Message) - ec := entry.ContextMap() - t.Logf("context map: %v", ec) - - assert.NotEmpty(t, ec["request_id"]) - assert.NotEmpty(t, ec["method"]) - assert.Equal(t, c.Request.URL.String(), ec["URL"]) - assert.NotEmpty(t, ec["remote_address"]) - assert.Contains(t, ec, "event.duration") - assert.Equal(t, c.Request.Header.Get(headers.UserAgent), ec["user-agent"]) - // zap encoded type - assert.Equal(t, tc.code, int(ec["response_code"].(int64))) - if tc.error != nil { - assert.Equal(t, tc.error.Error(), ec["error"]) - } - if tc.stacktrace { - assert.NotZero(t, ec["stacktrace"]) - } - if tc.traced { - assert.NotEmpty(t, ec, "trace.id") - assert.NotEmpty(t, ec, "transaction.id") - assert.Equal(t, ec["request_id"], ec["transaction.id"]) - } else { - assert.NotContains(t, ec, "trace.id") - assert.NotContains(t, ec, "transaction.id") - } + encoder := zapcore.NewMapObjectEncoder() + ec := common.MapStr{} + for _, f := range entry.Context { + f.AddTo(encoder) + ec.DeepUpdate(encoder.Fields) + } + keys := []string{"http.request.id", "http.request.method", "http.request.body.bytes", + "source.address", "user_agent.original", "http.response.status_code", "event.duration"} + keys = append(keys, tc.ecsKeys...) 
+ for _, key := range keys { + ok, _ := ec.HasKey(key) + assert.True(t, ok, key) } }) } diff --git a/beater/middleware/monitoring_middleware_test.go b/beater/middleware/monitoring_middleware_test.go index f77c58e70fa..4a988611f81 100644 --- a/beater/middleware/monitoring_middleware_test.go +++ b/beater/middleware/monitoring_middleware_test.go @@ -40,8 +40,10 @@ func TestMonitoringHandler(t *testing.T) { expected map[request.ResultID]int, m map[request.ResultID]*monitoring.Int, ) { + beatertest.ClearRegistry(m) c, _ := beatertest.DefaultContextWithResponseRecorder() - equal, result := beatertest.CompareMonitoringInt(Apply(MonitoringMiddleware(m), h), c, expected, m) + Apply(MonitoringMiddleware(m), h)(c) + equal, result := beatertest.CompareMonitoringInt(expected, m) assert.True(t, equal, result) } diff --git a/beater/middleware/rate_limit_middleware.go b/beater/middleware/rate_limit_middleware.go index 5ef47cc1e54..de296e56867 100644 --- a/beater/middleware/rate_limit_middleware.go +++ b/beater/middleware/rate_limit_middleware.go @@ -18,21 +18,35 @@ package middleware import ( - "github.com/elastic/apm-server/beater/api/ratelimit" - "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/ratelimit" "github.com/elastic/apm-server/beater/request" ) -const burstMultiplier = 3 - -// SetIPRateLimitMiddleware sets a rate limiter -func SetIPRateLimitMiddleware(cfg *config.EventRate) Middleware { - store, err := ratelimit.NewStore(cfg.LruSize, cfg.Limit, burstMultiplier) - +// AnonymousRateLimitMiddleware adds a rate.Limiter to the context of anonymous +// requests, first ensuring the client is allowed to perform a single event and +// responding with 429 Too Many Requests if it is not. +// +// This middleware must be wrapped by AuthorizationMiddleware, as it depends on +// the value of c.AuthResult.Anonymous. +func AnonymousRateLimitMiddleware(store *ratelimit.Store) Middleware { return func(h request.Handler) (request.Handler, error) { return func(c *request.Context) { - c.RateLimiter = store.ForIP(c.Request) + if c.Authentication.Method == auth.MethodAnonymous { + limiter := store.ForIP(c.ClientIP) + if !limiter.Allow() { + c.Result.SetWithError( + request.IDResponseErrorsRateLimit, + ratelimit.ErrRateLimitExceeded, + ) + c.Write() + return + } + ctx := c.Request.Context() + ctx = ratelimit.ContextWithLimiter(ctx, limiter) + c.Request = c.Request.WithContext(ctx) + } h(c) - }, err + }, nil } } diff --git a/beater/middleware/rate_limit_middleware_test.go b/beater/middleware/rate_limit_middleware_test.go new file mode 100644 index 00000000000..fe5a60c0684 --- /dev/null +++ b/beater/middleware/rate_limit_middleware_test.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package middleware + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/beater/ratelimit" + "github.com/elastic/apm-server/beater/request" +) + +func TestAnonymousRateLimitMiddleware(t *testing.T) { + type test struct { + burst int + anonymous bool + + expectStatusCode int + expectAllow bool + } + for _, test := range []test{{ + burst: 0, + anonymous: false, + expectStatusCode: http.StatusOK, + }, { + burst: 0, + anonymous: true, + expectStatusCode: http.StatusTooManyRequests, + }, { + burst: 1, + anonymous: true, + expectStatusCode: http.StatusOK, + expectAllow: false, + }, { + burst: 2, + anonymous: true, + expectStatusCode: http.StatusOK, + expectAllow: true, + }} { + store, _ := ratelimit.NewStore(1, 1, test.burst) + middleware := AnonymousRateLimitMiddleware(store) + handler := func(c *request.Context) { + limiter, ok := ratelimit.FromContext(c.Request.Context()) + if test.anonymous { + require.True(t, ok) + assert.Equal(t, test.expectAllow, limiter.Allow()) + } else { + require.False(t, ok) + } + } + wrapped, err := middleware(handler) + require.NoError(t, err) + + c := request.NewContext() + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + c.Reset(w, r) + if !test.anonymous { + c.Authentication.Method = "none" + } + + wrapped(c) + assert.Equal(t, test.expectStatusCode, w.Code) + } +} + +func TestAnonymousRateLimitMiddlewareForIP(t *testing.T) { + store, _ := ratelimit.NewStore(2, 1, 1) + middleware := AnonymousRateLimitMiddleware(store) + handler := func(c *request.Context) {} + wrapped, err := middleware(handler) + require.NoError(t, err) + + requestWithIP := func(ip string) int { + c := request.NewContext() + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + r.Header.Set("X-Real-Ip", ip) + r.RemoteAddr = "1.2.3.4:5678" + c.Reset(w, r) + wrapped(c) + return w.Code + } + assert.Equal(t, http.StatusOK, requestWithIP("10.1.1.1")) + assert.Equal(t, http.StatusTooManyRequests, requestWithIP("10.1.1.1")) + assert.Equal(t, http.StatusOK, requestWithIP("10.1.1.2")) + + // ratelimit.Store size is 2: the 3rd IP reuses an existing (depleted) rate limiter. + assert.Equal(t, http.StatusTooManyRequests, requestWithIP("10.1.1.3")) +} diff --git a/beater/middleware/request_metadata_middleware.go b/beater/middleware/request_metadata_middleware.go deleted file mode 100644 index f28f7610df4..00000000000 --- a/beater/middleware/request_metadata_middleware.go +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
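The burst-dependent expectations in TestAnonymousRateLimitMiddleware above follow from golang.org/x/time/rate token-bucket semantics: the middleware's own Allow() call consumes one token before the handler runs. A standalone sketch:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// limit=1 event/sec, burst=1: the middleware's Allow() check drains the
	// only token, so a subsequent Allow() in the handler is refused.
	limiter := rate.NewLimiter(1, 1)
	fmt.Println(limiter.Allow()) // true: the middleware's check
	fmt.Println(limiter.Allow()) // false: nothing left for the handler

	// burst=2 leaves one token over for the handler, matching expectAllow.
	limiter = rate.NewLimiter(1, 2)
	fmt.Println(limiter.Allow(), limiter.Allow()) // true true
}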
- -package middleware - -import ( - "github.com/elastic/apm-server/beater/request" - "github.com/elastic/apm-server/utility" -) - -// UserMetadataMiddleware returns a Middleware recording request-level -// user metadata (e.g. user-agent and source IP) in the request's context. -func UserMetadataMiddleware() Middleware { - return func(h request.Handler) (request.Handler, error) { - return func(c *request.Context) { - dec := utility.ManualDecoder{} - c.RequestMetadata.UserAgent = dec.UserAgentHeader(c.Request.Header) - c.RequestMetadata.ClientIP = utility.ExtractIP(c.Request) - h(c) - }, nil - } -} - -// SystemMetadataMiddleware returns a Middleware recording request-level -// system metadata (e.g. source IP) in the request's context. -func SystemMetadataMiddleware() Middleware { - return func(h request.Handler) (request.Handler, error) { - return func(c *request.Context) { - c.RequestMetadata.SystemIP = utility.ExtractIP(c.Request) - h(c) - }, nil - } -} diff --git a/beater/middleware/request_metadata_middleware_test.go b/beater/middleware/request_metadata_middleware_test.go deleted file mode 100644 index ac69123c5c3..00000000000 --- a/beater/middleware/request_metadata_middleware_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
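The deleted middlewares above populated per-request client and source IPs; in this patch that data moves onto request.Context (see the ClientIP and SourceAddr fields later in the diff). A simplified stand-in for the forwarded-header handling, covering only X-Forwarded-For (the utility helpers referenced in the deleted code also handle Forwarded and X-Real-Ip):

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"strings"
)

// clientIP is a simplified stand-in for the forwarded-header logic the
// deleted middlewares delegated to the utility package: prefer the first
// X-Forwarded-For entry, falling back to the connection's RemoteAddr.
func clientIP(r *http.Request) net.IP {
	if fwd := r.Header.Get("X-Forwarded-For"); fwd != "" {
		first := strings.TrimSpace(strings.Split(fwd, ",")[0])
		if ip := net.ParseIP(first); ip != nil {
			return ip
		}
	}
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return nil
	}
	return net.ParseIP(host)
}

func main() {
	r := httptest.NewRequest("GET", "/", nil)
	r.RemoteAddr = "1.2.3.4:5678"
	r.Header.Set("X-Forwarded-For", "10.1.1.1, 10.2.2.2")
	fmt.Println(clientIP(r)) // 10.1.1.1: the forwarded header wins
}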
- -package middleware - -import ( - "fmt" - "net" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/beater/beatertest" -) - -func TestUserMetadataMiddleware(t *testing.T) { - type test struct { - remoteAddr string - userAgent []string - expectedIP net.IP - expectedUserAgent string - } - - ua1 := "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" - ua2 := "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:67.0) Gecko/20100101 Firefox/67.0" - tests := []test{ - {remoteAddr: "1.2.3.4:1234", expectedIP: net.ParseIP("1.2.3.4"), userAgent: []string{ua1, ua2}, expectedUserAgent: fmt.Sprintf("%s, %s", ua1, ua2)}, - {remoteAddr: "not-an-ip:1234", userAgent: []string{ua1}, expectedUserAgent: ua1}, - {remoteAddr: ""}, - } - - for _, test := range tests { - c, _ := beatertest.DefaultContextWithResponseRecorder() - c.Request.RemoteAddr = test.remoteAddr - for _, ua := range test.userAgent { - c.Request.Header.Add("User-Agent", ua) - } - - Apply(UserMetadataMiddleware(), beatertest.HandlerIdle)(c) - assert.Equal(t, test.expectedUserAgent, c.RequestMetadata.UserAgent) - assert.Equal(t, test.expectedIP, c.RequestMetadata.ClientIP) - } -} - -func TestSystemMetadataMiddleware(t *testing.T) { - type test struct { - remoteAddr string - expectedIP net.IP - } - tests := []test{ - {remoteAddr: "1.2.3.4:1234", expectedIP: net.ParseIP("1.2.3.4")}, - {remoteAddr: "not-an-ip:1234"}, - {remoteAddr: ""}, - } - - for _, test := range tests { - c, _ := beatertest.DefaultContextWithResponseRecorder() - c.Request.RemoteAddr = test.remoteAddr - - Apply(SystemMetadataMiddleware(), beatertest.HandlerIdle)(c) - assert.Equal(t, test.expectedIP, c.RequestMetadata.SystemIP) - } -} diff --git a/beater/middleware/rum_middleware.go b/beater/middleware/rum_middleware.go deleted file mode 100644 index be3f8f33d4d..00000000000 --- a/beater/middleware/rum_middleware.go +++ /dev/null @@ -1,32 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package middleware - -import ( - "github.com/elastic/apm-server/beater/request" -) - -// SetRumFlagMiddleware sets a rum flag in the context -func SetRumFlagMiddleware() Middleware { - return func(h request.Handler) (request.Handler, error) { - return func(c *request.Context) { - c.IsRum = true - h(c) - }, nil - } -} diff --git a/beater/middleware/timeout_middleware.go b/beater/middleware/timeout_middleware.go new file mode 100644 index 00000000000..b0c15c54810 --- /dev/null +++ b/beater/middleware/timeout_middleware.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package middleware + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/elastic/apm-server/beater/request" +) + +// TimeoutMiddleware assumes that a context.Canceled error indicates a timed out +// request. This could be caused by either a client timeout or a server timeout. +// In that case, the middleware sets the timeout result on the Context. +func TimeoutMiddleware() Middleware { + tErr := errors.New("request timed out") + return func(h request.Handler) (request.Handler, error) { + return func(c *request.Context) { + h(c) + + err := c.Request.Context().Err() + if errors.Is(err, context.Canceled) { + c.Result.SetDefault(request.IDResponseErrorsTimeout) + c.Result.Err = tErr + c.Result.Body = tErr.Error() + } + }, nil + } +} diff --git a/beater/middleware/timeout_middleware_test.go b/beater/middleware/timeout_middleware_test.go new file mode 100644 index 00000000000..eeaedc15499 --- /dev/null +++ b/beater/middleware/timeout_middleware_test.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
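The assumption TimeoutMiddleware documents can be reproduced with the standard library alone: cancelling a request's context surfaces as context.Canceled from ctx.Err(), which is exactly what the middleware checks after the handler returns. A minimal sketch:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulates the client disconnecting or a server timeout firing

	// This is the check TimeoutMiddleware performs after the handler returns.
	fmt.Println(errors.Is(ctx.Err(), context.Canceled)) // true
}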
+ +package middleware + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/beater/request" +) + +func TestTimeoutMiddleware(t *testing.T) { + var err error + m := TimeoutMiddleware() + h := request.Handler(func(c *request.Context) { + ctx := c.Request.Context() + ctx, cancel := context.WithCancel(ctx) + r := c.Request.WithContext(ctx) + c.Request = r + cancel() + }) + + h, err = m(h) + assert.NoError(t, err) + + c := request.NewContext() + r, err := http.NewRequest("GET", "/", nil) + assert.NoError(t, err) + c.Reset(httptest.NewRecorder(), r) + h(c) + + assert.Equal(t, http.StatusServiceUnavailable, c.Result.StatusCode) +} diff --git a/beater/onboarding.go b/beater/onboarding.go index ac7d97e2233..f860cdb505d 100644 --- a/beater/onboarding.go +++ b/beater/onboarding.go @@ -28,13 +28,12 @@ import ( logs "github.com/elastic/apm-server/log" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" ) func notifyListening(ctx context.Context, listenAddr net.Addr, reporter publish.Reporter) { logp.NewLogger(logs.Onboarding).Info("Publishing onboarding document") reporter(ctx, publish.PendingReq{ - Transformables: []transform.Transformable{onboardingDoc{listenAddr: listenAddr.String()}}, + Transformable: onboardingDoc{listenAddr: listenAddr.String()}, }) } @@ -42,7 +41,7 @@ type onboardingDoc struct { listenAddr string } -func (o onboardingDoc) Transform(ctx context.Context, cfg *transform.Config) []beat.Event { +func (o onboardingDoc) Transform(ctx context.Context) []beat.Event { return []beat.Event{{ Timestamp: time.Now(), Fields: common.MapStr{ diff --git a/beater/otlp/clientmetadata.go b/beater/otlp/clientmetadata.go new file mode 100644 index 00000000000..2a224bd00d6 --- /dev/null +++ b/beater/otlp/clientmetadata.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otlp + +import ( + "context" + "net" + + "github.com/elastic/apm-server/beater/interceptors" + "github.com/elastic/apm-server/model" +) + +// SetClientMetadata sets metadata relating to the gRPC client in end-user +// events, which are assumed to have been sent to the server from the user's device. +// +// Client metadata is extracted from ctx, injected by interceptors.ClientMetadata. +func SetClientMetadata(ctx context.Context, batch *model.Batch) error { + for i := range *batch { + event := &(*batch)[i] + if event.Agent.Name != "iOS/swift" { + // This is not an event from an agent we would consider to be + // running on an end-user device. + // + // TODO(axw) use User-Agent in the check, when we know what we + // should be looking for? 
+ continue + } + clientMetadata, ok := interceptors.ClientMetadataFromContext(ctx) + if ok { + if event.Source.IP == nil { + if tcpAddr, ok := clientMetadata.SourceAddr.(*net.TCPAddr); ok { + event.Source.IP = tcpAddr.IP + event.Source.Port = tcpAddr.Port + } + } + if event.Client.IP == nil { + event.Client.IP = clientMetadata.ClientIP + } + } + } + return nil +} diff --git a/beater/otlp/clientmetadata_test.go b/beater/otlp/clientmetadata_test.go new file mode 100644 index 00000000000..f0f5a5426e4 --- /dev/null +++ b/beater/otlp/clientmetadata_test.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otlp_test + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/beater/interceptors" + "github.com/elastic/apm-server/beater/otlp" + "github.com/elastic/apm-server/model" +) + +func TestSetClientMetadata(t *testing.T) { + ip1234 := net.ParseIP("1.2.3.4") + ip5678 := net.ParseIP("5.6.7.8") + + for _, test := range []struct { + ctx context.Context + in model.APMEvent + expected model.APMEvent + }{{ + ctx: context.Background(), + in: model.APMEvent{ + Client: model.Client{IP: ip1234}, + }, + expected: model.APMEvent{ + Client: model.Client{IP: ip1234}, + }, + }, { + ctx: context.Background(), + in: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + Client: model.Client{IP: ip1234}, + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + Client: model.Client{IP: ip1234}, + }, + }, { + ctx: context.Background(), + in: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + }, + }, { + ctx: interceptors.ContextWithClientMetadata(context.Background(), interceptors.ClientMetadataValues{ + SourceAddr: &net.TCPAddr{IP: ip1234, Port: 4321}, + ClientIP: ip5678, + }), + in: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "iOS/swift"}, + Client: model.Client{IP: ip5678}, + Source: model.Source{IP: ip1234, Port: 4321}, + }, + }} { + batch := model.Batch{test.in} + err := otlp.SetClientMetadata(test.ctx, &batch) + assert.NoError(t, err) + assert.Equal(t, test.expected, batch[0]) + } +} diff --git a/beater/otlp/grpc.go b/beater/otlp/grpc.go new file mode 100644 index 00000000000..4f12ec9f851 --- /dev/null +++ b/beater/otlp/grpc.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otlp + +import ( + "context" + "sync" + + "github.com/pkg/errors" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "google.golang.org/grpc" + + "github.com/elastic/beats/v7/libbeat/monitoring" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/interceptors" + "github.com/elastic/apm-server/beater/request" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/processor/otel" +) + +var ( + monitoringKeys = append(request.DefaultResultIDs, + request.IDResponseErrorsRateLimit, + request.IDResponseErrorsTimeout, + request.IDResponseErrorsUnauthorized, + ) + gRPCMetricsRegistry = monitoring.Default.NewRegistry("apm-server.otlp.grpc.metrics") + gRPCMetricsMonitoringMap = request.MonitoringMapForRegistry(gRPCMetricsRegistry, monitoringKeys) + gRPCTracesRegistry = monitoring.Default.NewRegistry("apm-server.otlp.grpc.traces") + gRPCTracesMonitoringMap = request.MonitoringMapForRegistry(gRPCTracesRegistry, monitoringKeys) + + // RegistryMonitoringMaps provides mappings from the fully qualified gRPC + // method name to its respective monitoring map. + RegistryMonitoringMaps = map[string]map[request.ResultID]*monitoring.Int{ + metricsFullMethod: gRPCMetricsMonitoringMap, + tracesFullMethod: gRPCTracesMonitoringMap, + } +) + +const ( + metricsFullMethod = "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export" + tracesFullMethod = "/opentelemetry.proto.collector.trace.v1.TraceService/Export" +) + +func init() { + monitoring.NewFunc(gRPCMetricsRegistry, "consumer", collectMetricsMonitoring, monitoring.Report) +} + +// MethodAuthenticators returns a map of all supported OTLP/gRPC methods to authenticators. +func MethodAuthenticators(authenticator *auth.Authenticator) map[string]interceptors.MethodAuthenticator { + metadataMethodAuthenticator := interceptors.MetadataMethodAuthenticator(authenticator) + return map[string]interceptors.MethodAuthenticator{ + metricsFullMethod: metadataMethodAuthenticator, + tracesFullMethod: metadataMethodAuthenticator, + } +} + +// RegisterGRPCServices registers OTLP consumer services with the given gRPC server. +func RegisterGRPCServices(grpcServer *grpc.Server, processor model.BatchProcessor) error { + consumer := &otel.Consumer{Processor: processor} + + // TODO(axw) stop assuming we have only one OTLP gRPC service running + // at any time, and instead aggregate metrics from consumers that are + // dynamically registered and unregistered. 
+ setCurrentMonitoredConsumer(consumer) + + if err := otlpreceiver.RegisterTraceReceiver(context.Background(), consumer, grpcServer); err != nil { + return errors.Wrap(err, "failed to register OTLP trace receiver") + } + if err := otlpreceiver.RegisterMetricsReceiver(context.Background(), consumer, grpcServer); err != nil { + return errors.Wrap(err, "failed to register OTLP metrics receiver") + } + return nil +} + +var ( + currentMonitoredConsumerMu sync.RWMutex + currentMonitoredConsumer *otel.Consumer +) + +func setCurrentMonitoredConsumer(c *otel.Consumer) { + currentMonitoredConsumerMu.Lock() + defer currentMonitoredConsumerMu.Unlock() + currentMonitoredConsumer = c +} + +func collectMetricsMonitoring(mode monitoring.Mode, V monitoring.Visitor) { + V.OnRegistryStart() + defer V.OnRegistryFinished() + + currentMonitoredConsumerMu.RLock() + c := currentMonitoredConsumer + currentMonitoredConsumerMu.RUnlock() + if c == nil { + return + } + + stats := c.Stats() + monitoring.ReportInt(V, "unsupported_dropped", stats.UnsupportedMetricsDropped) +} diff --git a/beater/otlp/grpc_test.go b/beater/otlp/grpc_test.go new file mode 100644 index 00000000000..917a7f76c32 --- /dev/null +++ b/beater/otlp/grpc_test.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otlp_test + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + + "github.com/elastic/apm-server/beater/interceptors" + "github.com/elastic/apm-server/beater/otlp" + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" +) + +func TestConsumeTraces(t *testing.T) { + var batches []model.Batch + var reportError error + var batchProcessor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + batches = append(batches, *batch) + return reportError + } + + conn := newServer(t, batchProcessor) + client := otlpgrpc.NewTracesClient(conn) + + // Send a minimal trace to verify that everything is connected properly. + // + // We intentionally do not check the published event contents; those are + // tested in processor/otel. 
+ traces := pdata.NewTraces() + span := traces.ResourceSpans().AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty() + span.SetName("operation_name") + + _, err := client.Export(context.Background(), traces) + assert.NoError(t, err) + require.Len(t, batches, 1) + + reportError = errors.New("failed to publish events") + _, err = client.Export(context.Background(), traces) + assert.Error(t, err) + errStatus := status.Convert(err) + assert.Equal(t, "failed to publish events", errStatus.Message()) + require.Len(t, batches, 2) + assert.Len(t, batches[0], 1) + assert.Len(t, batches[1], 1) + + actual := map[string]interface{}{} + monitoring.GetRegistry("apm-server.otlp.grpc.traces").Do(monitoring.Full, func(key string, value interface{}) { + actual[key] = value + }) + assert.Equal(t, map[string]interface{}{ + "request.count": int64(2), + "response.count": int64(2), + "response.errors.count": int64(1), + "response.valid.count": int64(1), + "response.errors.ratelimit": int64(0), + "response.errors.timeout": int64(0), + "response.errors.unauthorized": int64(0), + }, actual) +} + +func TestConsumeMetrics(t *testing.T) { + var reportError error + var batchProcessor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + return reportError + } + + conn := newServer(t, batchProcessor) + client := otlpgrpc.NewMetricsClient(conn) + + // Send a minimal metric to verify that everything is connected properly. + // + // We intentionally do not check the published event contents; those are + // tested in processor/otel. + metrics := pdata.NewMetrics() + metric := metrics.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("metric_type") + metric.SetDataType(pdata.MetricDataTypeSummary) + metric.Summary().DataPoints().AppendEmpty() + + _, err := client.Export(context.Background(), metrics) + assert.NoError(t, err) + + reportError = errors.New("failed to publish events") + _, err = client.Export(context.Background(), metrics) + assert.Error(t, err) + + errStatus := status.Convert(err) + assert.Equal(t, "failed to publish events", errStatus.Message()) + + actual := map[string]interface{}{} + monitoring.GetRegistry("apm-server.otlp.grpc.metrics").Do(monitoring.Full, func(key string, value interface{}) { + actual[key] = value + }) + assert.Equal(t, map[string]interface{}{ + // In both of the requests we send above, + // the metrics do not have a type and so + // we treat them as unsupported metrics. 
+ "consumer.unsupported_dropped": int64(2), + + "request.count": int64(2), + "response.count": int64(2), + "response.errors.count": int64(1), + "response.valid.count": int64(1), + "response.errors.ratelimit": int64(0), + "response.errors.timeout": int64(0), + "response.errors.unauthorized": int64(0), + }, actual) +} + +func newServer(t *testing.T, batchProcessor model.BatchProcessor) *grpc.ClientConn { + lis, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + logger := logp.NewLogger("otlp.grpc.test") + srv := grpc.NewServer( + grpc.UnaryInterceptor(interceptors.Metrics(logger, otlp.RegistryMonitoringMaps)), + ) + err = otlp.RegisterGRPCServices(srv, batchProcessor) + require.NoError(t, err) + + go srv.Serve(lis) + t.Cleanup(srv.GracefulStop) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + return conn +} diff --git a/beater/processors.go b/beater/processors.go new file mode 100644 index 00000000000..445e04cd106 --- /dev/null +++ b/beater/processors.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "context" + "time" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/ecs/code/go/ecs" + + "github.com/elastic/apm-server/beater/auth" + "github.com/elastic/apm-server/beater/ratelimit" + "github.com/elastic/apm-server/model" +) + +const ( + rateLimitTimeout = time.Second +) + +// authorizeEventIngestProcessor is a model.BatchProcessor that checks that the +// client is authorized to ingest events for the given agent and service name. +func authorizeEventIngestProcessor(ctx context.Context, batch *model.Batch) error { + for _, event := range *batch { + if err := auth.Authorize(ctx, auth.ActionEventIngest, auth.Resource{ + AgentName: event.Agent.Name, + ServiceName: event.Service.Name, + }); err != nil { + return err + } + } + return nil +} + +// rateLimitBatchProcessor is a model.BatchProcessor that rate limits based on +// the batch size. This will be invoked after decoding events, but before sending +// on to the libbeat publisher. +func rateLimitBatchProcessor(ctx context.Context, batch *model.Batch) error { + if limiter, ok := ratelimit.FromContext(ctx); ok { + ctx, cancel := context.WithTimeout(ctx, rateLimitTimeout) + defer cancel() + if err := limiter.WaitN(ctx, len(*batch)); err != nil { + return ratelimit.ErrRateLimitExceeded + } + } + return nil +} + +// ecsVersionBatchProcessor is a model.BatchProcessor that sets the ECSVersion +// field of each event to the ECS library version. 
+func ecsVersionBatchProcessor(ctx context.Context, b *model.Batch) error { + for i := range *b { + event := &(*b)[i] + event.ECSVersion = ecs.Version + } + return nil +} + +// newObserverBatchProcessor returns a model.BatchProcessor that sets observer +// fields from info. +func newObserverBatchProcessor(info beat.Info) model.ProcessBatchFunc { + var versionMajor int + if version, err := common.NewVersion(info.Version); err == nil { + versionMajor = version.Major + } + return func(ctx context.Context, b *model.Batch) error { + for i := range *b { + observer := &(*b)[i].Observer + observer.EphemeralID = info.EphemeralID.String() + observer.Hostname = info.Hostname + observer.ID = info.ID.String() + if info.Name != info.Hostname { + observer.Name = info.Name + } + observer.Type = info.Beat + observer.Version = info.Version + observer.VersionMajor = versionMajor + } + return nil + } +} diff --git a/beater/processors_test.go b/beater/processors_test.go new file mode 100644 index 00000000000..f3116e85b63 --- /dev/null +++ b/beater/processors_test.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" + + "github.com/elastic/apm-server/beater/ratelimit" + "github.com/elastic/apm-server/model" +) + +func TestRateLimitBatchProcessor(t *testing.T) { + limiter := rate.NewLimiter(1, 10) + ctx := ratelimit.ContextWithLimiter(context.Background(), limiter) + + batch := make(model.Batch, 5) + for i := range batch { + batch[i].Transaction = &model.Transaction{} + } + for i := 0; i < 2; i++ { + err := rateLimitBatchProcessor(ctx, &batch) + require.NoError(t, err) + } + + // After the second batch, the rate limiter burst has been exhausted, + // and the limit is not high enough to allow another one. + err := rateLimitBatchProcessor(ctx, &batch) + assert.Equal(t, ratelimit.ErrRateLimitExceeded, err) +} diff --git a/beater/ratelimit/context.go b/beater/ratelimit/context.go new file mode 100644 index 00000000000..d17f99597de --- /dev/null +++ b/beater/ratelimit/context.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ratelimit + +import ( + "context" + + "github.com/pkg/errors" + "golang.org/x/time/rate" +) + +// ErrRateLimitExceeded is returned when the rate limit is exceeded. +var ErrRateLimitExceeded = errors.New("rate limit exceeded") + +type rateLimiterKey struct{} + +// FromContext returns a rate.Limiter if one is contained in ctx, +// and a bool indicating whether one was found. +func FromContext(ctx context.Context) (*rate.Limiter, bool) { + limiter, ok := ctx.Value(rateLimiterKey{}).(*rate.Limiter) + return limiter, ok +} + +// ContextWithLimiter returns a copy of parent associated with limiter. +func ContextWithLimiter(parent context.Context, limiter *rate.Limiter) context.Context { + return context.WithValue(parent, rateLimiterKey{}, limiter) +} diff --git a/beater/api/ratelimit/store.go b/beater/ratelimit/store.go similarity index 87% rename from beater/api/ratelimit/store.go rename to beater/ratelimit/store.go index b9fac6c8cfd..660a3af76cd 100644 --- a/beater/api/ratelimit/store.go +++ b/beater/ratelimit/store.go @@ -18,11 +18,9 @@ package ratelimit import ( - "net/http" + "net" "sync" - "github.com/elastic/apm-server/utility" - "github.com/hashicorp/golang-lru/simplelru" "github.com/pkg/errors" "golang.org/x/time/rate" @@ -63,8 +61,9 @@ func NewStore(size, rateLimit, burstFactor int) (*Store, error) { return &store, nil } -// acquire returns a rate.Limiter instance for the given key -func (s *Store) acquire(key string) *rate.Limiter { +// ForIP returns a rate limiter for the given IP. +func (s *Store) ForIP(ip net.IP) *rate.Limiter { + key := ip.String() // lock get and add action for cache to allow proper eviction handling without // race conditions. @@ -83,11 +82,3 @@ func (s *Store) acquire(key string) *rate.Limiter { } return limiter } - -// ForIP returns a rate limiter for the given request IP -func (s *Store) ForIP(r *http.Request) *rate.Limiter { - if s == nil { - return nil - } - return s.acquire(utility.RemoteAddr(r)) -} diff --git a/beater/ratelimit/store_test.go b/beater/ratelimit/store_test.go new file mode 100644 index 00000000000..4706e9dbff8 --- /dev/null +++ b/beater/ratelimit/store_test.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package ratelimit
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCacheInitFails(t *testing.T) {
+	for _, test := range []struct {
+		size  int
+		limit int
+	}{
+		{-1, 1},
+		{0, 1},
+		{1, -1},
+	} {
+		c, err := NewStore(test.size, test.limit, 3)
+		assert.Error(t, err)
+		assert.Nil(t, c)
+	}
+}
+
+func TestCacheEviction(t *testing.T) {
+	cacheSize := 2
+	limit := 1 // multiplied by the burst multiplier of 3
+
+	store, err := NewStore(cacheSize, limit, 3)
+	require.NoError(t, err)
+
+	// add new limiter
+	rlA := store.ForIP(net.ParseIP("127.0.0.1"))
+	rlA.AllowN(time.Now(), 3)
+
+	// add new limiter
+	rlB := store.ForIP(net.ParseIP("127.0.0.2"))
+	rlB.AllowN(time.Now(), 2)
+
+	// reuse evicted limiter rlA
+	rlC := store.ForIP(net.ParseIP("127.0.0.3"))
+	assert.False(t, rlC.Allow())
+	assert.Equal(t, rlC, store.evictedLimiter)
+
+	// reuse evicted limiter rlB
+	rlD := store.ForIP(net.ParseIP("127.0.0.1"))
+	assert.True(t, rlD.Allow())
+	assert.False(t, rlD.Allow())
+	assert.Equal(t, rlD, store.evictedLimiter)
+	// check that the limiters are independent
+	assert.True(t, rlD != rlC)
+	store.evictedLimiter = nil
+	assert.NotNil(t, rlD)
+	assert.NotNil(t, rlC)
+}
+
+func TestCacheOk(t *testing.T) {
+	store, err := NewStore(1, 1, 1)
+	require.NoError(t, err)
+	limiter := store.ForIP(net.ParseIP("127.0.0.1"))
+	assert.NotNil(t, limiter)
+}
diff --git a/beater/request/context.go b/beater/request/context.go
index 65bfa26608b..769fb377900 100644
--- a/beater/request/context.go
+++ b/beater/request/context.go
@@ -23,13 +23,12 @@ import (
 	"net/http"
 	"strings"
 
-	"golang.org/x/time/rate"
-
 	"github.com/elastic/beats/v7/libbeat/logp"
 
-	"github.com/elastic/apm-server/beater/authorization"
+	"github.com/elastic/apm-server/beater/auth"
 	"github.com/elastic/apm-server/beater/headers"
 	logs "github.com/elastic/apm-server/log"
+	"github.com/elastic/apm-server/utility"
 )
 
 const (
@@ -43,50 +42,57 @@ var (
 
 // Context abstracts request and response information for http requests
 type Context struct {
-	Request         *http.Request
-	Logger          *logp.Logger
-	RateLimiter     *rate.Limiter
-	Authorization   authorization.Authorization
-	IsRum           bool
-	Result          Result
-	RequestMetadata Metadata
+	Request        *http.Request
+	Logger         *logp.Logger
+	Authentication auth.AuthenticationDetails
+	Result         Result
+
+	// SourceAddr holds the address of the (source) network peer.
+	SourceAddr net.Addr
+
+	// ClientIP holds the IP address of the originating client,
+	// as recorded in Forwarded, X-Forwarded-For, etc.
+	//
+	// For requests without one of the forwarded headers, this will
+	// have the same value as SourceIP.
+	ClientIP net.IP
+
+	// UserAgent holds the User-Agent request header value.
+	UserAgent string
 
 	w             http.ResponseWriter
 	writeAttempts int
 }
 
-// Metadata contains metadata extracted from the request by middleware,
-// and should be merged into the event metadata.
-type Metadata struct {
-	ClientIP  net.IP
-	SystemIP  net.IP
-	UserAgent string
-}
-
 // NewContext creates an empty Context struct
 func NewContext() *Context {
 	return &Context{}
 }
 
-// Reset allows to reuse a context by removing all request specific information
+// Reset allows reusing a context by removing all request-specific information.
+//
+// It is valid to call Reset(nil, nil), which will just clear all information.
+// If w and r are non-nil, the context will be associated with them for handling +// the request, and information such as the user agent and source IP will be +// extracted for handlers. func (c *Context) Reset(w http.ResponseWriter, r *http.Request) { - c.Request = r - c.Logger = nil - c.RateLimiter = nil - c.Authorization = &authorization.AllowAuth{} - c.IsRum = false - c.Result.Reset() - c.RequestMetadata.Reset() + if c.Request != nil && c.Request.MultipartForm != nil { + c.Request.MultipartForm.RemoveAll() + } - c.w = w - c.writeAttempts = 0 -} + *c = Context{ + Request: r, + Logger: nil, + Authentication: auth.AuthenticationDetails{}, + w: w, + } + c.Result.Reset() -// Reset sets all attribtues of the Metadata instance to it's zero value -func (m *Metadata) Reset() { - m.ClientIP = nil - m.SystemIP = nil - m.UserAgent = "" + if r != nil { + c.SourceAddr = utility.ParseTCPAddr(r.RemoteAddr) + c.ClientIP = utility.ExtractIP(r) + c.UserAgent = utility.UserAgentHeader(r.Header) + } } // Header returns the http.Header of the context's writer diff --git a/beater/request/context_pool.go b/beater/request/context_pool.go index f210e4c70ec..bb7b5f69d01 100644 --- a/beater/request/context_pool.go +++ b/beater/request/context_pool.go @@ -42,6 +42,7 @@ func (pool *ContextPool) HTTPHandler(h Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { c := pool.p.Get().(*Context) defer pool.p.Put(c) + defer c.Reset(nil, nil) c.Reset(w, r) h(c) }) diff --git a/beater/request/context_test.go b/beater/request/context_test.go index a0da505e57c..4dfdc3b654a 100644 --- a/beater/request/context_test.go +++ b/beater/request/context_test.go @@ -18,17 +18,22 @@ package request import ( + "bytes" + "mime/multipart" + "net" "net/http" "net/http/httptest" + "os" "reflect" "testing" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/headers" ) @@ -37,7 +42,33 @@ func TestContext_Reset(t *testing.T) { w1.WriteHeader(http.StatusServiceUnavailable) w2 := httptest.NewRecorder() r1 := httptest.NewRequest(http.MethodGet, "/", nil) + r1.RemoteAddr = "10.1.2.3:4321" + r1.Header.Set("User-Agent", "ua1") r2 := httptest.NewRequest(http.MethodHead, "/new", nil) + r2.RemoteAddr = "10.1.2.3:1234" + r2.Header.Set("User-Agent", "ua2") + r2.Header.Set("X-Forwarded-For", "192.168.0.1") + + var multipartBuf bytes.Buffer + multipartWriter := multipart.NewWriter(&multipartBuf) + fw, err := multipartWriter.CreateFormFile("a_file", "filename.txt") + require.NoError(t, err) + fw.Write([]byte("abc")) + err = multipartWriter.Close() + require.NoError(t, err) + + multipartReader := multipart.NewReader(&multipartBuf, multipartWriter.Boundary()) + form, err := multipartReader.ReadForm(0) // always write to /tmp + require.NoError(t, err) + r1.MultipartForm = form + + // Check that a temp file was written. + require.Len(t, form.File["a_file"], 1) + formFile, err := form.File["a_file"][0].Open() + require.NoError(t, err) + formTempFile := formFile.(*os.File) + formTempFilename := formTempFile.Name() + formFile.Close() c := Context{ Request: r1, w: w1, @@ -50,6 +81,10 @@ func TestContext_Reset(t *testing.T) { } c.Reset(w2, r2) + // Resetting the context should have removed r1's temporary form file. 
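+	// (Reset calls RemoveAll on the previous request's multipart form, which
+	// deletes any temporary files the form data was spooled to on disk.)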
+ _, err = os.Stat(formTempFilename) + require.True(t, os.IsNotExist(err)) + // use reflection to ensure all fields of `context` are tested cType := reflect.TypeOf(c) cVal := reflect.ValueOf(c) @@ -57,16 +92,23 @@ func TestContext_Reset(t *testing.T) { switch cType.Field(i).Name { case "Request": assert.Equal(t, r2, cVal.Field(i).Interface()) - case "Authorization": - assert.Equal(t, &authorization.AllowAuth{}, cVal.Field(i).Interface()) + case "Authentication": + assert.Equal(t, auth.AuthenticationDetails{}, cVal.Field(i).Interface()) case "w": assert.Equal(t, w2, c.w) case "writeAttempts": assert.Equal(t, 0, c.writeAttempts) case "Result": assertResultIsEmpty(t, cVal.Field(i).Interface().(Result)) - case "RequestMetadata": - assert.Equal(t, Metadata{}, cVal.Field(i).Interface().(Metadata)) + case "SourceAddr": + assert.Equal(t, &net.TCPAddr{ + IP: net.ParseIP("10.1.2.3"), + Port: 1234, + }, cVal.Field(i).Interface()) + case "ClientIP": + assert.Equal(t, net.ParseIP("192.168.0.1"), cVal.Field(i).Interface()) + case "UserAgent": + assert.Equal(t, "ua2", cVal.Field(i).Interface()) default: assert.Empty(t, cVal.Field(i).Interface(), cType.Field(i).Name) } diff --git a/beater/request/result.go b/beater/request/result.go index 051781af174..0c1992336eb 100644 --- a/beater/request/result.go +++ b/beater/request/result.go @@ -66,6 +66,8 @@ const ( IDResponseErrorsValidate ResultID = "response.errors.validate" // IDResponseErrorsRateLimit identifies responses for rate limited requests IDResponseErrorsRateLimit ResultID = "response.errors.ratelimit" + // IDResponseErrorsTimeout identifies responses for timed out requests + IDResponseErrorsTimeout ResultID = "response.errors.timeout" // IDResponseErrorsMethodNotAllowed identifies responses for requests using a forbidden method IDResponseErrorsMethodNotAllowed ResultID = "response.errors.method" // IDResponseErrorsFullQueue identifies responses when internal queue was full @@ -94,11 +96,15 @@ var ( IDResponseErrorsValidate: {Code: http.StatusBadRequest, Keyword: "data validation error"}, IDResponseErrorsMethodNotAllowed: {Code: http.StatusMethodNotAllowed, Keyword: "method not supported"}, IDResponseErrorsRateLimit: {Code: http.StatusTooManyRequests, Keyword: "too many requests"}, + IDResponseErrorsTimeout: {Code: http.StatusServiceUnavailable, Keyword: "request timed out"}, IDResponseErrorsFullQueue: {Code: http.StatusServiceUnavailable, Keyword: "queue is full"}, IDResponseErrorsShuttingDown: {Code: http.StatusServiceUnavailable, Keyword: "server is shutting down"}, IDResponseErrorsServiceUnavailable: {Code: http.StatusServiceUnavailable, Keyword: "service unavailable"}, IDResponseErrorsInternal: {Code: http.StatusInternalServerError, Keyword: "internal error"}, } + + // DefaultResultIDs is a list of the default result IDs used by the package. + DefaultResultIDs = []ResultID{IDRequestCount, IDResponseCount, IDResponseErrorsCount, IDResponseValidCount} ) // ResultID unique string identifying a requests Result @@ -122,7 +128,7 @@ type Result struct { // DefaultMonitoringMapForRegistry returns map matching resultIDs to monitoring counters for given registry. 
func DefaultMonitoringMapForRegistry(r *monitoring.Registry) map[ResultID]*monitoring.Int { - ids := []ResultID{IDUnset, IDRequestCount, IDResponseCount, IDResponseErrorsCount, IDResponseValidCount} + ids := append(DefaultResultIDs, IDUnset) for id := range MapResultIDToStatus { ids = append(ids, id) } @@ -172,16 +178,6 @@ func (r *Result) SetWithBody(id ResultID, body interface{}) { r.set(id, body, nil) } -// SetDeniedAuthorization sets the result when authorization is denied -func (r *Result) SetDeniedAuthorization(err error) { - if err != nil { - id := IDResponseErrorsServiceUnavailable - status := MapResultIDToStatus[id] - r.Set(id, status.Code, status.Keyword, status.Keyword, err) - } - r.SetDefault(IDResponseErrorsUnauthorized) -} - // Set allows for the most flexibility in setting a result's properties. // The error and body information are derived from the given parameters. func (r *Result) Set(id ResultID, statusCode int, keyword string, body interface{}, err error) { diff --git a/beater/request/result_test.go b/beater/request/result_test.go index dcad5ede568..f0dabbc315b 100644 --- a/beater/request/result_test.go +++ b/beater/request/result_test.go @@ -192,7 +192,7 @@ func TestResult_Failure(t *testing.T) { func TestDefaultMonitoringMapForRegistry(t *testing.T) { mockRegistry := monitoring.Default.NewRegistry("mock-default") m := DefaultMonitoringMapForRegistry(mockRegistry) - assert.Equal(t, 21, len(m)) + assert.Equal(t, 22, len(m)) for id := range m { assert.Equal(t, int64(0), m[id].Get()) } diff --git a/beater/server.go b/beater/server.go index e23c129387e..cc2b882e5a2 100644 --- a/beater/server.go +++ b/beater/server.go @@ -19,17 +19,33 @@ package beater import ( "context" + "net" "net/http" + "time" "go.elastic.co/apm" + "go.elastic.co/apm/module/apmgrpc" + "go.elastic.co/apm/module/apmhttp" "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/version" + "github.com/elastic/apm-server/agentcfg" + "github.com/elastic/apm-server/beater/api" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/beater/interceptors" "github.com/elastic/apm-server/beater/jaeger" + "github.com/elastic/apm-server/beater/otlp" + "github.com/elastic/apm-server/beater/ratelimit" + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" "github.com/elastic/apm-server/publish" + "github.com/elastic/apm-server/sourcemap" ) // RunServerFunc is a function which runs the APM Server until a @@ -38,9 +54,18 @@ type RunServerFunc func(context.Context, ServerParams) error // ServerParams holds parameters for running the APM Server. type ServerParams struct { + // Info holds metadata about the server, such as its UUID. + Info beat.Info + // Config is the configuration used for running the APM Server. Config *config.Config + // Managed indicates that the server is managed by Fleet. + Managed bool + + // Namespace holds the data stream namespace for the server. + Namespace string + // Logger is the logger for the beater component. Logger *logp.Logger @@ -48,65 +73,198 @@ type ServerParams struct { // for self-instrumentation. 
 	Tracer *apm.Tracer
 
-	// Reporter is the publish.Reporter that the APM Server
-	// should use for reporting events.
-	Reporter publish.Reporter
+	// SourcemapStore holds a sourcemap.Store, or nil if source
+	// mapping is disabled.
+	SourcemapStore *sourcemap.Store
+
+	// BatchProcessor is the model.BatchProcessor that is used
+	// for publishing events to the output, such as Elasticsearch.
+	BatchProcessor model.BatchProcessor
+
+	// PublishReady holds a channel which will be signalled when the server
+	// is ready to publish events. Readiness means that preconditions for
+	// event publication have been met, including license checks for some
+	// features and waiting for the Fleet integration to be installed
+	// when running in standalone mode.
+	//
+	// Even if the server is not ready to publish events, it will still
+	// accept events and enqueue them for later publication.
+	PublishReady <-chan struct{}
+
+	// NewElasticsearchClient returns an elasticsearch.Client for cfg.
+	//
+	// This must be used whenever an elasticsearch client might be used
+	// for indexing. Under some configuration, the server will wrap the
+	// client's transport such that requests will be blocked until data
+	// streams have been initialised.
+	NewElasticsearchClient func(cfg *elasticsearch.Config) (elasticsearch.Client, error)
 }
 
-// runServer runs the APM Server until a fatal error occurs, or ctx is cancelled.
-func runServer(ctx context.Context, args ServerParams) error {
-	srv, err := newServer(args.Logger, args.Config, args.Tracer, args.Reporter)
-	if err != nil {
-		return err
-	}
-	done := make(chan struct{})
-	defer close(done)
-	go func() {
-		select {
-		case <-ctx.Done():
-			srv.stop()
-		case <-done:
+// newBaseRunServer returns the base RunServerFunc.
+//
+// reporter is the publish.Reporter that the server should use for
+// uploading sourcemaps and publishing its onboarding doc.
+// Everything else should use ServerParams.BatchProcessor.
+//
+// Once we remove sourcemap uploading and onboarding docs, we
+// should remove the reporter parameter.
+func newBaseRunServer(listener net.Listener, reporter publish.Reporter) RunServerFunc {
+	return func(ctx context.Context, args ServerParams) error {
+		srv, err := newServer(args, listener, reporter)
+		if err != nil {
+			return err
 		}
-	}()
-	return srv.run()
+		done := make(chan struct{})
+		defer close(done)
+		go func() {
+			select {
+			case <-ctx.Done():
+				srv.stop()
+			case <-done:
+			}
+		}()
+		go srv.agentcfgFetchReporter.Run(ctx)
+		return srv.run()
+	}
 }
 
 type server struct {
-	logger *logp.Logger
-	cfg    *config.Config
+	logger                *logp.Logger
+	cfg                   *config.Config
+	agentcfgFetchReporter agentcfg.Reporter
 
 	httpServer   *httpServer
+	grpcServer   *grpc.Server
 	jaegerServer *jaeger.Server
-	reporter     publish.Reporter
 }
 
-func newServer(logger *logp.Logger, cfg *config.Config, tracer *apm.Tracer, reporter publish.Reporter) (server, error) {
-	httpServer, err := newHTTPServer(logger, cfg, tracer, reporter)
+func newServer(args ServerParams, listener net.Listener, reporter publish.Reporter) (server, error) {
+	agentcfgFetchReporter := agentcfg.NewReporter(agentcfg.NewFetcher(args.Config), args.BatchProcessor, 30*time.Second)
+
+	// DEPRECATED: dedicated Jaeger server. This does not use the same authenticator and is not rate limited.
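+	// New deployments should prefer the Jaeger gRPC services registered on the
+	// main listener (see newGRPCServer below), which share authentication, rate
+	// limiting and monitoring with the rest of the server.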
+ jaegerServer, err := jaeger.NewServer(args.Logger, args.Config, args.Tracer, args.BatchProcessor, agentcfgFetchReporter) + if err != nil { + return server{}, err + } + + ratelimitStore, err := ratelimit.NewStore( + args.Config.AgentAuth.Anonymous.RateLimit.IPLimit, + args.Config.AgentAuth.Anonymous.RateLimit.EventLimit, + 3, // burst multiplier + ) + if err != nil { + return server{}, err + } + authenticator, err := auth.NewAuthenticator(args.Config.AgentAuth) + if err != nil { + return server{}, err + } + + // Add a model processor that rate limits, and checks authorization for the agent and service for each event. + batchProcessor := modelprocessor.Chained{ + model.ProcessBatchFunc(rateLimitBatchProcessor), + model.ProcessBatchFunc(authorizeEventIngestProcessor), + args.BatchProcessor, + } + + publishReady := func() bool { + select { + case <-args.PublishReady: + return true + default: + return false + } + } + + // Create an HTTP server for serving Elastic APM agent requests. + mux, err := api.NewMux( + args.Info, args.Config, reporter, batchProcessor, + authenticator, agentcfgFetchReporter, ratelimitStore, + args.SourcemapStore, args.Managed, publishReady, + ) + if err != nil { + return server{}, err + } + handler := apmhttp.Wrap(mux, apmhttp.WithServerRequestIgnorer(doNotTrace), apmhttp.WithTracer(args.Tracer)) + httpServer, err := newHTTPServer(args.Logger, args.Info, args.Config, handler, reporter, listener) if err != nil { return server{}, err } - jaegerServer, err := jaeger.NewServer(logger, cfg, tracer, reporter) + + // Create a gRPC server for OTLP and Jaeger. + grpcServer, err := newGRPCServer( + args.Logger, args.Config, args.Tracer, + authenticator, batchProcessor, agentcfgFetchReporter, ratelimitStore, + ) if err != nil { return server{}, err } + return server{ - logger: logger, - cfg: cfg, - httpServer: httpServer, - jaegerServer: jaegerServer, - reporter: reporter, + logger: args.Logger, + cfg: args.Config, + httpServer: httpServer, + grpcServer: grpcServer, + jaegerServer: jaegerServer, + agentcfgFetchReporter: agentcfgFetchReporter, }, nil } +func newGRPCServer( + logger *logp.Logger, + cfg *config.Config, + tracer *apm.Tracer, + authenticator *auth.Authenticator, + batchProcessor model.BatchProcessor, + agentcfgFetcher agentcfg.Fetcher, + ratelimitStore *ratelimit.Store, +) (*grpc.Server, error) { + apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(tracer)) + authInterceptor := interceptors.Auth( + otlp.MethodAuthenticators(authenticator), + jaeger.MethodAuthenticators(authenticator, jaeger.ElasticAuthTag), + ) + + // Note that we intentionally do not use a grpc.Creds ServerOption + // even if TLS is enabled, as TLS is handled by the net/http server. + logger = logger.Named("grpc") + srv := grpc.NewServer( + grpc.ChainUnaryInterceptor( + apmInterceptor, + interceptors.ClientMetadata(), + interceptors.Logging(logger), + interceptors.Metrics(logger, otlp.RegistryMonitoringMaps, jaeger.RegistryMonitoringMaps), + interceptors.Timeout(), + authInterceptor, + interceptors.AnonymousRateLimit(ratelimitStore), + ), + ) + + if cfg.AugmentEnabled { + // Add a model processor that sets `client.ip` for events from end-user devices. 
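+		// The client address is assumed to come from the gRPC peer information
+		// captured by the ClientMetadata interceptor in the chain above.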
+ batchProcessor = modelprocessor.Chained{ + model.ProcessBatchFunc(otlp.SetClientMetadata), + batchProcessor, + } + } + + jaeger.RegisterGRPCServices(srv, logger, batchProcessor, agentcfgFetcher) + if err := otlp.RegisterGRPCServices(srv, batchProcessor); err != nil { + return nil, err + } + return srv, nil +} + func (s server) run() error { s.logger.Infof("Starting apm-server [%s built %s]. Hit CTRL-C to stop it.", version.Commit(), version.BuildTime()) var g errgroup.Group + g.Go(s.httpServer.start) + g.Go(func() error { + return s.grpcServer.Serve(s.httpServer.grpcListener) + }) if s.jaegerServer != nil { g.Go(s.jaegerServer.Serve) } - if s.httpServer != nil { - g.Go(s.httpServer.start) - } if err := g.Wait(); err != http.ErrServerClosed { return err } @@ -118,7 +276,6 @@ func (s server) stop() { if s.jaegerServer != nil { s.jaegerServer.Stop() } - if s.httpServer != nil { - s.httpServer.stop() - } + s.grpcServer.GracefulStop() + s.httpServer.stop() } diff --git a/beater/server_test.go b/beater/server_test.go index aa3853c7bc4..51b2431060a 100644 --- a/beater/server_test.go +++ b/beater/server_test.go @@ -20,35 +20,46 @@ package beater import ( "bytes" "context" + "encoding/json" "fmt" + "io" "io/ioutil" "net" "net/http" + "net/http/httptest" + "net/url" "os" + "path" + "reflect" "runtime" + "sync" "testing" "time" - "github.com/gofrs/uuid" + "github.com/gogo/protobuf/proto" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/instrumentation" + "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/outputs" pubs "github.com/elastic/beats/v7/libbeat/publisher" "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/libbeat/publisher/processing" "github.com/elastic/beats/v7/libbeat/publisher/queue" "github.com/elastic/beats/v7/libbeat/publisher/queue/memqueue" - "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/apm-server/beater/api" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/elasticsearch" - "github.com/elastic/apm-server/tests/loader" ) type m map[string]interface{} @@ -385,6 +396,296 @@ func TestServerSourcemapElasticsearch(t *testing.T) { } } +func TestServerJaegerGRPC(t *testing.T) { + server, err := setupServer(t, nil, nil, nil) + require.NoError(t, err) + defer server.Stop() + + baseURL, err := url.Parse(server.baseURL) + require.NoError(t, err) + conn, err := grpc.Dial(baseURL.Host, grpc.WithInsecure()) + require.NoError(t, err) + defer conn.Close() + + client := api_v2.NewCollectorServiceClient(conn) + result, err := client.PostSpans(context.Background(), &api_v2.PostSpansRequest{}) + assert.NoError(t, err) + assert.NotNil(t, result) +} + +func TestServerOTLPGRPC(t *testing.T) { + ucfg, err := common.NewConfigFrom(m{"secret_token": "abc123"}) + assert.NoError(t, err) + server, err := setupServer(t, ucfg, nil, nil) + require.NoError(t, err) + defer server.Stop() + + baseURL, 
err := url.Parse(server.baseURL)
+	require.NoError(t, err)
+	invokeExport := func(ctx context.Context, conn *grpc.ClientConn) error {
+		// We can't use go.opentelemetry.io/otel, as it has its own generated protobuf packages
+		// which conflict with opentelemetry-collector's. Instead, use the types registered
+		// by the opentelemetry-collector packages.
+		requestType := proto.MessageType("opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest")
+		responseType := proto.MessageType("opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse")
+		request := reflect.New(requestType.Elem()).Interface()
+		response := reflect.New(responseType.Elem()).Interface()
+		return conn.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", request, response)
+	}
+
+	conn, err := grpc.Dial(baseURL.Host, grpc.WithInsecure())
+	require.NoError(t, err)
+	defer conn.Close()
+
+	ctx := context.Background()
+	err = invokeExport(ctx, conn)
+	assert.Error(t, err)
+	assert.Equal(t, codes.Unauthenticated, status.Code(err))
+
+	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("Authorization", "Bearer abc123"))
+	err = invokeExport(ctx, conn)
+	assert.NoError(t, err)
+}
+
+func TestServerConfigReload(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping server test")
+	}
+
+	// The beater has no way of unregistering itself from reload.Register,
+	// so we create a fresh registry and replace it after the test.
+	oldRegister := reload.Register
+	defer func() {
+		reload.Register = oldRegister
+	}()
+	reload.Register = reload.NewRegistry()
+
+	cfg := common.MustNewConfigFrom(map[string]interface{}{
+		// Set an invalid host to illustrate that the static config
+		// is not used for defining the listening address.
+		"host": "testing.invalid:123",
+
+		// Data streams must be enabled when the server is managed.
+		"data_streams.enabled": true,
+	})
+	apmBeat, cfg := newBeat(t, cfg, nil, nil)
+	apmBeat.Manager = &mockManager{enabled: true}
+	beater, err := newTestBeater(t, apmBeat, cfg, nil)
+	require.NoError(t, err)
+	beater.start()
+
+	// Now that the beater is running, send config changes. The reloader
+	// is not registered until after the beater starts running, so we
+	// must loop until it is set.
+	var reloadable reload.ReloadableList
+	for {
+		reloadable = reload.Register.GetReloadableList("inputs")
+		if reloadable != nil {
+			break
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// The config must contain an "apm-server" section, and will be rejected otherwise.
+	err = reloadable.Reload([]*reload.ConfigWithMeta{{Config: common.NewConfig()}})
+	assert.EqualError(t, err, "'apm-server' not found in integration config")
+
+	// Creating the socket listener is performed synchronously in the Reload method
+	// to ensure zero downtime when reloading an already running server. Illustrate
+	// that the socket listener is created synchronously in Reload by attempting to
+	// reload with an invalid host.
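+	// A Reload that fails to bind the new listener must return an error and
+	// leave any previously started server running untouched.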
+ err = reloadable.Reload([]*reload.ConfigWithMeta{{Config: common.MustNewConfigFrom(map[string]interface{}{ + "apm-server": map[string]interface{}{ + "host": "testing.invalid:123", + }, + })}}) + require.Error(t, err) + assert.Regexp(t, "listen tcp: lookup testing.invalid: .*", err.Error()) + + inputConfig := common.MustNewConfigFrom(map[string]interface{}{ + "apm-server": map[string]interface{}{ + "host": "localhost:0", + }, + }) + err = reloadable.Reload([]*reload.ConfigWithMeta{{Config: inputConfig}}) + require.NoError(t, err) + + healthcheck := func(addr string) string { + resp, err := http.Get("http://" + addr) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + return string(body) + } + + addr1, err := beater.waitListenAddr(1 * time.Second) + require.NoError(t, err) + assert.NotEmpty(t, healthcheck(addr1)) // non-empty as there's no auth required + + // Reload config, causing the HTTP server to be restarted. + require.NoError(t, inputConfig.SetString("apm-server.secret_token", -1, "secret")) + err = reloadable.Reload([]*reload.ConfigWithMeta{{Config: inputConfig}}) + require.NoError(t, err) + + addr2, err := beater.waitListenAddr(1 * time.Second) + require.NoError(t, err) + assert.Empty(t, healthcheck(addr2)) // empty as auth is required but not specified + + // First HTTP server should have been stopped. + _, err = http.Get("http://" + addr1) + assert.Error(t, err) +} + +func TestServerWaitForIntegrationKibana(t *testing.T) { + var requests int + requestCh := make(chan struct{}) + mux := http.NewServeMux() + mux.HandleFunc("/api/status", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(`{"version":{"number":"1.2.3"}}`)) + }) + mux.HandleFunc("/api/fleet/epm/packages/apm", func(w http.ResponseWriter, r *http.Request) { + requests++ + switch requests { + case 1: + w.WriteHeader(500) + case 2: + fmt.Fprintln(w, `{"response":{"status":"not_installed"}}`) + case 3: + fmt.Fprintln(w, `{"response":{"status":"installed"}}`) + } + select { + case requestCh <- struct{}{}: + case <-r.Context().Done(): + } + }) + srv := httptest.NewServer(mux) + defer srv.Close() + + cfg := common.MustNewConfigFrom(map[string]interface{}{ + "data_streams.enabled": true, + "wait_ready_interval": "100ms", + "kibana.enabled": true, + "kibana.host": srv.URL, + }) + _, err := setupServer(t, cfg, nil, nil) + require.NoError(t, err) + + timeout := time.After(10 * time.Second) + for i := 0; i < 3; i++ { + select { + case <-requestCh: + case <-timeout: + t.Fatal("timed out waiting for request") + } + } + select { + case <-requestCh: + t.Fatal("unexpected request") + case <-time.After(50 * time.Millisecond): + } +} + +func TestServerWaitForIntegrationElasticsearch(t *testing.T) { + var mu sync.Mutex + var tracesRequests int + tracesRequestsCh := make(chan int) + bulkCh := make(chan struct{}, 1) + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Elastic-Product", "Elasticsearch") + // We must send a valid JSON response for the libbeat + // elasticsearch client to send bulk requests. 
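+		// A minimal body carrying only a version number is enough to satisfy
+		// the client's connection handshake in this test.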
+ fmt.Fprintln(w, `{"version":{"number":"1.2.3"}}`) + }) + mux.HandleFunc("/_index_template/", func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + template := path.Base(r.URL.Path) + if template == "traces-apm" { + tracesRequests++ + if tracesRequests == 1 { + w.WriteHeader(404) + } + tracesRequestsCh <- tracesRequests + } + }) + mux.HandleFunc("/_bulk", func(w http.ResponseWriter, r *http.Request) { + select { + case bulkCh <- struct{}{}: + default: + } + }) + srv := httptest.NewServer(mux) + defer srv.Close() + + cfg := common.MustNewConfigFrom(map[string]interface{}{ + "data_streams.enabled": true, + "wait_ready_interval": "100ms", + }) + var beatConfig beat.BeatConfig + err := beatConfig.Output.Unpack(common.MustNewConfigFrom(map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "hosts": []string{srv.URL}, + "backoff": map[string]interface{}{"init": "10ms", "max": "10ms"}, + "max_retries": 1000, + }, + })) + require.NoError(t, err) + + beater, err := setupServer(t, cfg, &beatConfig, nil) + require.NoError(t, err) + + // Send some events to the server. They should be accepted and enqueued. + req := makeTransactionRequest(t, beater.baseURL) + req.Header.Add("Content-Type", "application/x-ndjson") + resp, err := beater.client.Do(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusAccepted, resp.StatusCode) + resp.Body.Close() + + // Healthcheck should report that the server is not publish-ready. + resp, err = beater.client.Get(beater.baseURL + api.RootPath) + require.NoError(t, err) + out := decodeJSONMap(t, resp.Body) + resp.Body.Close() + assert.Equal(t, false, out["publish_ready"]) + + // Indexing should be blocked until we receive from tracesRequestsCh. + select { + case <-bulkCh: + t.Fatal("unexpected bulk request") + case <-time.After(50 * time.Millisecond): + } + + timeout := time.After(10 * time.Second) + var done bool + for !done { + select { + case n := <-tracesRequestsCh: + done = n == 2 + case <-timeout: + t.Fatal("timed out waiting for request") + } + } + + // libbeat should keep retrying, and finally succeed now it is unblocked. + select { + case <-bulkCh: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for bulk request") + } + + // Healthcheck should now report that the server is publish-ready. + resp, err = beater.client.Get(beater.baseURL + api.RootPath) + require.NoError(t, err) + out = decodeJSONMap(t, resp.Body) + resp.Body.Close() + assert.Equal(t, true, out["publish_ready"]) +} + type chanClient struct { done chan struct{} Channel chan beat.Event @@ -440,7 +741,7 @@ func dummyPipeline(cfg *common.Config, info beat.Info, clients ...outputs.Client if cfg == nil { cfg = common.NewConfig() } - processors, err := processing.MakeDefaultObserverSupport(false)(info, logp.NewLogger("testbeat"), cfg) + processors, err := processing.MakeDefaultSupport(false)(info, logp.NewLogger("testbeat"), cfg) if err != nil { panic(err) } @@ -470,66 +771,8 @@ func dummyPipeline(cfg *common.Config, info beat.Info, clients ...outputs.Client return p } -func setupServer(t *testing.T, cfg *common.Config, beatConfig *beat.BeatConfig, events chan beat.Event) (*testBeater, error) { - if testing.Short() { - t.Skip("skipping server test") - } - - baseConfig := common.MustNewConfigFrom(map[string]interface{}{ - "host": "localhost:0", - - // Enable instrumentation so the profile endpoint is - // available, but set the profiling interval to something - // long enough that it won't kick in. 
- "instrumentation": map[string]interface{}{ - "enabled": true, - "profiling": map[string]interface{}{ - "cpu": map[string]interface{}{ - "enabled": true, - "interval": "360s", - }, - }, - }, - }) - if cfg != nil { - err := cfg.Unpack(baseConfig) - require.NoError(t, err) - } - - beatId, err := uuid.FromString("fbba762a-14dd-412c-b7e9-b79f903eb492") - require.NoError(t, err) - info := beat.Info{ - Beat: "test-apm-server", - IndexPrefix: "test-apm-server", - Version: version.GetDefaultVersion(), - ID: beatId, - } - - var pub beat.Pipeline - if events != nil { - // capture events - pubClient := newChanClientWith(events) - pub = dummyPipeline(cfg, info, pubClient) - } else { - // don't capture events - pub = dummyPipeline(cfg, info) - } - - instrumentation, err := instrumentation.New(baseConfig, info.Beat, info.Version) - require.NoError(t, err) - - // create a beat - apmBeat := &beat.Beat{ - Publisher: pub, - Info: info, - Config: beatConfig, - Instrumentation: instrumentation, - } - return setupBeater(t, apmBeat, baseConfig, beatConfig) -} - var testData = func() []byte { - b, err := loader.LoadDataAsBytes("../testdata/intake-v2/transactions.ndjson") + b, err := ioutil.ReadFile("../testdata/intake-v2/transactions.ndjson") if err != nil { panic(err) } @@ -545,9 +788,25 @@ func makeTransactionRequest(t *testing.T, baseUrl string) *http.Request { return req } +func decodeJSONMap(t *testing.T, r io.Reader) map[string]interface{} { + out := make(map[string]interface{}) + err := json.NewDecoder(r).Decode(&out) + require.NoError(t, err) + return out +} + func body(t *testing.T, response *http.Response) string { body, err := ioutil.ReadAll(response.Body) require.NoError(t, err) require.NoError(t, response.Body.Close()) return string(body) } + +type mockManager struct { + management.Manager + enabled bool +} + +func (m *mockManager) Enabled() bool { + return m.enabled +} diff --git a/beater/telemetry.go b/beater/telemetry.go index 5c4d90651de..6f9e3d6e6c0 100644 --- a/beater/telemetry.go +++ b/beater/telemetry.go @@ -23,13 +23,13 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/idxmgmt/ilm" - "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" ) var apmRegistry = monitoring.GetNamespace("state").GetRegistry().NewRegistry("apm-server") type configTelemetry struct { + dataStreamsEnabled *monitoring.Bool rumEnabled *monitoring.Bool apiKeysEnabled *monitoring.Bool kibanaEnabled *monitoring.Bool @@ -45,9 +45,12 @@ type configTelemetry struct { jaegerGRPCEnabled *monitoring.Bool jaegerHTTPEnabled *monitoring.Bool sslEnabled *monitoring.Bool + tailSamplingEnabled *monitoring.Bool + tailSamplingPolicies *monitoring.Int } var configMonitors = &configTelemetry{ + dataStreamsEnabled: monitoring.NewBool(apmRegistry, "data_streams.enabled"), rumEnabled: monitoring.NewBool(apmRegistry, "rum.enabled"), apiKeysEnabled: monitoring.NewBool(apmRegistry, "api_key.enabled"), kibanaEnabled: monitoring.NewBool(apmRegistry, "kibana.enabled"), @@ -63,22 +66,18 @@ var configMonitors = &configTelemetry{ jaegerGRPCEnabled: monitoring.NewBool(apmRegistry, "jaeger.grpc.enabled"), jaegerHTTPEnabled: monitoring.NewBool(apmRegistry, "jaeger.http.enabled"), sslEnabled: monitoring.NewBool(apmRegistry, "ssl.enabled"), + tailSamplingEnabled: monitoring.NewBool(apmRegistry, "sampling.tail.enabled"), + tailSamplingPolicies: monitoring.NewInt(apmRegistry, 
"sampling.tail.policies"), } -func recordConfigs(info beat.Info, apmCfg *config.Config, rootCfg *common.Config, logger *logp.Logger) { +// recordRootConfig records static properties of the given root config for telemetry. +// This should be called once at startup, with the root config. +func recordRootConfig(info beat.Info, rootCfg *common.Config) error { indexManagementCfg, err := idxmgmt.NewIndexManagementConfig(info, rootCfg) if err != nil { - logger.Errorf("Error recording telemetry data", err) - return + return err } - configMonitors.rumEnabled.Set(apmCfg.RumConfig.IsEnabled()) - configMonitors.apiKeysEnabled.Set(apmCfg.APIKeyConfig.IsEnabled()) - configMonitors.kibanaEnabled.Set(apmCfg.Kibana.Enabled) - configMonitors.jaegerHTTPEnabled.Set(apmCfg.JaegerConfig.HTTP.Enabled) - configMonitors.jaegerGRPCEnabled.Set(apmCfg.JaegerConfig.GRPC.Enabled) - configMonitors.sslEnabled.Set(apmCfg.TLS.IsEnabled()) - configMonitors.pipelinesEnabled.Set(apmCfg.Register.Ingest.Pipeline.IsEnabled()) - configMonitors.pipelinesOverwrite.Set(apmCfg.Register.Ingest.Pipeline.ShouldOverwrite()) + configMonitors.dataStreamsEnabled.Set(indexManagementCfg.DataStreams) configMonitors.setupTemplateEnabled.Set(indexManagementCfg.Template.Enabled) configMonitors.setupTemplateOverwrite.Set(indexManagementCfg.Template.Overwrite) configMonitors.setupTemplateAppendFields.Set(len(indexManagementCfg.Template.AppendFields.GetKeys()) > 0) @@ -87,4 +86,20 @@ func recordConfigs(info beat.Info, apmCfg *config.Config, rootCfg *common.Config configMonitors.ilmSetupRequirePolicy.Set(indexManagementCfg.ILM.Setup.RequirePolicy) mode := indexManagementCfg.ILM.Mode configMonitors.ilmEnabled.Set(mode == ilm.ModeAuto || mode == ilm.ModeEnabled) + return nil +} + +// recordAPMServerConfig records dynamic APM Server config properties for telemetry. +// This should be called once each time runServer is called. 
+func recordAPMServerConfig(cfg *config.Config) { + configMonitors.rumEnabled.Set(cfg.RumConfig.Enabled) + configMonitors.apiKeysEnabled.Set(cfg.AgentAuth.APIKey.Enabled) + configMonitors.kibanaEnabled.Set(cfg.Kibana.Enabled) + configMonitors.jaegerHTTPEnabled.Set(cfg.JaegerConfig.HTTP.Enabled) + configMonitors.jaegerGRPCEnabled.Set(cfg.JaegerConfig.GRPC.Enabled) + configMonitors.sslEnabled.Set(cfg.TLS.IsEnabled()) + configMonitors.pipelinesEnabled.Set(cfg.Register.Ingest.Pipeline.Enabled) + configMonitors.pipelinesOverwrite.Set(cfg.Register.Ingest.Pipeline.Overwrite) + configMonitors.tailSamplingEnabled.Set(cfg.Sampling.Tail.Enabled) + configMonitors.tailSamplingPolicies.Set(int64(len(cfg.Sampling.Tail.Policies))) } diff --git a/beater/telemetry_test.go b/beater/telemetry_test.go index 869fc02d8d7..cae9c025baa 100644 --- a/beater/telemetry_test.go +++ b/beater/telemetry_test.go @@ -20,9 +20,8 @@ package beater import ( "testing" - "github.com/elastic/beats/v7/libbeat/logp" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/apm-server/beater/config" "github.com/elastic/beats/v7/libbeat/beat" @@ -35,7 +34,7 @@ func TestRecordConfigs(t *testing.T) { info := beat.Info{Name: "apm-server", Version: "7.x"} apmCfg := config.DefaultConfig() - apmCfg.APIKeyConfig.Enabled = true + apmCfg.AgentAuth.APIKey.Enabled = true apmCfg.Kibana.Enabled = true apmCfg.JaegerConfig.GRPC.Enabled = true apmCfg.JaegerConfig.HTTP.Enabled = true @@ -55,7 +54,8 @@ func TestRecordConfigs(t *testing.T) { }, }, }) - recordConfigs(info, apmCfg, rootCfg, logp.NewLogger("beater")) + require.NoError(t, recordRootConfig(info, rootCfg)) + recordAPMServerConfig(apmCfg) assert.Equal(t, configMonitors.ilmSetupEnabled.Get(), true) assert.Equal(t, configMonitors.rumEnabled.Get(), false) diff --git a/beater/test_approved_es_documents/TestDropNonSampledTransactions.approved.json b/beater/test_approved_es_documents/TestDropNonSampledTransactions.approved.json deleted file mode 100644 index e221283f97a..00000000000 --- a/beater/test_approved_es_documents/TestDropNonSampledTransactions.approved.json +++ /dev/null @@ -1,1003 +0,0 @@ -{ - "events": [ - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "js-base", - "version": "1.3" - }, - "client": { - "ip": "8.8.8.8" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "http": { - "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } - }, - "cookies": { - "c1": "v1", - "c2": "v2" - }, - "env": { - "GATEWAY_INTERFACE": "CGI/1.1", - "SERVER_SOFTWARE": "nginx" - }, - "headers": { - "Array": [ - "foo", - "bar", - "baz" - ], - "Content-Type": [ - "text/html" - ], - "Cookie": [ - "c1=v1,c2=v2" - ], - "Some-Other-Header": [ - "foo" - ], - "User-Agent": [ - "Mozilla Chrome Edge" - ] - }, - "method": "post", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } - }, - "response": { - "finished": true, - "headers": { - "Content-Type": [ - "application/json" - ] - }, - "headers_sent": true, - "status_code": 200 - }, - "version": "1.1" - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "labels": { - 
"bool_error": false, - "number_code": 2, - "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8" - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "serviceabc", - "node": { - "name": "special-name" - }, - "runtime": { - "name": "javascript", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "source": { - "ip": "8.8.8.8" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "custom": { - "(": "not a valid regex and that is fine", - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foo bar" - }, - "duration": { - "us": 32592 - }, - "id": "945254c567a5417e", - "marks": { - "another_mark": { - "some_float": 10, - "some_long": 10 - }, - "navigationTiming": { - "appBeforeBootstrap": 608.9300000000001, - "navigationStart": -21 - } - }, - "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, - "result": "success", - "sampled": true, - "span_count": { - "dropped": 2, - "started": 4 - }, - "type": "request" - }, - "url": { - "domain": "www.example.com", - "fragment": "#hash", - "full": "https://www.example.com/p/a/t/h?query=string#hash", - "original": "/p/a/t/h?query=string#hash", - "path": "/p/a/t/h", - "port": 8080, - "query": "?query=string", - "scheme": "https" - }, - "user": { - "email": "foo@example.com", - "id": "99" - }, - "user_agent": { - "original": "Mozilla Chrome Edge" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "labels": { - "span_tag": "something" - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "945254c567a5417e" - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "span": { - "action": "query", - "db": { - "instance": "customers", - "statement": "SELECT * FROM product_types WHERE user_id=?", - "type": "sql", - "user": { - "name": 
"readonly_user" - } - }, - "duration": { - "us": 3781 - }, - "http": { - "method": "get", - "response": { - "status_code": 200 - }, - "url": { - "original": "http://localhost:8000" - } - }, - "id": "0aaaaaaaaaaaaaaa", - "name": "SELECT FROM product_types", - "stacktrace": [ - { - "abs_path": "net.js", - "context": { - "post": [ - " ins.currentTransaction = prev", - " return result", - "}" - ], - "pre": [ - " var trans = this.currentTransaction", - "" - ] - }, - "exclude_from_grouping": false, - "filename": "net.js", - "function": "onread", - "library_frame": true, - "line": { - "column": 4, - "context": "line3", - "number": 547 - }, - "module": "some module", - "vars": { - "key": "value" - } - }, - { - "exclude_from_grouping": false, - "filename": "my2file.js", - "line": { - "number": 10 - } - } - ], - "start": { - "us": 2830 - }, - "subtype": "postgresql", - "sync": false, - "type": "db" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "id": "945254c567a5417e" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "945254c567a5417e" - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "span": { - "duration": { - "us": 32592 - }, - "id": "1aaaaaaaaaaaaaaa", - "name": "GET /api/types", - "start": { - "us": 0 - }, - "subtype": "external", - "type": "request" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "id": "945254c567a5417e" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - 
"version_major": 8 - }, - "parent": { - "id": "945254c567a5417e" - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "span": { - "action": "post", - "duration": { - "us": 3564 - }, - "id": "2aaaaaaaaaaaaaaa", - "name": "GET /api/types", - "start": { - "us": 1845 - }, - "subtype": "http", - "type": "request" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "id": "945254c567a5417e" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "child": { - "id": [ - "4aaaaaaaaaaaaaaa" - ] - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "945254c567a5417e" - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "span": { - "duration": { - "us": 13980 - }, - "id": "3aaaaaaaaaaaaaaa", - "name": "GET /api/types", - "start": { - "us": 0 - }, - "type": "request" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "id": "945254c567a5417e" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - 
"title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281000 - }, - "trace": { - "id": "85925e55b43f4340aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4340", - "name": "GET /api/types", - "result": "failure", - "sampled": true, - "span_count": { - "started": 0 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281999 - }, - "trace": { - "id": "85925e55b43f4342aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4342", - "name": "GET /api/types", - "result": "200", - "sampled": true, - "span_count": { - "dropped": 258, - "started": 1 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@metadata": { - "beat": "apm-test", - "pipeline": "apm", - "type": "_doc", - "version": "8.0.0" - }, - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "js-base", - "version": "1.3" - }, - "container": { - "id": "container-id" - }, - "destination": { - "address": "0:0::0:1", - "ip": "0:0::0:1", - "port": 5432 - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "85925e55b43f4342" - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "environment": "staging", - 
"framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "serviceabc", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "javascript", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "span": { - "action": "query.custom", - "db": { - "instance": "customers", - "statement": "SELECT * FROM product_types WHERE user_id=?", - "type": "sql", - "user": { - "name": "readonly_user" - } - }, - "destination": { - "service": { - "name": "postgresql", - "resource": "postgresql", - "type": "db" - } - }, - "duration": { - "us": 3781 - }, - "id": "15aaaaaaaaaaaaaa", - "name": "SELECT FROM product_types", - "start": { - "us": 2830 - }, - "subtype": "postgresql", - "type": "db.postgresql.query" - }, - "timestamp": { - "us": 1496170422281000 - }, - "trace": { - "id": "85925e55b43f4342aaaaaaaaaaaaaaaa" - }, - "transaction": { - "id": "85925e55b43f4342" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - } - ] -} diff --git a/beater/test_approved_es_documents/TestPublishIntegrationErrors.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationErrors.approved.json index 330f7b7349f..f0698464cfc 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationErrors.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationErrors.approved.json @@ -28,13 +28,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "culprit": "my.module.function_name", @@ -167,7 +170,6 @@ "exclude_from_grouping": false, "filename": "/webpack/file/name.py", "function": "foo", - "library_frame": false, "line": { "column": 4, "context": "line3", @@ -212,10 +214,6 @@ } } ] - }, - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" } }, "host": { @@ -229,9 +227,7 @@ }, "http": { "request": { - "body": { - "original": "Hello World" - }, + "body.original": "Hello World", "cookies": { "c1": "v1", "c2": "v2" @@ -259,12 +255,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -291,13 +283,13 @@ "labels": { "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8" }, + "message": "My service could not talk to the database named foobar", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -349,6 +341,7 @@ "scheme": "https" }, "user": { + "domain": "ldap://abc", "id": "99", "name": "foo" }, @@ -381,13 +374,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "grouping_key": "dc8dd667f7036ec5f0bae87bf2188243", @@ -421,13 +417,13 @@ "uid": "pod-uid" } }, + "message": "no user found", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - 
"hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -466,6 +462,7 @@ "us": 1533826745999000 }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -496,13 +493,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -532,13 +532,13 @@ "uid": "pod-uid" } }, + "message": "Cannot read property 'baz' no defined", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -577,6 +577,7 @@ "us": 1533826745999000 }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -607,13 +608,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -645,11 +649,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "9632587410abcdef" @@ -694,6 +697,7 @@ "id": "0123456789abcdeffedcba0123456789" }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -724,13 +728,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", @@ -759,13 +766,13 @@ "uid": "pod-uid" } }, + "message": "Cannot read property 'baz' of undefined", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "9632587410abcdef" @@ -815,6 +822,7 @@ "type": "request" }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" diff --git a/beater/test_approved_es_documents/TestPublishIntegrationEvents.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationEvents.approved.json index 75e5ddc9701..f7f5e84d8dc 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationEvents.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationEvents.approved.json @@ -5,19 +5,142 @@ "agent": { "ephemeral_id": "e71be9ac-93b0-44b9-a997-5638f6ccfc36", "name": "java", - "version": "1.10.0-SNAPSHOT" + "version": "1.10.0" }, "client": { - "ip": "12.53.12.1" + "ip": "192.168.0.1" }, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, - "event": { - "outcome": "success" + "error": { + "culprit": 
"opbeans.controllers.DTInterceptor.preHandle(DTInterceptor.java:73)", + "custom": { + "and_objects": { + "foo": [ + "bar", + "baz" + ] + }, + "my_key": 1, + "some_other_value": "foobar" + }, + "exception": [ + { + "attributes": { + "foo": "bar" + }, + "code": "42", + "handled": false, + "message": "Theusernamerootisunknown", + "module": "org.springframework.http.client", + "stacktrace": [ + { + "abs_path": "/tmp/AbstractPlainSocketImpl.java", + "context": { + "post": [ + "line4", + "line5" + ], + "pre": [ + "line1", + "line2" + ] + }, + "exclude_from_grouping": false, + "filename": "AbstractPlainSocketImpl.java", + "function": "connect", + "library_frame": true, + "line": { + "column": 4, + "context": "3", + "number": 3 + }, + "module": "java.net", + "vars": { + "key": "value" + } + }, + { + "exclude_from_grouping": false, + "filename": "AbstractClientHttpRequest.java", + "function": "execute", + "line": { + "number": 102 + }, + "vars": { + "key": "value" + } + } + ], + "type": "java.net.UnknownHostException" + }, + { + "message": "something wrong writing a file", + "type": "InternalDbError" + }, + { + "message": "disk spinning way too fast", + "type": "VeryInternalDbError" + }, + { + "message": "on top of it,internet doesn't work", + "parent": 1, + "type": "ConnectionError" + } + ], + "grouping_key": "9a4054e958afe722b5877e8fac578ff3", + "id": "9876543210abcdeffedcba0123456789", + "log": { + "level": "error", + "logger_name": "http404", + "message": "Request method 'POST' not supported", + "param_message": "Request method 'POST' /events/:event not supported", + "stacktrace": [ + { + "abs_path": "/tmp/Socket.java", + "classname": "Request::Socket", + "context": { + "post": [ + "line4", + "line5" + ], + "pre": [ + "line1", + "line2" + ] + }, + "exclude_from_grouping": false, + "filename": "Socket.java", + "function": "connect", + "library_frame": true, + "line": { + "column": 4, + "context": "line3", + "number": 3 + }, + "module": "java.net", + "vars": { + "key": "value" + } + }, + { + "abs_path": "/tmp/SimpleBufferingClientHttpRequest.java", + "exclude_from_grouping": false, + "filename": "SimpleBufferingClientHttpRequest.java", + "function": "executeInternal", + "line": { + "number": 102 + }, + "vars": { + "key": "value" + } + } + ] + } }, "host": { "architecture": "amd64", @@ -30,15 +153,7 @@ }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additionalinformation" - }, - "string": "helloworld" - } - }, + "body.original": "HelloWorld", "cookies": { "c1": "v1", "c2": "v2" @@ -48,29 +163,26 @@ "SERVER_SOFTWARE": "nginx" }, "headers": { - "Content-Type": [ - "text/html" + "Content-Length": [ + "0" ], "Cookie": [ - "c1=v1,c2=v2" + "c1=v1", + "c2=v2" ], "Elastic-Apm-Traceparent": [ - "00-33a0bd4cceff0370a7c57d807032688e-69feaabc5b88d7e8-01" + "00-8c21b4b556467a0b17ae5da959b5f388-31301f1fb2998121-01" ], - "User-Agent": [ - "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36", - "MozillaChromeEdge" + "Forwarded": [ + "for=192.168.0.1" + ], + "Host": [ + "opbeans-java:3000" ] }, - "method": "post", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1:8080" - } + "method": "POST" }, "response": { - "decoded_body_size": 401.9, - "encoded_body_size": 356.9, "finished": true, "headers": { "Content-Type": [ @@ -78,8 +190,7 @@ ] }, "headers_sent": true, - "status_code": 200, - "transfer_size": 300 + "status_code": 200 }, "version": "1.1" }, @@ -97,19 +208,18 @@ "ab_testing": true, 
"group": "experimental", "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", - "segment": 5, - "wrapped_reporter": true + "segment": 5 }, + "message": "Request method 'POST' not supported", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { - "id": "abcdefabcdef01234567" + "id": "9632587410abcdef" }, "process": { "args": [ @@ -120,22 +230,22 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "transaction", - "name": "transaction" + "event": "error", + "name": "error" }, "service": { "environment": "production", "framework": { - "name": "spring", - "version": "5.0.0" + "name": "Node", + "version": "1" }, "language": { "name": "Java", - "version": "10.0.2" + "version": "1.2" }, - "name": "experimental-java", + "name": "service1", "node": { - "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" + "name": "node-xyz" }, "runtime": { "name": "Java", @@ -150,32 +260,12 @@ "us": 1571657444929001 }, "trace": { - "id": "0acd456789abcdef0123456789abcdef" + "id": "0123456789abcdeffedcba0123456789" }, "transaction": { - "custom": { - "(": "notavalidregexandthatisfine", - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foobar" - }, - "duration": { - "us": 32592 - }, - "id": "4340a8e0df1906ecbfa9", - "name": "ResourceHttpRequestHandler", - "result": "HTTP2xx", + "id": "1234567890987654", "sampled": true, - "span_count": { - "dropped": 0, - "started": 17 - }, - "type": "http" + "type": "request" }, "url": { "domain": "www.example.com", @@ -188,12 +278,9 @@ "scheme": "https" }, "user": { - "email": "foo@mail.com", + "email": "user@foo.mail", "id": "99", "name": "foo" - }, - "user_agent": { - "original": "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36, MozillaChromeEdge" } }, { @@ -207,7 +294,7 @@ "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "success" @@ -221,6 +308,22 @@ "platform": "Linux" } }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "decoded_body_size": 401, + "encoded_body_size": 356, + "headers": { + "Content-Type": [ + "application/json" + ] + }, + "status_code": 302, + "transfer_size": 300.12 + } + }, "kubernetes": { "namespace": "default", "node": { @@ -238,11 +341,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdef0123456789" @@ -294,7 +396,7 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "decoded_body_size": 401, "encoded_body_size": 356, @@ -303,13 +405,11 @@ "application/json" ] }, - "status_code": 200, + "status_code": 302, "transfer_size": 300.12 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "1234567890aaaade", "name": "GET users-authenticated", "stacktrace": [ @@ -349,6 +449,9 @@ }, "transaction": { "id": "1234567890987654" + }, + "url": { + "original": "http://localhost:8000" } }, { @@ -356,22 +459,20 @@ "agent": { "ephemeral_id": 
"e71be9ac-93b0-44b9-a997-5638f6ccfc36", "name": "java", - "version": "1.10.0" + "version": "1.10.0-SNAPSHOT" + }, + "client": { + "ip": "12.53.12.1" }, - "byte_counter": 1, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, - "dotted": { - "float": { - "gauge": 6.12 - } - }, - "double_gauge": 3.141592653589793, "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "event": { + "outcome": "success" }, - "float_gauge": 9.16, "host": { "architecture": "amd64", "hostname": "node-name", @@ -381,45 +482,81 @@ "platform": "Linux" } }, - "integer_gauge": 42767, - "kubernetes": { - "namespace": "default", - "node": { - "name": "node-name" - }, - "pod": { - "name": "instrumented-java-service", - "uid": "b17f231da0ad128dc6c6c0b2e82f6f303d3893e3" - } - }, - "labels": { - "ab_testing": true, - "code": 200, - "group": "experimental", - "segment": 5, - "success": true - }, - "long_gauge": 3147483648, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", + "http": { + "request": { + "body.original": { + "additional": { + "bar": 123, + "req": "additionalinformation" + }, + "string": "helloworld" + }, + "cookies": { + "c1": "v1", + "c2": "v2" + }, + "env": { + "GATEWAY_INTERFACE": "CGI/1.1", + "SERVER_SOFTWARE": "nginx" + }, + "headers": { + "Content-Type": [ + "text/html" + ], + "Cookie": [ + "c1=v1,c2=v2" + ], + "Elastic-Apm-Traceparent": [ + "00-33a0bd4cceff0370a7c57d807032688e-69feaabc5b88d7e8-01" + ], + "User-Agent": [ + "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36", + "MozillaChromeEdge" + ] + }, + "method": "POST" + }, + "response": { + "decoded_body_size": 401.9, + "encoded_body_size": 356.9, + "finished": true, + "headers": { + "Content-Type": [ + "application/json" + ] + }, + "headers_sent": true, + "status_code": 200, + "transfer_size": 300 + }, + "version": "1.1" + }, + "kubernetes": { + "namespace": "default", + "node": { + "name": "node-name" + }, + "pod": { + "name": "instrumented-java-service", + "uid": "b17f231da0ad128dc6c6c0b2e82f6f303d3893e3" + } + }, + "labels": { + "ab_testing": true, + "group": "experimental", + "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", + "segment": 5, + "wrapped_reporter": true + }, + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 + }, + "parent": { + "id": "abcdefabcdef01234567" }, "process": { "args": [ @@ -430,8 +567,8 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "metric", - "name": "metric" + "event": "transaction", + "name": "transaction" }, "service": { "environment": "production", @@ -443,7 +580,7 @@ "name": "Java", "version": "10.0.2" }, - "name": "1234_service-12a3", + "name": "experimental-java", "node": { "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, @@ -453,35 +590,57 @@ }, "version": "4.3.0" }, - "short_counter": 227, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 633.288 - } - }, - "subtype": "mysql", - "type": "db" + "source": { + "ip": "12.53.12.1" + }, + "timestamp": { + "us": 1571657444929001 + }, + "trace": { + "id": "0acd456789abcdef0123456789abcdef" }, "transaction": { - "breakdown": { - "count": 12 + "custom": { + 
"(": "notavalidregexandthatisfine", + "and_objects": { + "foo": [ + "bar", + "baz" + ] + }, + "my_key": 1, + "some_other_value": "foobar" }, "duration": { - "count": 2, - "sum": { - "us": 12 - } + "us": 32592 }, - "name": "GET/", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } + "id": "4340a8e0df1906ecbfa9", + "name": "ResourceHttpRequestHandler", + "result": "HTTP2xx", + "sampled": true, + "span_count": { + "dropped": 0, + "started": 17 }, - "type": "request" + "type": "http" + }, + "url": { + "domain": "www.example.com", + "fragment": "#hash", + "full": "https://www.example.com/p/a/t/h?query=string#hash", + "original": "/p/a/t/h?query=string#hash", + "path": "/p/a/t/h", + "port": 8080, + "query": "?query=string", + "scheme": "https" + }, + "user": { + "email": "foo@mail.com", + "id": "99", + "name": "foo" + }, + "user_agent": { + "original": "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36, MozillaChromeEdge" } }, { @@ -491,140 +650,11 @@ "name": "java", "version": "1.10.0" }, - "client": { - "ip": "192.168.0.1" - }, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, "ecs": { - "version": "1.5.0" - }, - "error": { - "culprit": "opbeans.controllers.DTInterceptor.preHandle(DTInterceptor.java:73)", - "custom": { - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foobar" - }, - "exception": [ - { - "attributes": { - "foo": "bar" - }, - "code": "42", - "handled": false, - "message": "Theusernamerootisunknown", - "module": "org.springframework.http.client", - "stacktrace": [ - { - "abs_path": "/tmp/AbstractPlainSocketImpl.java", - "context": { - "post": [ - "line4", - "line5" - ], - "pre": [ - "line1", - "line2" - ] - }, - "exclude_from_grouping": false, - "filename": "AbstractPlainSocketImpl.java", - "function": "connect", - "library_frame": true, - "line": { - "column": 4, - "context": "3", - "number": 3 - }, - "module": "java.net", - "vars": { - "key": "value" - } - }, - { - "exclude_from_grouping": false, - "filename": "AbstractClientHttpRequest.java", - "function": "execute", - "line": { - "number": 102 - }, - "vars": { - "key": "value" - } - } - ], - "type": "java.net.UnknownHostException" - }, - { - "message": "something wrong writing a file", - "type": "InternalDbError" - }, - { - "message": "disk spinning way too fast", - "type": "VeryInternalDbError" - }, - { - "message": "on top of it,internet doesn't work", - "parent": 1, - "type": "ConnectionError" - } - ], - "grouping_key": "9a4054e958afe722b5877e8fac578ff3", - "id": "9876543210abcdeffedcba0123456789", - "log": { - "level": "error", - "logger_name": "http404", - "message": "Request method 'POST' not supported", - "param_message": "Request method 'POST' /events/:event not supported", - "stacktrace": [ - { - "abs_path": "/tmp/Socket.java", - "classname": "Request::Socket", - "context": { - "post": [ - "line4", - "line5" - ], - "pre": [ - "line1", - "line2" - ] - }, - "exclude_from_grouping": false, - "filename": "Socket.java", - "function": "connect", - "library_frame": true, - "line": { - "column": 4, - "context": "line3", - "number": 3 - }, - "module": "java.net", - "vars": { - "key": "value" - } - }, - { - "abs_path": "/tmp/SimpleBufferingClientHttpRequest.java", - "exclude_from_grouping": false, - "filename": "SimpleBufferingClientHttpRequest.java", - "function": "executeInternal", - "line": { - "number": 102 - }, - "vars": { - "key": "value" - } - } - ] - } + "version": 
"1.11.0" }, "host": { "architecture": "amd64", @@ -635,55 +665,6 @@ "platform": "Linux" } }, - "http": { - "request": { - "body": { - "original": "HelloWorld" - }, - "cookies": { - "c1": "v1", - "c2": "v2" - }, - "env": { - "GATEWAY_INTERFACE": "CGI/1.1", - "SERVER_SOFTWARE": "nginx" - }, - "headers": { - "Content-Length": [ - "0" - ], - "Cookie": [ - "c1=v1", - "c2=v2" - ], - "Elastic-Apm-Traceparent": [ - "00-8c21b4b556467a0b17ae5da959b5f388-31301f1fb2998121-01" - ], - "Forwarded": [ - "for=192.168.0.1" - ], - "Host": [ - "opbeans-java:3000" - ] - }, - "method": "post", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1" - } - }, - "response": { - "finished": true, - "headers": { - "Content-Type": [ - "application/json" - ] - }, - "headers_sent": true, - "status_code": 200 - }, - "version": "1.1" - }, "kubernetes": { "namespace": "default", "node": { @@ -696,20 +677,18 @@ }, "labels": { "ab_testing": true, + "code": 200, "group": "experimental", - "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", - "segment": 5 + "segment": 5, + "success": true }, + "metricset.name": "span_breakdown", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "9632587410abcdef" + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -720,22 +699,22 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "error", - "name": "error" + "event": "metric", + "name": "metric" }, "service": { "environment": "production", "framework": { - "name": "Node", - "version": "1" + "name": "spring", + "version": "5.0.0" }, "language": { "name": "Java", - "version": "1.2" + "version": "10.0.2" }, - "name": "service1", + "name": "1234_service-12a3", "node": { - "name": "node-xyz" + "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, "runtime": { "name": "Java", @@ -743,34 +722,22 @@ }, "version": "4.3.0" }, - "source": { - "ip": "192.168.0.1" - }, - "timestamp": { - "us": 1571657444929001 - }, - "trace": { - "id": "0123456789abcdeffedcba0123456789" + "span": { + "self_time": { + "count": 1, + "sum.us": 633 + }, + "subtype": "mysql", + "type": "db" }, "transaction": { - "id": "1234567890987654", - "sampled": true, + "breakdown.count": 12, + "duration": { + "count": 2, + "sum.us": 12 + }, + "name": "GET/", "type": "request" - }, - "url": { - "domain": "www.example.com", - "fragment": "#hash", - "full": "https://www.example.com/p/a/t/h?query=string#hash", - "original": "/p/a/t/h?query=string#hash", - "path": "/p/a/t/h", - "port": 8080, - "query": "?query=string", - "scheme": "https" - }, - "user": { - "email": "user@foo.mail", - "id": "99", - "name": "foo" } } ] diff --git a/beater/test_approved_es_documents/TestPublishIntegrationMetricsets.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationMetricsets.approved.json index f84e77473f7..a17783bbbdf 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationMetricsets.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationMetricsets.approved.json @@ -6,21 +6,12 @@ "name": "elastic-node", "version": "3.14.0" }, - "byte_counter": 1, - "dotted": { - "float": { - "gauge": 6.12 - } - }, - "double_gauge": 3.141592653589793, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, - "float_gauge": 9.16, "host": { "ip": "127.0.0.1" }, - "integer_gauge": 42767, "labels": { 
"code": 200, "some": "abc", @@ -28,27 +19,13 @@ "tag1": "one", "tag2": 2 }, - "long_gauge": 3147483648, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } - }, + "metricset.name": "span_breakdown", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "pid": 1234 @@ -66,34 +43,21 @@ "name": "node-1" } }, - "short_counter": 227, "span": { "self_time": { "count": 1, - "sum": { - "us": 633.288 - } + "sum.us": 633 }, "subtype": "mysql", "type": "db" }, "transaction": { - "breakdown": { - "count": 12 - }, + "breakdown.count": 12, "duration": { "count": 2, - "sum": { - "us": 12 - } + "sum.us": 12 }, "name": "GET /", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } - }, "type": "request" }, "user": { @@ -109,17 +73,55 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" - }, - "go": { - "memstats": { - "heap": { - "sys": { - "bytes": 6520832 - } - } + "version": "1.11.0" + }, + "go.memstats.heap.sys.bytes": 6520832, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", + "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", + "type": "test-apm-server", + "version": "1.2.3", + "version_major": 1 + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" } }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "1.11.0" + }, "host": { "ip": "127.0.0.1" }, @@ -127,13 +129,13 @@ "tag1": "one", "tag2": 2 }, + "metricset.name": "app", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "pid": 1234 @@ -151,6 +153,8 @@ "name": "node-1" } }, + "system.process.cgroup.memory.mem.limit.bytes": 2048, + "system.process.cgroup.memory.mem.usage.bytes": 1024, "user": { "email": "user@mail.com", "id": "axb123hg", @@ -164,7 +168,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" @@ -173,13 +177,13 @@ "tag1": "one", "tag2": 2 }, + "metricset.name": "app", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "pid": 1234 @@ -197,25 +201,76 @@ "name": "node-1" } }, - "system": { - "process": { - "cgroup": { - "memory": { - "mem": { - "limit": { - "bytes": 2048 - }, - "usage": { - "bytes": 1024 - } - }, - "stats": { - "inactive_file": { - "bytes": 48 - } - } - } - } + "system.process.cgroup.cpu.cfs.period.us": 1024, + "system.process.cgroup.cpu.cfs.quota.us": 2048, + "system.process.cgroup.cpu.id": 2048, + "system.process.cgroup.cpu.stats.periods": 2048, + "system.process.cgroup.cpu.stats.throttled.ns": 2048, + 
"system.process.cgroup.cpu.stats.throttled.periods": 2048, + "system.process.cgroup.cpuacct.id": 2048, + "system.process.cgroup.cpuacct.total.ns": 2048, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "_metric_descriptions": { + "latency_distribution": { + "type": "histogram", + "unit": "s" + } + }, + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "latency_distribution": { + "counts": [ + 1, + 2, + 3 + ], + "values": [ + 1.1, + 2.2, + 3.3 + ] + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", + "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", + "type": "test-apm-server", + "version": "1.2.3", + "version_major": 1 + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" } }, "user": { diff --git a/beater/test_approved_es_documents/TestPublishIntegrationMinimalEvents.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationMinimalEvents.approved.json index cb9970dd59c..a29f07a735b 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationMinimalEvents.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationMinimalEvents.approved.json @@ -7,48 +7,114 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, - "event": { - "outcome": "unknown" + "error": { + "grouping_key": "0b9cba09845a097a271c6beb4c6207f3", + "id": "abcdef0123456789", + "log": { + "message": "error log message" + } }, "host": { "ip": "127.0.0.1" }, - "labels": { - "wrapped_reporter": true + "message": "error log message", + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", + "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", + "type": "test-apm-server", + "version": "1.2.3", + "version_major": 1 + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "name": "1234_service-12a3" + }, + "timestamp": { + "us": 1547070053000000 + } + }, + { + "@timestamp": "2019-01-09T21:40:53.000Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" }, + "ecs": { + "version": "1.11.0" + }, + "error": { + "exception": [ + { + "message": "error exception message" + } + ], + "grouping_key": "3a1fb5609458fbb132b44d8fc7cde104", + "id": "abcdef0123456790" + }, + "host": { + "ip": "127.0.0.1" + }, + "message": "error exception message", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { - "event": "transaction", - "name": "transaction" + "event": "error", + "name": "error" }, "service": { "name": "1234_service-12a3" }, "timestamp": { "us": 1547070053000000 + } + }, + { + "@timestamp": "2019-01-09T21:40:53.000Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" }, - "trace": { - "id": "01234567890123456789abcdefabcdef" + "ecs": { + "version": "1.11.0" }, - "transaction": { - "duration": { - "us": 32592 - }, - "id": "abcdef1478523690", - "sampled": true, - "span_count": { - "started": 0 - }, - "type": "request" + "error": { + "exception": [ + { + "type": "error 
exception type" + } + ], + "grouping_key": "fa405fa2bd848dab17207e7b544d9ad4", + "id": "abcdef0123456791" + }, + "host": { + "ip": "127.0.0.1" + }, + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", + "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", + "type": "test-apm-server", + "version": "1.2.3", + "version_major": 1 + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "name": "1234_service-12a3" + }, + "timestamp": { + "us": 1547070053000000 } }, { @@ -58,7 +124,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -68,11 +134,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "ab23456a89012345" @@ -109,7 +174,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -119,11 +184,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "ab23456a89012345" @@ -150,34 +214,6 @@ "id": "0123456789abcdef0123456789abcdef" } }, - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" - }, - "host": { - "ip": "127.0.0.1" - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "name": "1234_service-12a3" - } - }, { "@timestamp": "2019-01-09T21:40:53.000Z", "agent": { @@ -185,115 +221,76 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, - "error": { - "grouping_key": "0b9cba09845a097a271c6beb4c6207f3", - "id": "abcdef0123456789", - "log": { - "message": "error log message" - } + "event": { + "outcome": "unknown" }, "host": { "ip": "127.0.0.1" }, + "labels": { + "wrapped_reporter": true + }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { - "event": "error", - "name": "error" + "event": "transaction", + "name": "transaction" }, "service": { "name": "1234_service-12a3" }, "timestamp": { "us": 1547070053000000 - } - }, - { - "@timestamp": "2019-01-09T21:40:53.000Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" }, - "error": { - "exception": [ - { - "message": "error exception message" - } - ], - "grouping_key": "3a1fb5609458fbb132b44d8fc7cde104", - "id": "abcdef0123456790" - }, - "host": { - "ip": "127.0.0.1" - }, - "observer": { - "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", - "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", - "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "name": "1234_service-12a3" + "trace": { + "id": 
"01234567890123456789abcdefabcdef" }, - "timestamp": { - "us": 1547070053000000 + "transaction": { + "duration": { + "us": 32592 + }, + "id": "abcdef1478523690", + "sampled": true, + "span_count": { + "started": 0 + }, + "type": "request" } }, { - "@timestamp": "2019-01-09T21:40:53.000Z", + "@timestamp": "2017-05-30T18:53:42.281Z", + "a": 3.2, "agent": { "name": "elastic-node", "version": "3.14.0" }, "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "type": "error exception type" - } - ], - "grouping_key": "fa405fa2bd848dab17207e7b544d9ad4", - "id": "abcdef0123456791" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, + "metricset.name": "app", "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { - "event": "error", - "name": "error" + "event": "metric", + "name": "metric" }, "service": { "name": "1234_service-12a3" - }, - "timestamp": { - "us": 1547070053000000 } } ] diff --git a/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfile.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfile.approved.json index 5e0cbf122da..ac2e87d166d 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfile.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfile.approved.json @@ -3,15 +3,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -137,15 +139,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -217,15 +221,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -369,15 +375,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -449,15 +457,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", 
"id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -511,15 +521,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -609,15 +621,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -701,15 +715,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -787,15 +803,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -891,15 +909,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1013,15 +1033,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1069,15 +1091,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1137,15 +1161,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", 
"id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1241,15 +1267,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1375,15 +1403,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1437,15 +1467,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1553,15 +1585,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1657,15 +1691,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1749,15 +1785,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1841,15 +1879,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1963,15 +2003,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - 
"hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2019,15 +2061,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2105,15 +2149,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2167,15 +2213,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2235,15 +2283,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2363,15 +2413,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2443,15 +2495,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2499,15 +2553,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2543,15 +2599,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": 
"00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2623,15 +2681,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2745,15 +2805,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2855,15 +2917,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2965,15 +3029,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3039,15 +3105,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3155,15 +3223,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3217,15 +3287,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3297,15 +3369,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { 
"ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3365,15 +3439,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3487,15 +3563,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3657,15 +3735,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3737,15 +3817,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3835,15 +3917,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3999,15 +4083,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4055,15 +4141,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4183,15 +4271,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, 
"observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4299,15 +4389,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4361,15 +4453,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4447,15 +4541,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4497,15 +4593,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4571,15 +4669,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4627,15 +4727,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4755,15 +4857,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4811,15 +4915,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": 
"127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4855,15 +4961,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4935,15 +5043,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5045,15 +5155,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5137,15 +5249,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5211,15 +5325,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5315,15 +5431,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5419,15 +5537,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5499,15 +5619,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + 
"ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5621,15 +5743,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5707,15 +5831,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5799,15 +5925,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5885,15 +6013,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5959,15 +6089,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6015,15 +6147,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6125,15 +6259,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6259,15 +6395,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + 
"host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6405,15 +6543,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6491,15 +6631,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6547,15 +6689,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6639,15 +6783,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6791,15 +6937,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6925,15 +7073,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7047,15 +7197,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7109,15 +7261,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + 
}, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7177,15 +7331,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7281,15 +7437,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7319,15 +7477,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7369,15 +7529,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7437,15 +7599,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7511,15 +7675,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7585,15 +7751,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7671,15 +7839,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": 
"1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7781,15 +7951,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7849,15 +8021,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7923,15 +8097,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8039,15 +8215,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8107,15 +8285,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8217,15 +8397,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8309,15 +8491,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8401,15 +8585,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + 
"version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8451,15 +8637,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8555,15 +8743,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8629,15 +8819,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8679,15 +8871,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8813,15 +9007,17 @@ { "@timestamp": "2019-11-22T10:30:36.305Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", diff --git a/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfileMetadata.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfileMetadata.approved.json index 6c0608a5794..93ac3029edf 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfileMetadata.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationProfileCPUProfileMetadata.approved.json @@ -7,18 +7,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -151,18 +150,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": 
"127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -241,18 +239,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -403,18 +400,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -493,18 +489,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -565,18 +560,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -673,18 +667,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -775,18 +768,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -871,18 +863,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -985,18 +976,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": 
"1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1117,18 +1107,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1183,18 +1172,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1261,18 +1249,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1375,18 +1362,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1519,18 +1505,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1591,18 +1576,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1717,18 +1701,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1831,18 +1814,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1933,18 +1915,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { 
"ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2035,18 +2016,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2167,18 +2147,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2233,18 +2212,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2329,18 +2307,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2401,18 +2378,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2479,18 +2455,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2617,18 +2592,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2707,18 +2681,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + 
"version_major": 1 }, "processor": { "event": "profile", @@ -2773,18 +2746,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2827,18 +2799,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2917,18 +2888,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3049,18 +3019,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3169,18 +3138,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3289,18 +3257,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3373,18 +3340,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3499,18 +3465,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3571,18 +3536,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": 
"00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3661,18 +3625,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3739,18 +3702,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3871,18 +3833,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4051,18 +4012,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4141,18 +4101,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4249,18 +4208,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4423,18 +4381,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4489,18 +4446,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 
}, "processor": { "event": "profile", @@ -4627,18 +4583,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4753,18 +4708,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4825,18 +4779,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4921,18 +4874,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -4981,18 +4933,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5065,18 +5016,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5131,18 +5081,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5269,18 +5218,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5335,18 +5283,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": 
"00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5389,18 +5336,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5479,18 +5425,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5599,18 +5544,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5701,18 +5645,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5785,18 +5728,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -5899,18 +5841,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6013,18 +5954,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6103,18 +6043,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 
}, "processor": { "event": "profile", @@ -6235,18 +6174,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6331,18 +6269,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6433,18 +6370,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6529,18 +6465,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6613,18 +6548,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6679,18 +6613,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6799,18 +6732,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -6943,18 +6875,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7099,18 +7030,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": 
"00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7195,18 +7125,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7261,18 +7190,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7363,18 +7291,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7525,18 +7452,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7669,18 +7595,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7801,18 +7726,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7873,18 +7797,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -7951,18 +7874,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 
}, "processor": { "event": "profile", @@ -8065,18 +7987,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8113,18 +8034,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8173,18 +8093,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8251,18 +8170,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8335,18 +8253,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8419,18 +8336,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8515,18 +8431,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8635,18 +8550,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8713,18 +8627,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": 
"00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8797,18 +8710,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -8923,18 +8835,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9001,18 +8912,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9121,18 +9031,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9223,18 +9132,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9325,18 +9233,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9385,18 +9292,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9499,18 +9405,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 
}, "processor": { "event": "profile", @@ -9583,18 +9488,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9643,18 +9547,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -9787,18 +9690,17 @@ "version": "1.0.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "host": { "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", diff --git a/beater/test_approved_es_documents/TestPublishIntegrationProfileHeapProfile.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationProfileHeapProfile.approved.json index bd38d1312af..ab43b53b07a 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationProfileHeapProfile.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationProfileHeapProfile.approved.json @@ -3,15 +3,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -72,15 +74,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -183,15 +187,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -282,15 +288,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -381,15 +389,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - 
"version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -426,15 +436,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -519,15 +531,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -564,15 +578,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -633,15 +649,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -696,15 +714,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -813,15 +833,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -894,15 +916,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1005,15 +1029,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - 
"version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1086,15 +1112,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1155,15 +1183,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1200,15 +1230,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1245,15 +1277,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1320,15 +1354,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1365,15 +1401,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1578,15 +1616,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1629,15 +1669,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", 
"ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1692,15 +1734,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1827,15 +1871,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -1878,15 +1924,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2007,15 +2055,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2046,15 +2096,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2109,15 +2161,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2178,15 +2232,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2331,15 +2387,17 @@ { "@timestamp": 
"2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2490,15 +2548,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2697,15 +2757,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2814,15 +2876,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -2901,15 +2965,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3036,15 +3102,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3159,15 +3227,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3264,15 +3334,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3459,15 +3531,17 @@ { 
"@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3576,15 +3650,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3693,15 +3769,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3774,15 +3852,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3855,15 +3935,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", @@ -3960,15 +4042,17 @@ { "@timestamp": "2019-11-22T10:30:54.440Z", "ecs": { - "version": "1.5.0" + "version": "1.11.0" + }, + "host": { + "ip": "127.0.0.1" }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "processor": { "event": "profile", diff --git a/beater/test_approved_es_documents/TestPublishIntegrationSpans.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationSpans.approved.json index 82a24027f00..1a562aedba7 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationSpans.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationSpans.approved.json @@ -29,13 +29,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "success" @@ -64,11 +67,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": 
"1.2.3", + "version_major": 1 }, "parent": { "id": "abcdef0123456789" @@ -126,6 +128,7 @@ "id": "01af25874dec69dd" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -155,13 +158,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -190,11 +196,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "0000000011111111" @@ -253,6 +258,7 @@ "id": "ab45781d265894fe" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -282,13 +288,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -320,11 +329,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdefabcdef7890" @@ -384,6 +392,7 @@ "id": "ab23456a89012345" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -413,13 +422,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -448,11 +460,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "ababcdcdefefabde" @@ -511,6 +522,7 @@ "id": "abcdef0123456789abcdef9876543210" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -541,7 +553,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -552,7 +567,7 @@ "port": 5432 }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "success" @@ -566,6 +581,17 @@ "platform": "darwin" } }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "decoded_body_size": 401, + "encoded_body_size": 356, + "status_code": 200, + "transfer_size": 300.12 + } + }, "kubernetes": { "namespace": "namespace1", "node": { @@ -581,11 +607,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdef0123456789" @@ -646,17 +671,15 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "decoded_body_size": 401, "encoded_body_size": 356, "status_code": 200, 
"transfer_size": 300.12 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "1234567890aaaade", "name": "SELECT FROM product_types", "stacktrace": [ @@ -721,7 +744,11 @@ "trace": { "id": "abcdef0123456789abcdef9876543210" }, + "url": { + "original": "http://localhost:8000" + }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -751,7 +778,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -761,7 +791,7 @@ "ip": "0:0::0:1" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -790,11 +820,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdef0123456789" @@ -860,6 +889,146 @@ "id": "01af25874dec69dd" }, "user": { + "domain": "ldap://abc", + "email": "s@test.com", + "id": "123", + "name": "john" + } + }, + { + "@timestamp": "2021-07-06T11:58:05.682Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "cloud": { + "account": { + "id": "account_id", + "name": "account_name" + }, + "availability_zone": "cloud_availability_zone", + "instance": { + "id": "instance_id", + "name": "instance_name" + }, + "machine": { + "type": "machine_type" + }, + "project": { + "id": "project_id", + "name": "project_name" + }, + "provider": "cloud_provider", + "region": "cloud_region", + "service": { + "name": "lambda" + } + }, + "container": { + "id": "container-id" + }, + "ecs": { + "version": "1.11.0" + }, + "event": { + "outcome": "success" + }, + "host": { + "architecture": "x64", + "hostname": "node-name", + "ip": "127.0.0.1", + "name": "node-name", + "os": { + "platform": "darwin" + } + }, + "kubernetes": { + "namespace": "namespace1", + "node": { + "name": "node-name" + }, + "pod": { + "name": "pod-name", + "uid": "pod-uid" + } + }, + "labels": { + "tag1": "label1" + }, + "observer": { + "ephemeral_id": "00000000-0000-0000-0000-000000000000", + "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", + "type": "test-apm-server", + "version": "1.2.3", + "version_major": 1 + }, + "parent": { + "id": "abcdef0123456789" + }, + "process": { + "args": [ + "node", + "server.js" + ], + "pid": 1234, + "ppid": 6789, + "title": "node" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "environment": "staging", + "framework": { + "name": "Express", + "version": "1.2.3" + }, + "language": { + "name": "ecmascript", + "version": "8" + }, + "name": "backendspans", + "node": { + "name": "container-id" + }, + "runtime": { + "name": "node", + "version": "8.0.0" + }, + "version": "5.1.3" + }, + "span": { + "action": "query", + "composite": { + "compression_strategy": "exact_match", + "count": 10, + "sum": { + "us": 359298 + } + }, + "duration": { + "us": 378191 + }, + "id": "abcdef01234567", + "name": "SELECT FROM p_details", + "start": { + "us": 2830 + }, + "subtype": "postgresql", + "type": "db" + }, + "timestamp": { + "us": 1625572685682272 + }, + "trace": { + "id": "edcbaf0123456789abcdef9876543210" + }, + "transaction": { + "id": "01af25874dec69dd" + }, + "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" diff --git 
a/beater/test_approved_es_documents/TestPublishIntegrationTransactions.approved.json b/beater/test_approved_es_documents/TestPublishIntegrationTransactions.approved.json index 565ffdca65b..82123b0d700 100644 --- a/beater/test_approved_es_documents/TestPublishIntegrationTransactions.approved.json +++ b/beater/test_approved_es_documents/TestPublishIntegrationTransactions.approved.json @@ -24,13 +24,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -61,11 +64,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdefabcdef01234567" @@ -153,13 +155,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "success" @@ -175,14 +180,12 @@ }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } + "body.original": { + "additional": { + "bar": 123, + "req": "additional information" + }, + "str": "hello world" }, "cookies": { "c1": "v1", @@ -212,12 +215,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "decoded_body_size": 29.9, @@ -254,11 +253,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -319,10 +317,6 @@ }, "id": "4340a8e0df1906ecbfa9", "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "result": "success", "sampled": true, "span_count": { @@ -341,6 +335,7 @@ "scheme": "https" }, "user": { + "domain": "ldap://abc", "id": "99", "name": "foo" }, @@ -373,13 +368,16 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -395,10 +393,7 @@ }, "http": { "request": { - "method": "post", - "socket": { - "remote_address": "192.0.1" - } + "method": "POST" } }, "kubernetes": { @@ -418,11 +413,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "process": { "args": [ @@ -470,6 +464,11 @@ "experience": { "cls": 1, "fid": 2, + "longtask": { + "count": 3, + "max": 1, + "sum": 2.5 + }, "tbt": 3.4 }, "id": "cdef4340a8e0df19", @@ -520,13 +519,16 @@ "name": "project_name" }, "provider": 
"cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -557,11 +559,10 @@ }, "observer": { "ephemeral_id": "00000000-0000-0000-0000-000000000000", - "hostname": "", "id": "fbba762a-14dd-412c-b7e9-b79f903eb492", "type": "test-apm-server", - "version": "8.0.0", - "version_major": 8 + "version": "1.2.3", + "version_major": 1 }, "parent": { "id": "abcdefabcdef01234567" @@ -599,6 +600,10 @@ }, "version": "5.1.3" }, + "session": { + "id": "sunday", + "sequence": 123 + }, "timestamp": { "us": 1547070053000000 }, diff --git a/beater/tracing.go b/beater/tracing.go index 30a4327bf36..4817aef0d4f 100644 --- a/beater/tracing.go +++ b/beater/tracing.go @@ -22,70 +22,111 @@ import ( "net" "net/http" - "go.elastic.co/apm" - + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/apm-server/agentcfg" "github.com/elastic/apm-server/beater/api" + "github.com/elastic/apm-server/beater/auth" "github.com/elastic/apm-server/beater/config" - logs "github.com/elastic/apm-server/log" + "github.com/elastic/apm-server/beater/ratelimit" + "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/publish" ) -func init() { - apm.DefaultTracer.Close() -} - type tracerServer struct { - cfg *config.Config - logger *logp.Logger server *http.Server - listener net.Listener + logger *logp.Logger + requests <-chan tracerServerRequest } -func newTracerServer(cfg *config.Config, listener net.Listener) *tracerServer { - if listener == nil { +func newTracerServer(listener net.Listener, logger *logp.Logger) (*tracerServer, error) { + requests := make(chan tracerServerRequest) + nopReporter := func(ctx context.Context, _ publish.PendingReq) error { return nil } - - cfgCopy := *cfg // Copy cfg so we can disable auth - cfg = &cfgCopy - cfg.SecretToken = "" - cfg.APIKeyConfig = nil - + processBatch := model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + result := make(chan error, 1) + request := tracerServerRequest{ctx: ctx, batch: batch, res: result} + select { + case <-ctx.Done(): + return ctx.Err() + case requests <- request: + } + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-result: + return err + } + }) + cfg := config.DefaultConfig() + ratelimitStore, err := ratelimit.NewStore(1, 1, 1) // unused, arbitrary params + if err != nil { + return nil, err + } + authenticator, err := auth.NewAuthenticator(config.AgentAuth{}) + if err != nil { + return nil, err + } + mux, err := api.NewMux( + beat.Info{}, + cfg, + nopReporter, + processBatch, + authenticator, + agentcfg.NewFetcher(cfg), + ratelimitStore, + nil, // no sourcemap store + false, // not managed + func() bool { return true }, // ready for publishing + ) + if err != nil { + return nil, err + } server := &http.Server{ + Handler: mux, IdleTimeout: cfg.IdleTimeout, ReadTimeout: cfg.ReadTimeout, WriteTimeout: cfg.WriteTimeout, MaxHeaderBytes: cfg.MaxHeaderSize, } - + go func() { + if err := server.Serve(listener); err != http.ErrServerClosed { + logger.Error(err.Error()) + } + }() return &tracerServer{ - cfg: cfg, - logger: logp.NewLogger(logs.Beater), server: server, - listener: listener, - } + logger: logger, + requests: requests, + }, nil } -func (s *tracerServer) serve(report publish.Reporter) error { - 
mux, err := api.NewMux(s.cfg, report) - if err != nil { - return err - } - s.server.Handler = mux - if err := s.server.Serve(s.listener); err != http.ErrServerClosed { - return err - } - return nil +// Close closes the tracerServer's listener. +func (s *tracerServer) Close() error { + return s.server.Shutdown(context.Background()) } -func (s *tracerServer) stop() { - err := s.server.Shutdown(context.Background()) - if err != nil { - s.logger.Error(err.Error()) - if err := s.server.Close(); err != nil { - s.logger.Error(err.Error()) +// serve serves batch processing requests for the tracer server. +// +// This may be called multiple times in series, but not concurrently. +func (s *tracerServer) serve(ctx context.Context, batchProcessor model.BatchProcessor) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case req := <-s.requests: + // Disable tracing for requests that come through the + // tracer server, to avoid recursive tracing. + req.ctx = context.WithValue(req.ctx, disablePublisherTracingKey{}, true) + req.res <- batchProcessor.ProcessBatch(req.ctx, req.batch) } } } + +type tracerServerRequest struct { + ctx context.Context + batch *model.Batch + res chan<- error +} diff --git a/beater/tracing_test.go b/beater/tracing_test.go index 51b9e61338c..0476663000e 100644 --- a/beater/tracing_test.go +++ b/beater/tracing_test.go @@ -29,16 +29,15 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/apm-server/beater/api" - "github.com/elastic/apm-server/tests" ) // transactions from testdata/intake-v2/transactions.ndjson used to trigger tracing -var testTransactionIds = tests.NewSet( - "945254c567a5417e", - "4340a8e0df1906ecbfa9", - "cdef4340a8e0df19", - "00xxxxFFaaaa1234", -) +var testTransactionIds = map[string]bool{ + "945254c567a5417e": true, + "4340a8e0df1906ecbfa9": true, + "cdef4340a8e0df19": true, + "00xxxxFFaaaa1234": true, +} func TestServerTracingEnabled(t *testing.T) { events, teardown := setupTestServerInstrumentation(t, true) @@ -49,7 +48,7 @@ func TestServerTracingEnabled(t *testing.T) { for len(selfTransactions) < 2 { select { case e := <-txEvents: - if testTransactionIds.Contains(eventTransactionId(e)) { + if testTransactionIds[eventTransactionId(e)] { continue } @@ -85,7 +84,7 @@ func TestServerTracingDisabled(t *testing.T) { for { select { case e := <-txEvents: - assert.True(t, testTransactionIds.Contains(eventTransactionId(e))) + assert.Contains(t, testTransactionIds, eventTransactionId(e)) case <-time.After(time.Second): return } diff --git a/beater/waitready.go b/beater/waitready.go new file mode 100644 index 00000000000..399ebe884b4 --- /dev/null +++ b/beater/waitready.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
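Note on the beater/tracing.go change above: the new tracerServer no longer owns a reporter; HTTP handlers convert incoming payloads into tracerServerRequest values and push them over an unbuffered channel, and serve drains that channel with whichever model.BatchProcessor is supplied on each call. A minimal, self-contained sketch of this request/response-channel hand-off follows; the names request, submit, serve, and process are illustrative, not taken from the change, and only the pattern mirrors the real code:

package main

import (
	"context"
	"fmt"
)

// request carries one unit of work plus a channel for its result,
// mirroring the tracerServerRequest type in the diff above.
type request struct {
	ctx context.Context
	msg string
	res chan<- error
}

// submit hands one request to the serving loop and waits for the result,
// honouring cancellation on both sides of the hand-off.
func submit(ctx context.Context, requests chan<- request, msg string) error {
	res := make(chan error, 1) // buffered so the server never blocks on reply
	select {
	case <-ctx.Done():
		return ctx.Err()
	case requests <- request{ctx: ctx, msg: msg, res: res}:
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-res:
		return err
	}
}

// serve drains requests until ctx is cancelled, processing each with the
// supplied function -- analogous to serve(ctx, batchProcessor) above.
func serve(ctx context.Context, requests <-chan request, process func(context.Context, string) error) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case req := <-requests:
			req.res <- process(req.ctx, req.msg)
		}
	}
}

func main() {
	requests := make(chan request)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go serve(ctx, requests, func(_ context.Context, msg string) error {
		fmt.Println("processed:", msg)
		return nil
	})
	if err := submit(ctx, requests, "hello"); err != nil {
		fmt.Println("submit failed:", err)
	}
}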
+ +package beater + +import ( + "context" + "net/http" + "time" + + "go.elastic.co/apm" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +// waitReady waits for preconditions to be satisfied, by calling check +// in a loop every interval until ctx is cancelled or check returns nil. +func waitReady( + ctx context.Context, + interval time.Duration, + tracer *apm.Tracer, + logger *logp.Logger, + check func(context.Context) error, +) error { + logger.Info("waiting for preconditions") + tx := tracer.StartTransaction("wait_for_preconditions", "init") + ctx = apm.ContextWithTransaction(ctx, tx) + var ticker *time.Ticker + for { + if ticker == nil { + // We start the ticker on the first iteration, rather than + // before the loop, so we don't have to wait for a tick + // (5 seconds by default) before performing the first check. + ticker = time.NewTicker(interval) + defer ticker.Stop() + } else { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } + if err := check(ctx); err != nil { + logger.Errorf("error checking preconditions: %s", err) + continue + } + return nil + } +} + +// waitReadyRoundTripper wraps a *net/http.Transport, ensuring the server's +// indexing preconditions have been satisfied by waiting for the "ready" channel +// to be signalled, prior to allowing any requests through. +// +// This is used to prevent elasticsearch clients from proceeding with requests +// until the APM integration is installed to ensure we don't index any documents +// prior to the data stream index templates being ready. +type waitReadyRoundTripper struct { + *http.Transport + ready <-chan struct{} +} + +func (c *waitReadyRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + select { + case <-c.ready: + case <-r.Context().Done(): + return nil, r.Context().Err() + } + return c.Transport.RoundTrip(r) +} diff --git a/changelogs/6.8.asciidoc b/changelogs/6.8.asciidoc index a43c01ec0e3..6d2b3b76aad 100644 --- a/changelogs/6.8.asciidoc +++ b/changelogs/6.8.asciidoc @@ -3,6 +3,13 @@ https://github.com/elastic/apm-server/compare/6.7\...6.8[View commits] +* <> +* <> +* <> +* <> +* <> +* <> +* <> * <> * <> * <> @@ -16,6 +23,62 @@ https://github.com/elastic/apm-server/compare/6.7\...6.8[View commits] * <> * <> +[float] +[[release-notes-6.8.18]] +=== APM Server version 6.8.18 + +https://github.com/elastic/apm-server/compare/v6.8.17\...v6.8.18[View commits] + +No significant changes. + +[float] +[[release-notes-6.8.17]] +=== APM Server version 6.8.17 + +https://github.com/elastic/apm-server/compare/v6.8.16\...v6.8.17[View commits] + +No significant changes. + +[float] +[[release-notes-6.8.16]] +=== APM Server version 6.8.16 + +https://github.com/elastic/apm-server/compare/v6.8.15\...v6.8.16[View commits] + +No significant changes. + +[float] +[[release-notes-6.8.15]] +=== APM Server version 6.8.15 + +https://github.com/elastic/apm-server/compare/v6.8.14\...v6.8.15[View commits] + +No significant changes. + +[float] +[[release-notes-6.8.14]] +=== APM Server version 6.8.14 + +https://github.com/elastic/apm-server/compare/v6.8.13\...v6.8.14[View commits] + +No significant changes. + +[float] +[[release-notes-6.8.13]] +=== APM Server version 6.8.13 + +https://github.com/elastic/apm-server/compare/v6.8.12\...v6.8.13[View commits] + +No significant changes.
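The waitReadyRoundTripper added in beater/waitready.go above gates outgoing Elasticsearch requests on a readiness channel. A hedged usage sketch follows, assuming a hypothetical readiness goroutine and an illustrative URL; the type itself is copied verbatim so the sketch is self-contained:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitReadyRoundTripper is reproduced from beater/waitready.go above
// so that this sketch compiles on its own.
type waitReadyRoundTripper struct {
	*http.Transport
	ready <-chan struct{}
}

func (c *waitReadyRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	select {
	case <-c.ready:
	case <-r.Context().Done():
		return nil, r.Context().Err()
	}
	return c.Transport.RoundTrip(r)
}

func main() {
	ready := make(chan struct{})
	client := &http.Client{
		Transport: &waitReadyRoundTripper{
			Transport: http.DefaultTransport.(*http.Transport),
			ready:     ready,
		},
		Timeout: 10 * time.Second,
	}

	// Hypothetical readiness signal: in the server this would be closed
	// once the data stream index templates are confirmed installed.
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(ready)
	}()

	resp, err := client.Get("http://localhost:8200/") // illustrative URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}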
+ +[float] +[[release-notes-6.8.12]] +=== APM Server version 6.8.12 + +https://github.com/elastic/apm-server/compare/v6.8.11\...v6.8.12[View commits] + +No significant changes. + [float] [[release-notes-6.8.11]] === APM Server version 6.8.11 diff --git a/changelogs/7.0.asciidoc b/changelogs/7.0.asciidoc index ea643c16813..b55dfaf9ab6 100644 --- a/changelogs/7.0.asciidoc +++ b/changelogs/7.0.asciidoc @@ -25,7 +25,7 @@ https://github.com/elastic/apm-server/compare/v7.0.0\...v7.0.1[View commits] [[release-notes-7.0.0]] === APM Server version 7.0.0 -https://github.com/elastic/apm-server/compare/v6.8.5\...v7.0.0[View commits] +https://github.com/elastic/apm-server/compare/v6.8.18\...v7.0.0[View commits] These release notes include all changes made in the alpha, beta, and RC releases of 7.0.0. diff --git a/changelogs/7.10.asciidoc b/changelogs/7.10.asciidoc new file mode 100644 index 00000000000..29f94f9f184 --- /dev/null +++ b/changelogs/7.10.asciidoc @@ -0,0 +1,77 @@ +[[release-notes-7.10]] +== APM Server version 7.10 + +https://github.com/elastic/apm-server/compare/7.9\...7.10[View commits] + +* <> +* <> +* <> + +[float] +[[release-notes-7.10.2]] +=== APM Server version 7.10.2 + +https://github.com/elastic/apm-server/compare/v7.10.1\...v7.10.2[View commits] + +No significant changes. + +[float] +[[release-notes-7.10.1]] +=== APM Server version 7.10.1 + +https://github.com/elastic/apm-server/compare/v7.10.0\...v7.10.1[View commits] + +[float] +==== Added +* Upgrade Go to 1.14.12 {pull}4478[4478] + +[float] +==== Bug fixes +* Add maxLen=1024 requirement to `metadata.system.container.id` {pull}4429[4429] + +[float] +[[release-notes-7.10.0]] +=== APM Server version 7.10.0 + +https://github.com/elastic/apm-server/compare/v7.9.2\...v7.10.0[View commits] + +[float] +==== Breaking Changes + +[float] +==== Bug fixes + +* Transaction metrics aggregation now flushes on shutdown, respecting apm-server.shutdown_timeout {pull}3971[3971] +* De-dot Jaeger process tag keys, fixing indexing errors when using jaeger-php {pull}4191[4191] +* Fix json schema validation on `metadata.service.*` fields {pull}4142[4142] +* Fix regression where policy_name was ignored in ILM setup {pull}4354[4354] + +[float] +==== Intake API Changes +* Changed error messages for invalid events due to internal changes of decoder logic {pull}4261[4261] + +[float] +==== Added + +* Use peer.address for destinationService.Resource if peer.address is not given on Jaeger span {pull}3975[3975] +* Add event.duration to API request logs {pull}4030[4030] +* Set destination.service.* from http.url for Jaeger spans {pull}4046[4046] +* Use service.version for Metadata.Service.Version when converting a Jaeger span {pull}4061[4061] +* Report basic telemetry {pull}4055[4055] +* Add transaction.experience fields {pull}4056[4056] +* Upgrade Go to 1.14.7 {pull}4067[4067] +* Aggregate service destination span metrics {pull}4077[4077] +* Added apm-server.kibana.headers configuration {pull}4087[4087] +* Add a new Docker image based on UBI minimal 8 to packaging. 
{pull}4105[4105] +* Add event.outcome to transactions and spans {pull}4064[4064] +* Add event.outcome to aggregated transaction metrics {pull}4110[4110] +* Set event.outcome for Jaeger spans based on http.status_code {pull}4127[4127] +* Set event.outcome for transactions and spans based on http.status_code {pull}4165[4165] +* Add mapping for `system.process.cgroup.*` metrics {pull}4176[4176] +* Use transaction.sample_rate to calculate transaction metrics {pull}4212[4212] +* Add longtask metric fields to transaction.experience {pull}4230[4230] + +[float] +==== Comments + +A big thank you to https://github.com/tobiasstadler[@tobiasstadler] for their contributions to this release! diff --git a/changelogs/7.11.asciidoc b/changelogs/7.11.asciidoc new file mode 100644 index 00000000000..6192eb4c612 --- /dev/null +++ b/changelogs/7.11.asciidoc @@ -0,0 +1,55 @@ +[[release-notes-7.11]] +== APM Server version 7.11 + +https://github.com/elastic/apm-server/compare/7.10\...7.11[View commits] + +* <> +* <> +* <> + +[float] +[[release-notes-7.11.2]] +=== APM Server version 7.11.2 + +https://github.com/elastic/apm-server/compare/v7.11.1\...v7.11.2[View commits] + +No significant changes. + +[float] +[[release-notes-7.11.1]] +=== APM Server version 7.11.1 + +https://github.com/elastic/apm-server/compare/v7.11.0\...v7.11.1[View commits] + +No significant changes. + +[float] +[[release-notes-7.11.0]] +=== APM Server version 7.11.0 + +https://github.com/elastic/apm-server/compare/v7.10.2\...v7.11.0[View commits] + +[float] +==== Bug fixes +* JSON schema metricset: nest type and subtype under span {pull}4329[4329] +* JSON schema metricset: nest type and name under transaction {pull}4329[4329] +* Carriage returns are now stripped from source-mapped context source lines {pull}4348[4348] +* Remove config defaults from `apm-server export config` {pull}4458[4458] + +[float] +==== Intake API Changes +* Intake RUM v3: Changed error messages for invalid events due to internal changes of decoder logic {pull}4358[4358] +* Auto-generate JSON schemas for Intake API v2 and v3/rum {pull}4393[4393] + +[float] +==== Added +* Monitoring for aggregation of transaction metrics {pull}4287[4287] +* Log warnings in aggregation of transaction metrics when grouping limit is reached {pull}4313[4313] +* Configurable tail-based sampling policies {pull}4320[4320] +* Monitoring and telemetry for tail-based sampling {pull}4346[4346] {pull}4360[4360] +* Experimental support for data streams {pull}4409[4409] +* Label/custom/mark keys are now sanitized (rather than validated and rejected) by the server {pull}4465[4465] +* Upgrade Go to 1.14.12 {pull}4478[4478] +* Added apm-server.response_headers config {pull}4523[4523] +* Switch logging format to be ECS compliant where possible {pull}3829[3829] +* Switch logging format to JSON to work with ECS logging {pull}4590[4590] diff --git a/changelogs/7.12.asciidoc b/changelogs/7.12.asciidoc new file mode 100644 index 00000000000..7753415dd6d --- /dev/null +++ b/changelogs/7.12.asciidoc @@ -0,0 +1,55 @@ +[[release-notes-7.12]] +== APM Server version 7.12 + +https://github.com/elastic/apm-server/compare/7.11\...7.12[View commits] + +* <> +* <> + +[float] +[[release-notes-7.12.1]] +=== APM Server version 7.12.1 + +https://github.com/elastic/apm-server/compare/v7.12.0\...v7.12.1[View commits] + +[float] +==== Added +* Upgrade Go to 1.15.9 {pull}5100[5100] + +[float] +[[release-notes-7.12.0]] +=== APM Server version 7.12.0 +
+https://github.com/elastic/apm-server/compare/v7.11.2\...v7.12.0[View commits] + +[float] +==== Breaking Changes +* Leading 0s are no longer removed from trace/span ids if they are created by Jaeger {pull}4671[4671] +* Jaeger spans will now have a type of "app" where they previously were "custom" {pull}4711[4711] +* Jaeger spans may now have a (more accurate) outcome of "unknown" where they previously were "success" {pull}4711[4711] + +[float] +==== Bug fixes +* Dynamic templates for labels are no longer repeated {pull}4695[4695] +* Tail-based sampling policies are now always matched in the order given {pull}4685[4685] + +[float] +==== Intake API Changes +* Add `cloud.service.name` support for metadata {pull}4626[4626] + +[float] +==== Added +* Jaeger gRPC is now served over the same port as the Elastic APM agent protocol {pull}4618[4618] +* Support for reloading config in Fleet mode, gracefully stopping the HTTP server and starting a new one {pull}4623[4623] +* Add a `_doc_count` field to transaction histogram docs {pull}4647[4647] +* OpenTelemetry Protocol (OTLP) over gRPC is now supported on the standard endpoint (8200) {pull}4677[4677] {pull}4722[4722] +* Add initial support for APM central config and sourcemaps when running under Fleet {pull}4670[4670] +* Data stream and ILM policy for tail-based sampling {pull}4707[4707] +* Add service name to index dataset {pull}4674[4674] +* When tail-sampling is enabled, a default policy must be defined {pull}4729[4729] +* Support additional config options when running under Fleet {pull}4690[4690] +* Upgrade Go to 1.15.8 {pull}4733[4733] + +[float] +==== Deprecated +* `apm-server.jaeger` config is deprecated and will be removed in 8.0. Jaeger is now served on 8200 {pull}4618[4618] diff --git a/changelogs/7.13.asciidoc b/changelogs/7.13.asciidoc new file mode 100644 index 00000000000..c0a49b15bea --- /dev/null +++ b/changelogs/7.13.asciidoc @@ -0,0 +1,87 @@ +[[release-notes-7.13]] +== APM Server version 7.13 + +https://github.com/elastic/apm-server/compare/7.12\...7.13[View commits] + +* <> +* <> +* <> +* <> +* <> + +[float] +[[release-notes-7.13.4]] +=== APM Server version 7.13.4 + +https://github.com/elastic/apm-server/compare/v7.13.3\...v7.13.4[View commits] + +No significant changes. + +[float] +[[release-notes-7.13.3]] +=== APM Server version 7.13.3 + +https://github.com/elastic/apm-server/compare/v7.13.2\...v7.13.3[View commits] + +No significant changes. + +[float] +[[release-notes-7.13.2]] +=== APM Server version 7.13.2 + +https://github.com/elastic/apm-server/compare/v7.13.1\...v7.13.2[View commits] + +No significant changes. + +[float] +[[release-notes-7.13.1]] +=== APM Server version 7.13.1 + +https://github.com/elastic/apm-server/compare/v7.13.0\...v7.13.1[View commits] + +[float] +==== Bug fixes + +* Fix document grouping of translated OpenTelemetry Java metrics {pull}5309[5309] +* model/modeldecoder: fix 32-bit timestamp decoding {pull}5308[5308] +* OpenTelemetry: record array attributes as labels {pull}5286[5286] +* Don't auto-disable ILM due to a failure to communicate with Elasticsearch {pull}5264[5264] + +[float] +[[release-notes-7.13.0]] +=== APM Server version 7.13.0 + +https://github.com/elastic/apm-server/compare/v7.12.1\...v7.13.0[View commits] + +[float] +==== Bug fixes +* Fix `setup.template` config merging {pull}4950[4950] +* The server now responds with 503 instead of 401 when failure is unrelated to API Key validity, e.g. 
if Elasticsearch is inaccessible {pull}5053[5053] +* Fix panic due to misaligned 64-bit access on 32-bit architectures {pull}5277[5277] + +[float] +==== Added +* Add support for Node.js wall time profiles {pull}4728[4728] +* Add `metricset.name` field to metric docs {pull}4857[4857] +* Add `apm-server.default_service_environment` config {pull}4861[4861] +* Transaction histogram metrics are now recorded by default {pull}4882[4882] +* Add `error.grouping_name` field to speed up error grouping aggregations {pull}4886[4886] +* Add support for OpenTelemetry exception span events {pull}4876[4876] +* Set `metricset.name` for breakdown metrics {pull}4910[4910] +* Set log and http responses for server timeout {pull}4918[4918] +* Define ES fields for `cgroup.cpu` and `cgroup.cpuacct` metrics {pull}4956[4956] +* Log gRPC tracing requests {pull}4934[4934] +* Improved coverage of translation of OpenTelemetry resource conventions {pull}4955[4955] +* Set `client.ip` for events from the Elastic APM iOS agent {pull}4975[4975] +* Calculate service destination metrics for OpenTelemetry spans {pull}4976[4976] +* Add exponential retries to API key and tail sampling requests {pull}4991[4991] +* Add `apm-server.rum.allow_service_names` config {pull}5030[5030] +* Ingest pipeline for translating OpenTelemetry Java metrics to Elastic APM fields {pull}4986[4986] +* Set `event.ingested` first in the ingest pipeline {pull}5048[5048] +* The server now responds with a reason for some 401 Unauthorized requests {pull}5053[5053] +* Add `session.id` and `session.sequence` fields for RUM session tracking {pull}5056[5056] +* Support for ingesting `user.domain` {pull}5067[5067] +* Add `"application": "apm"` metadata to API Keys created with `apm-server apikey create` {pull}5090[5090] +* API Key auth is no longer considered experimental {pull}5091[5091] +* Set gRPC status code to `DEADLINE_EXCEEDED` on request timeout {pull}5089[5089] +* Add support for OpenTelemetry RPC semantic conventions {pull}5074[5074] diff --git a/changelogs/7.14.asciidoc b/changelogs/7.14.asciidoc new file mode 100644 index 00000000000..f629591fb20 --- /dev/null +++ b/changelogs/7.14.asciidoc @@ -0,0 +1,57 @@ +[[release-notes-7.14]] +== APM Server version 7.14 + +https://github.com/elastic/apm-server/compare/7.13\...7.14[View commits] + +* <> +* <> + +[float] +[[release-notes-7.14.1]] +=== APM Server version 7.14.1 + +https://github.com/elastic/apm-server/compare/v7.14.0\...v7.14.1[View commits] + +No significant changes.
+ +[float] +[[release-notes-7.14.0]] +=== APM Server version 7.14.0 + +https://github.com/elastic/apm-server/compare/v7.13.4\...v7.14.0[View commits] + +[float] +==== Breaking Changes +* Removed monitoring counters `apm-server.processor.stream.errors.{queue,server,closed}` {pull}5453[5453] + +[float] +==== Bug fixes +* Fix panic on Fleet policy change when transaction metrics or tail-based sampling are enabled {pull}5670[5670] +* Remove multipart form temporary files left behind by source map uploads {pull}5718[5718] +* Removed service name from dataset {pull}5451[5451] +* Fixed tail-based sampling pubsub to use _seq_no correctly {pull}5126[5126] + +[float] +==== Added +* Support fetching sourcemaps from fleet {pull}5410[5410] +* Add support for more input variables in fleet integration {pull}5444[5444] +* Add debug logging of OpenTelemetry payloads {pull}5474[5474] +* Add support for OpenTelemetry labels describing mobile connectivity {pull}5436[5436] +* Introduce `apm-server.auth.*` config {pull}5457[5457] +* Add support for adjusting OTel event timestamps using `telemetry.sdk.elastic_export_timestamp` {pull}5433[5433] +* Add units to metric fields {pull}5395[5395] +* Add support for histograms to metrics intake {pull}5360[5360] +* Display apm-server url in fleet ui's apm-server integration {pull}4895[4895] +* Translate otel messaging.* semantic conventions to ECS {pull}5334[5334] +* Tail-sampling processor now resumes subscription from previous position after restart {pull}5350[5350] +* Add support for dynamic histogram metrics {pull}5239[5239] +* Support setting agent configuration from apm-server.yml {pull}5177[5177] +* Add metric_type and unit to field metadata of system metrics {pull}5230[5230] +* Under fleet, report which agent configs have been applied {pull}5481[5481] +* Server sends its raw config to kibana when running on ECE/ESS {pull}5424[5424] + +[float] +==== Deprecated +* Make `destination.service.name` and `destination.service.type` fields optional and deprecated {pull}5468[5468] +* `apm-server.secret_token` is now `apm-server.auth.secret_token` {pull}5457[5457] +* `apm-server.api_key` is now `apm-server.auth.api_key` {pull}5457[5457] diff --git a/changelogs/7.9.asciidoc b/changelogs/7.9.asciidoc index 5d116ff354c..d93a2c3284f 100644 --- a/changelogs/7.9.asciidoc +++ b/changelogs/7.9.asciidoc @@ -3,9 +3,40 @@ https://github.com/elastic/apm-server/compare/7.8\...7.9[View commits] +[IMPORTANT] +==== +*Known Issue:* APM Server introduced support for cloud metadata in v7.9 ({pull}3729[3729]). +Unfortunately, the JSON Schema was too strict, and does not account for `null` values. +As a result, sending `null` values for cloud metadata causes the payload to be rejected. +This issue was resolved in v7.10.0 ({pull}4142[4142]). +To avoid problems, we recommend updating to version ≥7.10.0. 
+====
+
+* <<release-notes-7.9.3>>
+* <<release-notes-7.9.2>>
 * <<release-notes-7.9.1>>
 * <<release-notes-7.9.0>>
 
+[float]
+[[release-notes-7.9.3]]
+=== APM Server version 7.9.3
+
+https://github.com/elastic/apm-server/compare/v7.9.2\...v7.9.3[View commits]
+
+[float]
+==== Bug fixes
+* Ensure custom index names are lowercased {pull}4295[4295], {pull}4322[4322]
+
+[float]
+[[release-notes-7.9.2]]
+=== APM Server version 7.9.2
+
+https://github.com/elastic/apm-server/compare/v7.9.1\...v7.9.2[View commits]
+
+[float]
+==== Bug fixes
+* De-dot metadata labels set from process and resource tags to prevent indexing errors {pull}4193[4193]
+
 [float]
 [[release-notes-7.9.1]]
 === APM Server version 7.9.1
diff --git a/changelogs/8.0.asciidoc b/changelogs/8.0.asciidoc
new file mode 100644
index 00000000000..5a5d1b7d560
--- /dev/null
+++ b/changelogs/8.0.asciidoc
@@ -0,0 +1,34 @@
+[[release-notes-8.0]]
+== APM Server version 8.0
+
+https://github.com/elastic/apm-server/compare/7.13\...master[View commits]
+
+* <<release-notes-8.0.0-alpha1>>
+
+[float]
+[[release-notes-8.0.0-alpha1]]
+=== APM Server version 8.0.0-alpha1
+
+[float]
+==== Breaking Changes
+* APM Server now responds with 403 (HTTP) and PermissionDenied (gRPC) for authenticated but unauthorized requests {pull}5545[5545]
+* `sourcemap.error` and `sourcemap.updated` are no longer set when a matching source map cannot be found {pull}5631[5631]
+* experimental:["This breaking change applies to the experimental <>."] Removed `service.name` from dataset {pull}5451[5451]
+
+// [float]
+// ==== Bug fixes
+
+[float]
+==== Intake API Changes
+* Add support for composite spans in the intake API {pull}5661[5661]
+
+[float]
+==== Added
+* Upgrade Go to 1.16.5 {pull}5454[5454]
+* Add HTTP span fields as top level ECS fields {pull}5396[5396]
+* Introduce `apm-server.auth.anonymous.*` config {pull}5623[5623]
+* Upgrade Go to 1.16.6 {pull}5754[5754]
+* Introduce ingest pipeline `apm_data_stream_migration` for migrating pre-data stream indices {pull}5768[5768]
+
+// [float]
+// ==== Deprecated
diff --git a/changelogs/head.asciidoc b/changelogs/head.asciidoc
index e5886f1a4fa..ebe9792ed5d 100644
--- a/changelogs/head.asciidoc
+++ b/changelogs/head.asciidoc
@@ -1,36 +1,50 @@
 [[release-notes-head]]
 == APM Server version HEAD
 
-https://github.com/elastic/apm-server/compare/7.9\...master[View commits]
+https://github.com/elastic/apm-server/compare/7.13\...master[View commits]
 
 [float]
 ==== Breaking Changes
+- `network.connection_type` is now `network.connection.type` {pull}5671[5671]
+- `transaction.page` and `error.page` are no longer recorded {pull}5872[5872]
+- experimental:["This breaking change applies to the experimental tail-based sampling feature."] `apm-server.sampling.tail` now requires `apm-server.data_streams.enabled` {pull}5952[5952]
+- beta:["This breaking change applies to the beta <>."] The `traces-sampled-*` data stream is now `traces-apm.sampled-*` {pull}5952[5952]
+- Removed unused stacktrace/frame monitoring counters {pull}5984[5984]
+- Removed unused support for top-level metricsets and metricset tags for RUMv3 {pull}6065[6065]
+- Removed `apm-server.mode` configuration, and "experimental" fields {pull}6086[6086]
+- `transaction.sampled` is now only set for sampled transactions {pull}6066[6066]
+- Unknown metrics are dropped when `transaction.*` or `span.*` are present in a metricset {pull}6111[6111]
+- Removed `metricset.period` from service_destination metrics {pull}6111[6111]
+- Removed `http.request.socket` fields {pull}6152[6152]
 
 [float]
 ==== Bug fixes
-
-* Transaction metrics aggregation now flushes on shutdown,
respecting apm-server.shutdown_timeout {pull}3971[3971]
-* De-dot Jaeger process tag keys, fixing indexing errors when using jaeger-php {pull}4191[4191]
-* Fix json schema validation on `metadata.service.*` fields {pull}[4142]4142
+- Fix apm_error_grouping_name and apm_convert_destination_address {pull}5876[5876]
+- Corrected OTel attribute names for `net.host.connection.*` {pull}5671[5671]
+- Fix response to agent config when running under Fleet with no agent config defined {pull}5917[5917]
+- Fix handling of OTLP sum/gauge metrics with integer values {pull}6106[6106]
 
 [float]
 ==== Intake API Changes
+- `network.connection.type` was added to stream metadata {pull}5671[5671]
 
 [float]
 ==== Added
+- `service_destination` span metrics now take into account composite spans {pull}5896[5896]
+- Add zero-downtime config reloads via `SO_REUSEPORT` {pull}5911[5911]
+- Experimental support for writing data streams in standalone mode {pull}5928[5928]
+- Data streams now define a default `dynamic` mapping parameter, overridable in the `@custom` template {pull}5947[5947]
+- The `error.log.message` or `error.exception.message` field of errors will be copied to the ECS field `message` {pull}5974[5974]
+- Define index sorting for internal metrics data stream {pull}6116[6116]
+- Add histogram dynamic_template to app metrics data stream {pull}6043[6043]
+- Index OpenTelemetry span events and Jaeger logs into a log data stream {pull}6122[6122]
+- With `apm-server.data_streams.enabled` in standalone mode, the server now accepts and enqueues events while waiting for the integration to be installed {pull}6130[6130]
+- HTTP server errors (e.g. TLS handshake errors) are now logged {pull}6141[6141]
+- Span documents now duplicate extended HTTP fields, which were previously only under `span.http.*`, under `http.*` {pull}6147[6147]
+- We now record the direct network peer for incoming requests as `source.ip` and `source.port`; origin IP is recorded in `client.ip` {pull}6152[6152]
-* Use peer.address for destinationService.Resource if peer.address is not given on Jaeger span {pull}3975[3975]
-* Add event.duration to API request logs {pull}4030[4030]
-* Set destination.service.* from http.url for Jaeger spans {pull}4046[4046]
-* Use service.version for Metadata.Service.Version when converting a Jaeger span {pull}4061[4061]
-* Report basic telemetry {pull}4055[4055]
-* Add transaction.experience fields {pull}4056[4056]
-* Upgrade Go to 1.14.7 {pull}4067[4067]
-* Aggregate service destination span metrics {pull}4077[4077]
-* Added apm-server.kibana.headers configuration {pull}4087[4087]
-* Add a new Docker image based on UBI minimal 8 to packaging. {pull}4105[4105]
-* Add event.outcome to transactions and spans {pull}4064[4064]
-* Add event.outcome to aggregated transaction metrics {pull}4110[4110]
-* Set event.outcome for Jaeger spans based on http.status_code {pull}4127[4127]
-* Set event.outcome for transactions and spans based on http.status_code {pull}4165[4165]
-* Add mapping for `system.process.cgroup.*` metrics {pull}[4176]4176
+[float]
+==== Deprecated
+- Setting `service.version` as a span tag (Jaeger) or attribute (OTel) is deprecated; use tracer tags (Jaeger) and resource attributes (OTel) {pull}6131[6131]
+- Setting up Elasticsearch templates, ILM policies, and pipelines directly with apm-server is now deprecated.
Users should use the integration package {pull}6145[6145] +- `span.http.*` fields are deprecated, replaced by `http.*`, and will be removed in 8.0 {pull}6147[6147] diff --git a/cmd/apikey.go b/cmd/apikey.go index 49aab1e346c..15b8babd63f 100644 --- a/cmd/apikey.go +++ b/cmd/apikey.go @@ -39,7 +39,7 @@ import ( "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/beater/headers" - auth "github.com/elastic/apm-server/beater/authorization" + "github.com/elastic/apm-server/beater/auth" es "github.com/elastic/apm-server/elasticsearch" ) @@ -49,9 +49,9 @@ func genApikeyCmd(settings instance.Settings) *cobra.Command { apikeyCmd := cobra.Command{ Use: "apikey", Short: short, - Long: short + `. -Most operations require the "manage_api_key" cluster privilege. Ensure to configure "apm-server.api_key.*" or -"output.elasticsearch.*" appropriately. APM Server will create security privileges for the "apm" application; + Long: short + `. +Most operations require the "manage_api_key" cluster privilege. Ensure to configure "apm-server.api_key.*" or +"output.elasticsearch.*" appropriately. APM Server will create security privileges for the "apm" application; you can freely query them. If you modify or delete apm privileges, APM Server might reject all requests. Check the Elastic Security API documentation for details.`, } @@ -78,7 +78,7 @@ If no privilege(s) are specified, the API Key will be valid for all.`, privileges := booleansToPrivileges(ingest, sourcemap, agentConfig) if len(privileges) == 0 { // No privileges specified, grant all. - privileges = auth.ActionsAll() + privileges = auth.AllPrivilegeActions() } return createAPIKey(client, keyName, expiration, privileges, json) }), @@ -116,7 +116,7 @@ If neither of them are, an error will be returned.`, // TODO(axw) this should trigger usage return errors.New(`either "id" or "name" are required`) } - return invalidateAPIKey(client, &id, &name, json) + return invalidateAPIKey(client, id, name, json) }), } invalidate.Flags().StringVar(&id, "id", "", "id of the API Key to delete") @@ -170,8 +170,7 @@ If no privilege(s) are specified, the credentials will be queried for all.` Run: makeAPIKeyRun(settings, &json, func(client es.Client, config *config.Config, args []string) error { privileges := booleansToPrivileges(ingest, sourcemap, agentConfig) if len(privileges) == 0 { - // can't use "*" for querying - privileges = auth.ActionsAll() + privileges = auth.AllPrivilegeActions() } return verifyAPIKey(config, privileges, credentials, json) }), @@ -245,7 +244,7 @@ func bootstrap(settings instance.Settings) (es.Client, *config.Config, error) { return nil, nil, err } - client, err := es.NewClient(beaterConfig.APIKeyConfig.ESConfig) + client, err := es.NewClient(beaterConfig.AgentAuth.APIKey.ESConfig) if err != nil { return nil, nil, err } @@ -315,11 +314,12 @@ PUT /_security/role/my_role { { Name: auth.Application, Privileges: privileges, - Resources: []es.Resource{auth.ResourceAny}, + Resources: []es.Resource{"*"}, }, }, }, }, + Metadata: map[string]interface{}{"application": "apm"}, } if expiry != "" { apikeyRequest.Expiration = &expiry @@ -393,17 +393,12 @@ func getAPIKey(client es.Client, id, name *string, validOnly, asJSON bool) error return nil } -func invalidateAPIKey(client es.Client, id, name *string, asJSON bool) error { - if isSet(id) { - name = nil - } else if isSet(name) { - id = nil - } - invalidateKeysRequest := es.InvalidateAPIKeyRequest{ - APIKeyQuery: es.APIKeyQuery{ - ID: id, - Name: 
name, - }, +func invalidateAPIKey(client es.Client, id string, name string, asJSON bool) error { + invalidateKeysRequest := es.InvalidateAPIKeyRequest{} + if id != "" { + invalidateKeysRequest.IDs = []string{id} + } else if name != "" { + invalidateKeysRequest.Name = &name } invalidation, err := es.InvalidateAPIKey(context.Background(), client, invalidateKeysRequest) if err != nil { @@ -418,19 +413,34 @@ func invalidateAPIKey(client es.Client, id, name *string, asJSON bool) error { } func verifyAPIKey(config *config.Config, privileges []es.PrivilegeAction, credentials string, asJSON bool) error { + authenticator, err := auth.NewAuthenticator(config.AgentAuth) + if err != nil { + return err + } + _, authz, err := authenticator.Authenticate(context.Background(), headers.APIKey, credentials) + if err != nil { + return err + } perms := make(es.Permissions) printText, printJSON := printers(asJSON) for _, privilege := range privileges { - builder, err := auth.NewBuilder(config) - if err != nil { - return err + var action auth.Action + switch privilege { + case auth.PrivilegeAgentConfigRead.Action: + action = auth.ActionAgentConfig + case auth.PrivilegeEventWrite.Action: + action = auth.ActionEventIngest + case auth.PrivilegeSourcemapWrite.Action: + action = auth.ActionSourcemapUpload } - authorized, err := builder. - ForPrivilege(privilege). - AuthorizationFor(headers.APIKey, credentials). - AuthorizedFor(context.Background(), auth.ResourceInternal) - if err != nil { - return err + + authorized := true + if err := authz.Authorize(context.Background(), action, auth.Resource{}); err != nil { + if errors.Is(err, auth.ErrUnauthorized) { + authorized = false + } else { + return err + } } perms[privilege] = authorized printText("Authorized for %s...: %s", humanPrivilege(privilege), humanBool(authorized)) @@ -447,12 +457,7 @@ func humanBool(b bool) string { } func humanPrivilege(privilege es.PrivilegeAction) string { - switch privilege { - case auth.ActionAny: - return fmt.Sprintf("all privileges (\"%v\")", privilege) - default: - return fmt.Sprintf("privilege \"%v\"", privilege) - } + return fmt.Sprintf("privilege \"%v\"", privilege) } func humanTime(millis *int64) string { diff --git a/cmd/pprofessor/fetcher.go b/cmd/pprofessor/fetcher.go index 44713a80cab..d2e618bbd9b 100644 --- a/cmd/pprofessor/fetcher.go +++ b/cmd/pprofessor/fetcher.go @@ -166,6 +166,11 @@ func (f *fetcher) fetchProfile( "field": "profile.cpu.ns", }, }, + "wall_us": map[string]interface{}{ + "sum": map[string]interface{}{ + "field": "profile.wall.us", + }, + }, "alloc_objects": map[string]interface{}{ "sum": map[string]interface{}{ "field": "profile.alloc_objects.count", @@ -257,6 +262,7 @@ func (f *fetcher) fetchProfile( if i == 0 { node.samplesCount += int64(stack.SamplesCount.Value) node.cpuNanos += int64(stack.CPUNanos.Value) + node.wallMicros += int64(stack.WallMicros.Value) node.allocObjects += int64(stack.AllocObjects.Value) node.allocSpaceBytes += int64(stack.AllocSpace.Value) node.inuseObjects += int64(stack.InuseObjects.Value) @@ -285,6 +291,7 @@ func (f *fetcher) fetchProfile( SampleType: []*profile.ValueType{ {Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}, + {Type: "wall", Unit: "microseconds"}, {Type: "alloc_objects", Unit: "count"}, {Type: "alloc_space", Unit: "bytes"}, {Type: "inuse_objects", Unit: "count"}, @@ -340,6 +347,7 @@ func (f *fetcher) fetchProfile( Value: []int64{ node.samplesCount, node.cpuNanos, + node.wallMicros, node.allocObjects, node.allocSpaceBytes, node.inuseObjects, @@ 
-377,6 +385,7 @@ type aggregationsResult struct {
 	}
 	DocCount     int             `json:"doc_count"`
 	CPUNanos     numericAggValue `json:"cpu_ns"`
+	WallMicros   numericAggValue `json:"wall_us"`
 	AllocObjects numericAggValue `json:"alloc_objects"`
 	AllocSpace   numericAggValue `json:"alloc_space"`
 	InuseObjects numericAggValue `json:"inuse_objects"`
@@ -416,6 +425,7 @@ type profileNode struct {
 	samplesCount    int64
 	cpuNanos        int64
+	wallMicros      int64
 	allocObjects    int64
 	allocSpaceBytes int64
 	inuseObjects    int64
diff --git a/cmd/root.go b/cmd/root.go
index 037d0920331..789a8d2bf25 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -19,6 +19,9 @@ package cmd
 
 import (
 	"fmt"
+	"math"
+	"os"
+	"strconv"
 
 	"github.com/spf13/pflag"
 
@@ -37,59 +40,56 @@ import (
 const (
 	beatName        = "apm-server"
 	apmIndexPattern = "apm"
+	cloudEnv        = "CLOUD_APM_CAPACITY"
 )
 
-var libbeatConfigOverrides = []cfgfile.ConditionalOverride{{
-	Check: func(_ *common.Config) bool {
-		return true
-	},
-	Config: common.MustNewConfigFrom(map[string]interface{}{
-		"logging": map[string]interface{}{
-			"metrics": map[string]interface{}{
-				"enabled": false,
+var libbeatConfigOverrides = func() []cfgfile.ConditionalOverride {
+	return []cfgfile.ConditionalOverride{
+		{
+			Check: func(_ *common.Config) bool {
+				return true
 			},
-			"files": map[string]interface{}{
-				"rotateeverybytes": 10 * 1024 * 1024,
-			},
-		},
-		"setup": map[string]interface{}{
-			"template": map[string]interface{}{
-				"settings": map[string]interface{}{
-					"index": map[string]interface{}{
-						"codec": "best_compression",
-						"mapping": map[string]interface{}{
-							"total_fields": map[string]int{
-								"limit": 2000,
-							},
-						},
-						"number_of_shards": 1,
-					},
-					"_source": map[string]interface{}{
-						"enabled": true,
+			Config: common.MustNewConfigFrom(map[string]interface{}{
+				"logging": map[string]interface{}{
+					"metrics": map[string]interface{}{
+						"enabled": false,
 					},
+					"ecs":  true,
+					"json": true,
 				},
-			},
+			}),
 		},
-	}),
-},
+		{
+			Check: func(_ *common.Config) bool {
+				return true
+			},
+			Config: func() *common.Config {
+				m := map[string]interface{}{}
+				cloudValues(m)
+				return common.MustNewConfigFrom(m)
+			}(),
+		}}
 }
 
-// NewRootCommand returns the "apm-server" root command.
-func NewRootCommand(newBeat beat.Creator) *cmd.BeatsRootCmd {
-	var runFlags = pflag.NewFlagSet(beatName, pflag.ExitOnError)
-	settings := instance.Settings{
+// DefaultSettings returns the default settings for APM Server to pass into
+// the GenRootCmdWithSettings.
+func DefaultSettings() instance.Settings {
+	return instance.Settings{
 		Name:        beatName,
 		IndexPrefix: apmIndexPattern,
 		Version:     defaultBeatVersion,
-		RunFlags:    runFlags,
+		RunFlags:    pflag.NewFlagSet(beatName, pflag.ExitOnError),
 		Monitoring: report.Settings{
 			DefaultUsername: "apm_system",
 		},
 		IndexManagement: idxmgmt.MakeDefaultSupporter,
 		Processing:      processing.MakeDefaultObserverSupport(false),
-		ConfigOverrides: libbeatConfigOverrides,
+		ConfigOverrides: libbeatConfigOverrides(),
 	}
+}
+// NewRootCommand returns the "apm-server" root command.
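+//
+// A minimal usage sketch (beater.New stands in for whatever beat.Creator the
+// caller provides; it is an assumption here, not defined in this package):
+//
+//	rootCmd := NewRootCommand(beater.New, DefaultSettings())
+//	if err := rootCmd.Execute(); err != nil {
+//		os.Exit(1)
+//	}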
+func NewRootCommand(newBeat beat.Creator, settings instance.Settings) *cmd.BeatsRootCmd { rootCmd := cmd.GenRootCmdWithSettings(newBeat, settings) rootCmd.AddCommand(genApikeyCmd(settings)) modifyBuiltinCommands(rootCmd, settings) @@ -112,12 +112,14 @@ func modifyBuiltinCommands(rootCmd *cmd.BeatsRootCmd, settings instance.Settings // only add defined flags to setup command setup := rootCmd.SetupCmd - setup.Short = "Setup Elasticsearch index management components and pipelines" + setup.Short = "Setup Elasticsearch index management components and pipelines (deprecated)" setup.Long = `This command does initial setup of the environment: * Index management including loading Elasticsearch templates, ILM policies and write aliases. * Ingest pipelines -` + +` + idxmgmt.SetupDeprecatedWarning + "\n" + setup.ResetFlags() //lint:ignore SA1019 Setting up template must still be supported until next major version upgrade. @@ -127,3 +129,33 @@ func modifyBuiltinCommands(rootCmd *cmd.BeatsRootCmd, settings instance.Settings setup.Flags().Bool(cmd.IndexManagementKey, false, "Setup Elasticsearch index management") setup.Flags().Bool(cmd.PipelineKey, false, "Setup ingest pipelines") } + +func cloudValues(m map[string]interface{}) { + cap, err := strconv.ParseFloat(os.Getenv(cloudEnv), 64) + if err != nil { + return + } + multiplier := math.Round(cap / 512) + queueMemEvents := 2000 * multiplier + workers := math.Round(3.72549 + 1.626502*multiplier - 0.03826692*(multiplier*multiplier)) + if cap > 8192 { + workers = 20 //plateau on number of workers + } + bulkMaxSize := math.Round(((queueMemEvents / 1.5) / workers)) + m["output"] = map[string]interface{}{ + "elasticsearch": map[string]interface{}{ + "compression_level": 5, //default to medium compression on cloud + "worker": workers, + "bulk_max_size": bulkMaxSize, + }, + } + m["queue"] = map[string]interface{}{ + "mem": map[string]interface{}{ + "events": queueMemEvents, + "flush": map[string]interface{}{ + "min_events": bulkMaxSize, + "timeout": "1s", //default aligned with cloud value + }, + }, + } +} diff --git a/cmd/root_test.go b/cmd/root_test.go new file mode 100644 index 00000000000..fce667eeb3c --- /dev/null +++ b/cmd/root_test.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
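+
+// Worked example for the cloudValues formula above (a sketch, not additional
+// test coverage): with CLOUD_APM_CAPACITY=2048, multiplier = 2048/512 = 4, so
+// workers = round(3.72549 + 1.626502*4 - 0.03826692*4*4) = round(9.62) = 10,
+// queue.mem.events = 2000*4 = 8000, and
+// bulk_max_size = round((8000/1.5)/10) = 533; this matches the matrix below.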
+ +package cmd + +import ( + "os" + "testing" + + "github.com/elastic/beats/v7/libbeat/common" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCloudEnv(t *testing.T) { + defer os.Unsetenv(cloudEnv) + + // no cloud environment variable set + settings := DefaultSettings() + assert.Len(t, settings.ConfigOverrides, 2) + assert.Equal(t, common.NewConfig(), settings.ConfigOverrides[1].Config) + + // cloud environment picked up + var cloudMatrix = map[string]struct { + worker int + bulkMaxSize int + events int + minEvents int + }{ + "512": {5, 267, 2000, 267}, + "1024": {7, 381, 4000, 381}, + "2048": {10, 533, 8000, 533}, + "4096": {14, 762, 16000, 762}, + "8192": {20, 1067, 32000, 1067}, + "16384": {20, 2133, 64000, 2133}, + "32768": {20, 4267, 128000, 4267}, + } + for capacity, throughputSettings := range cloudMatrix { + os.Setenv(cloudEnv, capacity) + settings = DefaultSettings() + assert.Len(t, settings.ConfigOverrides, 2) + cfg := settings.ConfigOverrides[1].Config + assert.NotNil(t, cfg) + assertEqual(t, cfg, "output.elasticsearch.worker", float64(throughputSettings.worker)) + assertEqual(t, cfg, "output.elasticsearch.bulk_max_size", float64(throughputSettings.bulkMaxSize)) + assertEqual(t, cfg, "queue.mem.events", float64(throughputSettings.events)) + assertEqual(t, cfg, "queue.mem.flush.min_events", float64(throughputSettings.minEvents)) + assertEqual(t, cfg, "output.elasticsearch.compression_level", 5) + } +} + +func assertEqual(t *testing.T, cfg *common.Config, key string, expected float64) { + val, err := cfg.Float(key, -1) + require.NoError(t, err) + assert.Equal(t, expected, val) +} diff --git a/convert/convert.go b/convert/convert.go deleted file mode 100644 index a1d472d23f4..00000000000 --- a/convert/convert.go +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package convert - -import ( - "bytes" - "encoding/json" - "io" -) - -// FromReader reads the given reader into the given interface -func FromReader(r io.ReadCloser, i interface{}) error { - var buf bytes.Buffer - _, err := buf.ReadFrom(r) - return FromBytes(buf.Bytes(), i, err) -} - -// FromBytes reads the given byte slice into the given interface -func FromBytes(bs []byte, i interface{}, err error) error { - if err != nil || len(bs) == 0 { - return err - } - return json.Unmarshal(bs, i) -} - -// ToReader converts a marshall-able interface into a reader -func ToReader(i interface{}) io.Reader { - b, _ := json.Marshal(i) - return bytes.NewReader(b) -} diff --git a/datastreams/constants.go b/datastreams/constants.go new file mode 100644 index 00000000000..2ab6f6f7f5f --- /dev/null +++ b/datastreams/constants.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package datastreams
+
+// Constants for data stream types.
+const (
+	LogsType    = "logs"
+	MetricsType = "metrics"
+	TracesType  = "traces"
+)
+
+// Constants for data stream event metadata fields.
+const (
+	TypeField      = "data_stream.type"
+	DatasetField   = "data_stream.dataset"
+	NamespaceField = "data_stream.namespace"
+)
+
+// IndexFormat holds the variable "index" format to use for the libbeat Elasticsearch output.
+// Each event the server publishes is expected to contain data_stream.* fields, which will be
+// added to the documents as well as be used for routing documents to the correct data stream.
+const IndexFormat = "%{[data_stream.type]}-%{[data_stream.dataset]}-%{[data_stream.namespace]}"
diff --git a/datastreams/servicename.go b/datastreams/servicename.go
new file mode 100644
index 00000000000..a72703a5c1c
--- /dev/null
+++ b/datastreams/servicename.go
@@ -0,0 +1,46 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package datastreams
+
+import "strings"
+
+// NormalizeServiceName translates serviceName into a string suitable
+// for inclusion in a data stream name.
+//
+// Concretely, this function will lowercase the string and replace any
+// reserved characters with "_".
+//
+func NormalizeServiceName(s string) string {
+	s = strings.ToLower(s)
+	s = strings.Map(replaceReservedRune, s)
+	return s
+}
+
+func replaceReservedRune(r rune) rune {
+	switch r {
+	case '\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',', '#', ':':
+		// These characters are not permitted in data stream names
+		// by Elasticsearch.
+		return '_'
+	case '-':
+		// Hyphens are used to separate the data stream type, dataset,
+		// and namespace.
+		return '_'
+	}
+	return r
+}
diff --git a/datastreams/servicename_test.go b/datastreams/servicename_test.go
new file mode 100644
index 00000000000..e71e1532c03
--- /dev/null
+++ b/datastreams/servicename_test.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package datastreams_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/apm-server/datastreams"
+)
+
+func TestNormalizeServiceName(t *testing.T) {
+	testNormalizeServiceName := func(expected, input string) {
+		t.Helper()
+		assert.Equal(t, expected, datastreams.NormalizeServiceName(input))
+	}
+	testNormalizeServiceName("upper_case", "UPPER-CASE")
+	testNormalizeServiceName("____________", "\\/*?\"<>| ,#:")
+}
diff --git a/decoder/decoder.go b/decoder/decoder.go
index 439bd90c2f6..fbe6d02e537 100644
--- a/decoder/decoder.go
+++ b/decoder/decoder.go
@@ -24,22 +24,21 @@ import (
 )
 
 //TODO(simitt): look into config options for performance tuning
-var jsonit = jsoniter.ConfigCompatibleWithStandardLibrary
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
 
 type Decoder interface {
 	Decode(v interface{}) error
 }
 
-// JSONIterDecoder can decode from a given reader, using jsoniter
-// TODO(simitt): rename to JSONDecoder when everything is integrated
-type JSONIterDecoder struct {
+// JSONDecoder decodes JSON from an underlying reader, using jsoniter.
+type JSONDecoder struct {
 	*jsoniter.Decoder
+	reader io.Reader
 }
 
-// NewJSONIteratorDecoder returns a *json.Decoder where numbers are unmarshaled
+// NewJSONDecoder returns a JSONDecoder where numbers are unmarshaled
 // as a Number instead of a float64 into an interface{}
-func NewJSONIteratorDecoder(r io.Reader) JSONIterDecoder {
-	d := jsonit.NewDecoder(r)
+func NewJSONDecoder(r io.Reader) JSONDecoder {
+	d := json.NewDecoder(r)
 	d.UseNumber()
-	return JSONIterDecoder{Decoder: d}
+	return JSONDecoder{Decoder: d, reader: r}
 }
diff --git a/decoder/decoder_test.go b/decoder/decoder_test.go
new file mode 100644
index 00000000000..3e81bea2f57
--- /dev/null
+++ b/decoder/decoder_test.go
@@ -0,0 +1,42 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package decoder_test + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/decoder" +) + +func TestDecodeJSONData(t *testing.T) { + d := decoder.NewJSONDecoder(strings.NewReader( + `{"id":"85925e55b43f4342","system": {"hostname":"prod1.example.com"},"number":123}`, + )) + var decoded map[string]interface{} + err := d.Decode(&decoded) + assert.Nil(t, err) + assert.Equal(t, map[string]interface{}{ + "id": "85925e55b43f4342", + "system": map[string]interface{}{"hostname": "prod1.example.com"}, + "number": json.Number("123"), + }, decoded) +} diff --git a/decoder/req_decoder.go b/decoder/req_decoder.go index 3bc930644e4..b5274d842ac 100644 --- a/decoder/req_decoder.go +++ b/decoder/req_decoder.go @@ -20,7 +20,6 @@ package decoder import ( "compress/gzip" "compress/zlib" - "encoding/json" "io" "net/http" @@ -85,18 +84,3 @@ func CompressedRequestReader(req *http.Request) (io.ReadCloser, error) { readerCounter.Inc() return reader, nil } - -func DecodeJSONData(reader io.Reader) (map[string]interface{}, error) { - v := make(map[string]interface{}) - d := NewJSONDecoder(reader) - if err := d.Decode(&v); err != nil { - return nil, err - } - return v, nil -} - -func NewJSONDecoder(r io.Reader) *json.Decoder { - d := json.NewDecoder(r) - d.UseNumber() - return d -} diff --git a/decoder/req_decoder_test.go b/decoder/req_decoder_test.go deleted file mode 100644 index 905e08f8034..00000000000 --- a/decoder/req_decoder_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package decoder_test - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/decoder" -) - -func TestDecodeJSONData(t *testing.T) { - decoded, err := decoder.DecodeJSONData(strings.NewReader( - `{"id":"85925e55b43f4342","system": {"hostname":"prod1.example.com"},"number":123}`, - )) - assert.Nil(t, err) - assert.Equal(t, map[string]interface{}{ - "id": "85925e55b43f4342", - "system": map[string]interface{}{"hostname": "prod1.example.com"}, - "number": json.Number("123"), - }, decoded) -} diff --git a/decoder/stream_decoder.go b/decoder/stream_decoder.go index 7abf66d940c..22bc001cdac 100644 --- a/decoder/stream_decoder.go +++ b/decoder/stream_decoder.go @@ -20,8 +20,9 @@ package decoder import ( "bufio" "bytes" - "encoding/json" "io" + + jsoniter "github.com/json-iterator/go" ) // NewNDJSONStreamDecoder returns a new NDJSONStreamDecoder which decodes @@ -40,9 +41,10 @@ type NDJSONStreamDecoder struct { lineReader *LineReader isEOF bool + latestError error latestLine []byte latestLineReader bytes.Reader - decoder *json.Decoder + decoder *jsoniter.Decoder } // Reset sets sr's underlying io.Reader to r, and resets any reading/decoding state. @@ -51,35 +53,46 @@ func (dec *NDJSONStreamDecoder) Reset(r io.Reader) { dec.lineReader.Reset(dec.bufioReader) dec.isEOF = false dec.latestLine = nil - dec.latestLineReader.Reset(nil) + dec.resetLatestLineReader() } func (dec *NDJSONStreamDecoder) resetDecoder() { - dec.decoder = NewJSONDecoder(&dec.latestLineReader) + dec.decoder = json.NewDecoder(&dec.latestLineReader) + dec.decoder.UseNumber() } // Decode decodes the next line into v. func (dec *NDJSONStreamDecoder) Decode(v interface{}) error { - buf, readErr := dec.readLine() - if len(buf) == 0 || (readErr != nil && !dec.isEOF) { - return readErr + defer dec.resetLatestLineReader() + if dec.latestLineReader.Size() == 0 { + dec.ReadAhead() + } + if len(dec.latestLine) == 0 || (dec.latestError != nil && !dec.isEOF) { + return dec.latestError } if err := dec.decoder.Decode(v); err != nil { dec.resetDecoder() // clear out decoding state return JSONDecodeError("data read error: " + err.Error()) } - return readErr // this might be io.EOF + return dec.latestError // this might be io.EOF } -func (dec *NDJSONStreamDecoder) readLine() ([]byte, error) { +// ReadAhead reads the next NDJSON line, buffering it for a subsequent call to Decode. 
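+//
+// A typical peek-then-decode sequence (sketch; v is any JSON-decodable value):
+//
+//	raw, err := dec.ReadAhead() // raw holds the next line; err may be io.EOF
+//	...
+//	err = dec.Decode(&v) // consumes the line buffered by ReadAhead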
+func (dec *NDJSONStreamDecoder) ReadAhead() ([]byte, error) { // readLine can return valid data in `buf` _and_ also an io.EOF line, readErr := dec.lineReader.ReadLine() dec.latestLine = line dec.latestLineReader.Reset(dec.latestLine) + dec.latestError = readErr dec.isEOF = readErr == io.EOF return line, readErr } +func (dec *NDJSONStreamDecoder) resetLatestLineReader() { + dec.latestLineReader.Reset(nil) + dec.latestError = nil +} + // IsEOF signals whether the underlying reader reached the end func (dec *NDJSONStreamDecoder) IsEOF() bool { return dec.isEOF } diff --git a/decoder/stream_decoder_test.go b/decoder/stream_decoder_test.go index f17272ce0f9..bba8abce03e 100644 --- a/decoder/stream_decoder_test.go +++ b/decoder/stream_decoder_test.go @@ -19,6 +19,8 @@ package decoder import ( "bytes" + "fmt" + "io" "strings" "testing" @@ -49,8 +51,8 @@ func TestNDStreamReader(t *testing.T) { latestLine: `{"key": "value2", "t`, }, { - out: nil, - errPattern: "invalid character", + out: map[string]interface{}{}, + errPattern: "data read error", latestLine: `{invalid-json}`, }, { @@ -64,20 +66,49 @@ func TestNDStreamReader(t *testing.T) { n := NewNDJSONStreamDecoder(buf, 20) for idx, test := range expected { - var out map[string]interface{} - err := n.Decode(&out) - assert.Equal(t, test.out, out, "Failed at idx %v", idx) - if test.errPattern == "" { - assert.Nil(t, err) - } else { - require.NotNil(t, err, "Failed at idx %v", idx) - assert.Contains(t, err.Error(), test.errPattern, "Failed at idx %v", idx) - } - assert.Equal(t, test.isEOF, n.IsEOF()) - if test.latestLine == "" { - assert.Nil(t, n.LatestLine(), "Failed at idx %v", idx) - } else { - assert.Equal(t, []byte(test.latestLine), n.LatestLine(), "Failed at idx %v", idx) + t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { + var out map[string]interface{} + err := n.Decode(&out) + assert.Equal(t, test.out, out, "Failed at idx %v", idx) + if test.errPattern == "" { + assert.Nil(t, err) + } else { + require.NotNil(t, err, "Failed at idx %v", idx) + assert.Contains(t, err.Error(), test.errPattern, "Failed at idx %v", idx) + } + assert.Equal(t, test.isEOF, n.IsEOF()) + if test.latestLine == "" { + assert.Nil(t, n.LatestLine(), "Failed at idx %v", idx) + } else { + assert.Equal(t, []byte(test.latestLine), n.LatestLine(), "Failed at idx %v", idx) + } + }) + } +} + +func TestNDStreamReaderReadAhead(t *testing.T) { + lines := []string{ + `{"key":"value1"}`, + `{"a": "b"}`, + } + buf := bytes.NewBufferString(strings.Join(lines, "\n")) + n := NewNDJSONStreamDecoder(buf, 100) + + // Decode reads the next line if it hasn't been buffered already + var out map[string]interface{} + require.NoError(t, n.Decode(&out)) + assert.Equal(t, map[string]interface{}{"key": "value1"}, out) + // ReadAhead buffers the next line, to be consumed by the next call to `Decode` + var readAheadOut, decodeOut map[string]interface{} + b, errAhead := n.ReadAhead() + require.NoError(t, json.Unmarshal(b, &readAheadOut)) + assert.Equal(t, map[string]interface{}{"a": "b"}, readAheadOut) + errDecode := n.Decode(&decodeOut) + assert.Equal(t, readAheadOut, decodeOut) + // ReadAhead and Decode return an error for EOF + for _, err := range []error{errAhead, errDecode} { + if assert.Error(t, err) { + assert.Equal(t, io.EOF, err) } } } diff --git a/docker-compose.yml b/docker-compose.yml index 0deb5d7e0d6..f96e1396b69 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,12 +32,18 @@ services: kibana: { condition: service_healthy } elasticsearch: + image: 
docker.elastic.co/elasticsearch/elasticsearch:8.0.0-64718381-SNAPSHOT
     ports:
       - 9200:9200
-    extends:
-      file: ./testing/environments/${TESTING_ENVIRONMENT:-snapshot}.yml
-      service: elasticsearch
+    healthcheck:
+      test: ["CMD-SHELL", "curl -s 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=500ms'"]
+      retries: 300
+      interval: 1s
     environment:
+      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
+      - "network.host="
+      - "transport.host=127.0.0.1"
+      - "http.host=0.0.0.0"
       - "cluster.routing.allocation.disk.threshold_enabled=false"
       - "discovery.type=single-node"
       - "xpack.security.authc.anonymous.roles=remote_monitoring_collector"
@@ -48,19 +54,73 @@ services:
       - "xpack.security.authc.token.enabled=true"
       - "xpack.security.authc.api_key.enabled=true"
       - "logger.org.elasticsearch=${ES_LOG_LEVEL:-error}"
+      - "action.destructive_requires_name=false"
     volumes:
       - "./testing/docker/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml"
       - "./testing/docker/elasticsearch/users:/usr/share/elasticsearch/config/users"
       - "./testing/docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles"
 
   kibana:
+    image: docker.elastic.co/kibana/kibana:8.0.0-64718381-SNAPSHOT
     ports:
       - 5601:5601
-    extends:
-      file: ./testing/environments/${TESTING_ENVIRONMENT:-snapshot}.yml
-      service: kibana
+    healthcheck:
+      test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q 'All services are available'"]
+      retries: 300
+      interval: 1s
     environment:
+      SERVER_HOST: 0.0.0.0
       STATUS_ALLOWANONYMOUS: "true"
       ELASTICSEARCH_URL: elasticsearch:9200
       ELASTICSEARCH_USERNAME: "${KIBANA_ES_USER:-kibana_system_user}"
       ELASTICSEARCH_PASSWORD: "${KIBANA_ES_PASS:-changeme}"
+      XPACK_XPACK_MAIN_TELEMETRY_ENABLED: "false"
+      XPACK_SECURITY_ENCRYPTIONKEY: "fhjskloppd678ehkdfdlliverpoolfcr"
+      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: "fhjskloppd678ehkdfdlliverpoolfcr"
+      XPACK_FLEET_AGENTS_ELASTICSEARCH_HOST: "http://elasticsearch:9200"
+      XPACK_FLEET_AGENTS_TLSCHECKDISABLED: "true"
+      XPACK_FLEET_REGISTRYURL: "http://package-registry:8080"
+    depends_on:
+      elasticsearch: { condition: service_healthy }
+      package-registry: { condition: service_healthy }
+
+  fleet-server:
+    image: docker.elastic.co/beats/elastic-agent:8.0.0-64718381-SNAPSHOT
+    ports:
+      - 8220:8220
+    healthcheck:
+      test: ["CMD-SHELL", "curl -s -k https://localhost:8220/api/status | grep -q 'HEALTHY'"]
+      retries: 300
+      interval: 1s
+    environment:
+      FLEET_SERVER_ENABLE: "1"
+      FLEET_SERVER_ELASTICSEARCH_HOST: http://elasticsearch:9200
+      FLEET_SERVER_ELASTICSEARCH_USERNAME: "${ES_SUPERUSER_USER:-admin}"
+      FLEET_SERVER_ELASTICSEARCH_PASSWORD: "${ES_SUPERUSER_PASS:-changeme}"
+      FLEET_SERVER_CERT: /etc/pki/tls/certs/fleet-server.pem
+      FLEET_SERVER_CERT_KEY: /etc/pki/tls/private/fleet-server-key.pem
+      FLEET_URL: https://fleet-server:8220
+      KIBANA_FLEET_SETUP: "true"
+      KIBANA_HOST: "http://kibana:5601"
+      KIBANA_USERNAME: "${ES_SUPERUSER_USER:-admin}"
+      KIBANA_PASSWORD: "${ES_SUPERUSER_PASS:-changeme}"
+    depends_on:
+      elasticsearch: { condition: service_healthy }
+      kibana: { condition: service_healthy }
+    volumes:
+      - "./testing/docker/fleet-server/certificate.pem:/etc/pki/tls/certs/fleet-server.pem"
+      - "./testing/docker/fleet-server/key.pem:/etc/pki/tls/private/fleet-server-key.pem"
+
+  package-registry:
+    image: docker.elastic.co/package-registry/distribution:snapshot
+    ports:
+      - 8080
+    entrypoint: /entrypoint.sh
+    healthcheck:
+      test: ["CMD-SHELL", "curl --fail localhost:8080/health"]
+      retries: 100
+      interval: 5s
+    volumes:
+      -
"./testing/docker/package-registry/entrypoint.sh:/entrypoint.sh" + - "./testing/docker/package-registry/config.yml:/package-registry/config.yml" + - "./apmpackage:/apmpackage" # copied to /packages/local/apm/ by entrypoint.sh diff --git a/docs/agent-configuration.asciidoc b/docs/agent-configuration.asciidoc index bff45082da0..734aa3b2ab4 100644 --- a/docs/agent-configuration.asciidoc +++ b/docs/agent-configuration.asciidoc @@ -8,6 +8,11 @@ APM Server exposes an API endpoint that allows agents to query the server for configuration changes. More information on this feature is available in {kibana-ref}/agent-configuration.html[APM Agent configuration in Kibana]. +Starting with release 7.14, agent configuration can be declared directly within +`apm-server.yml`. Requests to the endpoint are unchanged; `apm-server` responds +directly without querying kibana for the agent configuration. Refer to the +example in `apm-server.yml` under Agent Configuration. + [[agent-config-endpoint]] [float] === Agent configuration endpoint diff --git a/docs/api-keys.asciidoc b/docs/api-keys.asciidoc new file mode 100644 index 00000000000..fe06a7a8494 --- /dev/null +++ b/docs/api-keys.asciidoc @@ -0,0 +1,149 @@ +[role="xpack"] +[[beats-api-keys]] +== Grant access using API keys + +Instead of using usernames and passwords, you can use API keys to grant +access to {es} resources. You can set API keys to expire at a certain time, +and you can explicitly invalidate them. Any user with the `manage_api_key` +or `manage_own_api_key` cluster privilege can create API keys. + +{beatname_uc} instances typically send both collected data and monitoring +information to {es}. If you are sending both to the same cluster, you can use the same +API key. For different clusters, you need to use an API key per cluster. + +NOTE: For security reasons, we recommend using a unique API key per {beatname_uc} instance. +You can create as many API keys per user as necessary. + +[float] +[[beats-api-key-publish]] +=== Create an API key for writing events + +In {kib}, navigate to **Stack Management** > **API keys** and click **Create API key**. + +[role="screenshot"] +image::images/server-api-key-create.png[API key creation] + +Enter a name for your API key and select **Restrict privileges**. +In the role descriptors box, assign the appropriate privileges to the new API key. For example: + +[source,json,subs="attributes,callouts"] +---- +{ + "{beat_default_index_prefix}_writer": { + "index": [ + { + "names": ["{beat_default_index_prefix}-*"], + "privileges": ["create_index", "create_doc"] + }, + { + "names": ["{beat_default_index_prefix}-*sourcemap"], + "privileges": ["read"] + }, + ] + } +} +---- + +NOTE: This example only provides privileges for **writing data**. +See <> for additional privileges and information. + +To set an expiration date for the API key, select **Expire after time** +and input the lifetime of the API key in days. + +Click **Create API key**. In the dropdown, switch to **Beats** and copy the API key. + +You can now use this API key in your +{beatname_lc}.yml+ configuration file: + +["source","yml",subs="attributes"] +-------------------- +output.elasticsearch: + api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1> +-------------------- +<1> Format is `id:api_key` (as shown in the Beats dropdown) + +[float] +[[beats-api-key-monitor]] +=== Create an API key for monitoring + +In {kib}, navigate to **Stack Management** > **API keys** and click **Create API key**. 
+ +[role="screenshot"] +image::images/server-api-key-create.png[API key creation] + +Enter a name for your API key and select **Restrict privileges**. +In the role descriptors box, assign the appropriate privileges to the new API key. +For example: + +[source,json,subs="attributes,callouts"] +---- +{ + "{beat_default_index_prefix}_monitoring": { + "index": [ + { + "names": [".monitoring-beats-*"], + "privileges": ["create_index", "create_doc"] + } + ] + } +} +---- + +NOTE: This example only provides privileges for **publishing monitoring data**. +See <> for additional privileges and information. + +To set an expiration date for the API key, select **Expire after time** +and input the lifetime of the API key in days. + +Click **Create API key**. In the dropdown, switch to **Beats** and copy the API key. + +You can now use this API key in your +{beatname_lc}.yml+ configuration file like this: + +["source","yml",subs="attributes"] +-------------------- +monitoring.elasticsearch: + api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1> +-------------------- +<1> Format is `id:api_key` (as shown in the Beats dropdown) + +[float] +[[beats-api-key-es]] +=== Create an API key with {es} APIs + +You can also use {es}'s {ref}/security-api-create-api-key.html[Create API key API] to create a new API key. +For example: + +[source,console,subs="attributes,callouts"] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "{beat_default_index_prefix}_host001", <1> + "role_descriptors": { + "{beat_default_index_prefix}_writer": { <2> + "index": [ + { + "names": ["{beat_default_index_prefix}-*"], + "privileges": ["create_index", "create_doc"] + }, + { + "names": ["{beat_default_index_prefix}-*sourcemap"], + "privileges": ["read"] + }, + ] + } + } +} +------------------------------------------------------------ +<1> Name of the API key +<2> Granted privileges, see <> + +See the {ref}/security-api-create-api-key.html[Create API key] reference for more information. + +[[learn-more-api-keys]] +[float] +=== Learn more about API keys + +See the {es} API key documentation for more information: + +* {ref}/security-api-create-api-key.html[Create API key] +* {ref}/security-api-get-api-key.html[Get API key information] +* {ref}/security-api-invalidate-api-key.html[Invalidate API key] diff --git a/docs/apm-package/apm-input-settings.asciidoc b/docs/apm-package/apm-input-settings.asciidoc new file mode 100644 index 00000000000..81b89dafe2e --- /dev/null +++ b/docs/apm-package/apm-input-settings.asciidoc @@ -0,0 +1,247 @@ +// tag::NAME-setting[] +| +[id="input-{input-type}-NAME-setting"] +`NAME` + +| (TYPE) DESCRIPTION. + +*Default:* `DEFAULT` + +OPTIONAL INFO AND EXAMPLE +// end::NAME-setting[] + +// ============================================================================= + +// These settings are shared across the docs for multiple inputs. Copy and use +// the above template to add a shared setting. Replace values in all caps. +// Use an include statement // to pull the tagged region into your source file: +// include::input-shared-settings.asciidoc[tag=NAME-setting] + +// tag::host-setting[] +| +[id="input-{input-type}-host-setting"] +`host` + +| (string) Defines the host and port the server is listening on. +Use `unix:/path/to.sock` to listen on a unix domain socket. 
+
+*Default:* `localhost:8200`
+// end::host-setting[]
+
+// =============================================================================
+
+// tag::secret_token-setting[]
+|
+[id="input-{input-type}-secret_token-setting"]
+`secret_token`
+
+| (string) Authorization token for sending APM data.
+The same token must also be set in each APM agent.
+This token is not used for RUM endpoints.
+
+*Default:* No secret token set
+// end::secret_token-setting[]
+
+// =============================================================================
+
+// tag::max_event_bytes-setting[]
+|
+[id="input-{input-type}-max_event_bytes-setting"]
+`max_event_bytes`
+
+| (int) Maximum permitted size, in bytes, of an event accepted by the server for processing.
+
+*Default:* `307200` bytes
+// end::max_event_bytes-setting[]
+
+// =============================================================================
+
+// tag::default_service_environment-setting[]
+|
+[id="input-{input-type}-default_service_environment-setting"]
+`default_service_environment`
+
+| (string) The default service environment for events without a defined service environment.
+
+// end::default_service_environment-setting[]
+
+// =============================================================================
+
+// tag::capture_personal_data-setting[]
+|
+[id="input-{input-type}-capture_personal_data-setting"]
+`capture_personal_data`
+
+| (bool) Capture personal data such as IP or User Agent.
+If `true`, APM Server captures the IP address of the instrumented service and its User Agent, if any.
+
+*Default:* `true`
+// end::capture_personal_data-setting[]
+
+// =============================================================================
+
+// tag::enable_rum-setting[]
+|
+[id="input-{input-type}-enable_rum-setting"]
+`enable_rum`
+
+| (bool) Enables or disables Real User Monitoring (RUM).
+
+*Default:* `false` (disabled)
+// end::enable_rum-setting[]
+
+// =============================================================================
+
+// tag::rum_allow_origins-setting[]
+|
+[id="input-{input-type}-rum_allow_origins-setting"]
+`rum_allow_origins`
+
+| (string) A list of permitted origins for RUM support.
+User-agents send an Origin header that will be validated against this list.
+This is done automatically by modern browsers as part of the https://www.w3.org/TR/cors/[CORS specification].
+An origin consists of a protocol scheme, host, and port, without the URL path.
+
+*Default:* `["*"]` (allows everything)
+// end::rum_allow_origins-setting[]
+
+// =============================================================================
+
+// tag::rum_allow_headers-setting[]
+|
+[id="input-{input-type}-rum_allow_headers-setting"]
+`rum_allow_headers`
+
+| (string) By default, HTTP requests made from the RUM agent to the APM integration are limited in the HTTP headers they are allowed to have.
+If any other headers are added, the request will be rejected by the browser due to Cross-Origin Resource Sharing (CORS) restrictions.
+If you need to add extra headers to these requests, use this configuration to allow additional headers.
+
+The default list of values includes `"Content-Type"`, `"Content-Encoding"`, and `"Accept"`.
+Configured values are appended to the default list and used as the value for the
+`Access-Control-Allow-Headers` header.
+// end::rum_allow_headers-setting[]
+
+// =============================================================================
+
+// tag::rum_response_headers-setting[]
+|
+[id="input-{input-type}-rum_response_headers-setting"]
+`rum_response_headers`
+
+| (yaml) Custom HTTP headers to add to RUM responses, for example, for security policy compliance.
+
+// end::rum_response_headers-setting[]
+
+// =============================================================================
+
+// tag::anonymous_enabled-setting[]
+|
+[id="input-{input-type}-anonymous_enabled-setting"]
+`anonymous_enabled`
+
+| (bool) Enable or disable anonymous authentication.
+
+*Default:* `true` (enabled)
+// end::anonymous_enabled-setting[]
+
+// =============================================================================
+
+// tag::anonymous_allow_agent-setting[]
+|
+[id="input-{input-type}-anonymous_allow_agent-setting"]
+`anonymous_allow_agent`
+
+| (array) A list of permitted APM agent names for anonymous authentication.
+Names in this list must match the agent's `agent.name`.
+
+*Default:* `[rum-js, js-base, iOS/swift]` (only RUM and iOS/Swift agent events are accepted)
+// end::anonymous_allow_agent-setting[]
+
+// =============================================================================
+
+// tag::anonymous_allow_service-setting[]
+|
+[id="input-{input-type}-anonymous_allow_service-setting"]
+`anonymous_allow_service`
+
+| (array) A list of permitted service names for anonymous authentication.
+Names in this list must match the agent's `service.name`.
+This can be used to limit the number of service-specific indices or data streams created.
+
+*Default:* Not set (any service name is accepted)
+// end::anonymous_allow_service-setting[]
+
+// =============================================================================
+
+// tag::anonymous_rate_limit_event_limit-setting[]
+|
+[id="input-{input-type}-anonymous_rate_limit_event_limit-setting"]
+`anonymous_rate_limit_event_limit`
+
+| (int) The maximum number of events allowed to be sent to the APM Server anonymous auth endpoint per IP per second.
+
+*Default:* `10`
+// end::anonymous_rate_limit_event_limit-setting[]
+
+// =============================================================================
+
+// tag::anonymous_rate_limit_ip_limit-setting[]
+|
+[id="input-{input-type}-anonymous_rate_limit_ip_limit-setting"]
+`anonymous_rate_limit_ip_limit`
+
+| (int) The number of unique IP addresses to track in an LRU cache.
+IP addresses in the cache will be rate limited according to the `anonymous_rate_limit_event_limit` setting.
+Consider increasing this default if your application has many concurrent clients.
+
+*Default:* `10000`
+// end::anonymous_rate_limit_ip_limit-setting[]
+
+// =============================================================================
+
+// tag::api_key_enabled-setting[]
+|
+[id="input-{input-type}-api_key_enabled-setting"]
+`api_key_enabled`
+
+| (bool) Enable or disable API key authorization between APM Server and APM agents.
+
+*Default:* `false` (disabled)
+// end::api_key_enabled-setting[]
+
+// =============================================================================
+
+// tag::api_key_limit-setting[]
+|
+[id="input-{input-type}-api_key_limit-setting"]
+`api_key_limit`
+
+| (int) Each unique API key triggers one request to Elasticsearch.
+This setting restricts the number of unique API keys allowed per minute.
+The minimum value for this setting should be the number of API keys configured in your monitored services.
+
+*Default:* `100`
+// end::api_key_limit-setting[]
+
+// =============================================================================
+
+// tag::sourcemap_api_key-setting[]
+|
+[id="input-{input-type}-sourcemap_api_key-setting"]
+`sourcemap_api_key`
+
+| (string) RUM API key for sourcemaps. Formatted as `<id>:<api_key>`.
+// end::sourcemap_api_key-setting[]
+
+// =============================================================================
+
+// tag::kibana_api_key-setting[]
+|
+[id="input-{input-type}-kibana_api_key-setting"]
+`kibana_api_key`
+
+| (string) API Key for the APM central configuration feature. Formatted as `<id>:<api_key>`.
+
+// end::kibana_api_key-setting[]
+
+// =============================================================================
diff --git a/docs/apm-package/apm-integration.asciidoc b/docs/apm-package/apm-integration.asciidoc
new file mode 100644
index 00000000000..4aaaf41a832
--- /dev/null
+++ b/docs/apm-package/apm-integration.asciidoc
@@ -0,0 +1,121 @@
+[[apm-integration]]
+= APM integration for {agent}
+
+++++
+APM integration ({agent})
+++++
+
+beta::[]
+
+{agent} is a single, unified way to add monitoring for logs, metrics, and other types of data to each host.
+The APM integration for {agent} assigns the APM input to a specified policy,
+and installs {es} templates and ingest node pipelines for APM data.
+When {agent} is assigned a policy with an APM input,
+{agent} will run the APM Server binary locally and listen for APM data.
+
+[discrete]
+[[apm-integration-get-started]]
+=== Get started
+
+Ready to jump in?
+Read through the <>, then head over to the
+quick start guide: {fleet-guide}/fleet-quick-start-traces.html[Get application traces into the {stack}].
+
+[discrete]
+[[apm-integration-architecture]]
+=== Architecture
+
+If RUM is enabled, you must run {agent} centrally.
+If RUM is disabled, {agent} can be run on edge machines. To do this,
+download and enroll an {agent} on the same machines that your instrumented services run on.
+
+[discrete]
+[[apm-integration-limitations]]
+=== Limitations
+
+IMPORTANT: This integration is still in beta and does not have feature parity with standalone APM.
+Do not migrate production deployments.
+
+Data streams migration::
+Existing APM users will need to migrate to data streams to use the APM integration.
+This change cannot be reverted, and impacts how APM Server and its indices are configured -- see <> and <>.
+Additionally, users on {ece} require additional steps prior to migrating, like configuring TLS certificates for the connection between APM Server and {es}.
+
+Stack monitoring::
+<> is not yet available.
+
+Index lifecycle management (ILM)::
+A default ILM policy, named `traces-apm.traces-default_policy`, is created for all event types.
+This policy moves indices to a warm data tier after 30 days.
+No default cold or delete data tier is defined.
+It is not possible to configure this policy in APM Server or {agent};
+it must be configured with {es} or {kib}.
+See {ref}/example-using-index-lifecycle-policy.html[Customize built-in ILM policies] for more information.
+
+Onboarding::
+APM Server no longer writes an onboarding document when setting up.
+
+Standalone mode::
+{fleet-guide}/run-elastic-agent-standalone.html[Standalone mode] is not currently supported.
+An {agent} with the APM integration enabled must be managed by Fleet.
+
+Service names::
+Service names are case-insensitive and must be unique.
+See <> for more information.
+
+Upgrading from prior {agent} versions::
+Due to changing API key permissions, an {agent} enrolled before version 7.12 is not compatible with the APM integration.
+You must enroll a new {agent} to use the integration.
+
+[discrete]
+[[apm-integration-terminology]]
+=== Terminology
+
+Agents::
+
+{agent} and APM agents are different components:
++
+{fleet-guide}/fleet-overview.html[**{agent}**] is a single,
+unified agent that you can deploy to hosts or containers to collect data and send it to the {stack}.
+Behind the scenes, {agent} runs APM Server to listen for `apm` data.
++
+{apm-overview-ref-v}/components.html[**APM agents**] are open source libraries written in the same language as your service.
+You may only need one, or you might use all of them.
+You install them into your service as you would install any other library.
+They instrument your code and collect performance data and errors at runtime.
+This data is sent to APM Server.
+
+Central management/configuration::
+
+// to do: add links to these docs
+Fleet central management and APM agent central configuration are two different features
+that can be accessed in {kib}:
++
+**Fleet central management** serves as the communication channel with your {agent}s;
+agents check in for the latest updates on a regular basis.
++
+**APM agent central configuration** allows you to fine-tune your agent configuration from within the APM app.
+Changes are automatically propagated to your APM agents, so there’s no need to redeploy your services.
+
+
+[discrete]
+[[apm-integration-versioning]]
+=== Package versioning
+
+The APM package is versioned separately from the Elastic Stack.
+In the future, we may align with Elastic Stack versioning.
+
+[discrete]
+[[apm-integration-learn-more]]
+=== Learn more
+
+// to do: update these links
+* <>
+* <>
+* {fleet-guide}/fleet-overview.html[Fleet overview]
+
+include::./data-streams.asciidoc[]
+
+include::./input-apm.asciidoc[]
+
+include::./configure.asciidoc[]
diff --git a/docs/apm-package/configure.asciidoc b/docs/apm-package/configure.asciidoc
new file mode 100644
index 00000000000..fa17dd7d226
--- /dev/null
+++ b/docs/apm-package/configure.asciidoc
@@ -0,0 +1,51 @@
+[[apm-integration-configure]]
+== Configure APM integration
+
+++++
+Configure
+++++
+
+beta::[]
+
+Templates, pipelines, index lifecycle management, etc.,
+cannot be configured with APM Server or Fleet, and must instead be configured in {kib} or with
+{es} APIs.
+
+[[apm-integration-templates]]
+=== Index templates
+
+The APM integration loads default index templates into {es}.
+These templates configure the APM data stream indices.
+To view and edit these templates in {kib},
+select *Stack Management* / *Index Management* / *Index Templates*.
+Search for `apm`.
+
+See {ref}/index-templates.html[index templates] for more information.
+
+[[apm-integration-pipelines]]
+=== Pipelines
+
+The APM integration loads default ingest node pipelines into {es}.
+These pipelines preprocess and enrich APM documents before indexing them.
+To view and edit these pipelines in {kib},
+select *Stack Management* / *Ingest Node Pipelines*.
+Search for `apm`.
+
+See {ref}/ingest.html[ingest node pipelines] for more information.
+
+[[apm-integration-ilm]]
+=== Index lifecycle management (ILM)
+
+The index lifecycle management (ILM) feature in {es} allows you to automate the
+lifecycle of your APM Server indices as they grow and age.
+ILM is enabled by default, and a default policy is applied to all APM indices.
+
+To view and edit these index lifecycle policies in {kib},
+select *Stack Management* / *Index Lifecycle Management*.
+Search for `apm`.
+
+See {ref}/getting-started-index-lifecycle-management.html[manage the index lifecycle] for more information.
+
+// to do
+// [[apm-integration-sourcemaps]]
+// === RUM Source maps
diff --git a/docs/apm-package/data-streams.asciidoc b/docs/apm-package/data-streams.asciidoc
new file mode 100644
index 00000000000..5e4eb05f63a
--- /dev/null
+++ b/docs/apm-package/data-streams.asciidoc
@@ -0,0 +1,78 @@
+[[apm-integration-data-streams]]
+== Data streams
+
+****
+beta::[]
+
+Existing APM users need to migrate to data streams to use the APM integration.
+The integration does not have feature parity with standalone APM.
+Production deployments should not be migrated at this time.
+
+Migration limitations include:
+
+* This change cannot be reverted
+* This change impacts how APM Server and its indices are configured -- see <> and <>
+* Users on {ece} require additional steps prior to migrating, like configuring TLS certificates for the connection between APM Server and {es}
+* Additional <>
+****
+
+[discrete]
+[[apm-integration-naming-scheme]]
+=== Data stream naming scheme
+
+{agent} uses data streams to store append-only time series data across multiple indices
+while giving users a single named resource for requests.
+If you're new to data streams, see the {fleet-guide}/data-streams.html[Fleet user guide] to learn more.
+
+`apm` input data is divided into three types:
+
+Traces::
+
+Traces are made up of {apm-overview-ref-v}/apm-data-model.html[spans and transactions].
+Traces are stored in the following data stream:
+
+- Application traces: `traces-apm-`
+
+Metrics::
+
+Metrics include application-based metrics and basic system metrics.
+Metrics are stored in the following data streams:
+
+- Application defined metrics: `metrics-apm.app.-`
+- APM internal metrics: `metrics-apm.internal-`
+- APM profiling metrics: `metrics-apm.profiling-`
+
+Logs::
+
+Logs include application error events and application logs.
+Logs are stored in the following data streams:
+
+- APM error/exception logging: `logs-apm.error-`
+
+[discrete]
+[[apm-integration-service-name]]
+=== Service names
+
+The APM integration maps an instrumented service's name (defined in each APM agent's
+configuration) to the index where its application-defined metrics are stored in {es}.
+Service names therefore must follow index naming rules:
+
+* Service names are case-insensitive and must be unique.
+For example, you cannot have a service named `Foo` and another named `foo`.
+* Special characters will be removed from service names and replaced with underscores (`_`).
+Special characters include:
++
+[source,text]
+----
+'\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',', '#', ':', '-'
+----
+
+[discrete]
+[[apm-integration-namespace]]
+=== Namespace
+
+There is no recommendation for what to use as your namespace;
+it's intentionally flexible, which allows greater control over how your data is indexed.
+For example, you might create namespaces for each of your environments,
+like `dev`, `prod`, `production`, etc.
+Or, you might create namespaces that correspond to strategic business units within your organization.
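+
+For example, with a `prod` namespace, the data streams described above would be
+named as follows (a hypothetical illustration; each pattern above ends with the
+namespace you choose):
+
+[source,text]
+----
+traces-apm-prod
+metrics-apm.internal-prod
+logs-apm.error-prod
+----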
diff --git a/docs/apm-package/input-apm.asciidoc b/docs/apm-package/input-apm.asciidoc
new file mode 100644
index 00000000000..d1e47cd58cb
--- /dev/null
+++ b/docs/apm-package/input-apm.asciidoc
@@ -0,0 +1,83 @@
+:input-type: apm
+
+[[input-apm]]
+== APM input settings
+
+++++
+Input settings
+++++
+
+beta::[]
+
+To edit the APM integration input settings, open {kib} and select:
+**Integrations** > **Elastic APM** > **Add Elastic APM**.
+Expand the caret to view all settings.
+
+A limited number of settings are currently supported.
+
+[NOTE]
+====
+Templates, pipelines, index lifecycle management, etc., cannot be configured
+with APM Server or Fleet, and must instead be configured with {kib} or {es}.
+<>.
+// Configuration via the `apm-server.yml` file is no longer supported.
+====
+
+[float]
+[[apm-input-general-settings]]
+=== General settings
+
+[cols="2*>
+APM agents communicate with the APM server by sending events in an HTTP request. Each event is sent as its own line in the HTTP request body. If events are too large, you should consider increasing the <> setting in the APM Server, and adjusting relevant settings in the agent.
 
 [[unauthorized]]
@@ -90,8 +88,8 @@ APM Server has an internal queue that helps to:
 When the queue has reached the maximum size, APM Server returns an HTTP 503 status
 with the message "Queue is full".
 
-A full queue generally means that the agents collect more data than APM server is able to process.
-This might happen when APM Server is not configured properly for the size of your Elasticsearch cluster,
+A full queue generally means that the agents collect more data than APM server can process.
+This might happen when APM Server is not configured properly for your Elasticsearch cluster size,
 or because your Elasticsearch cluster is underpowered or not configured properly for the given workload.
 The queue can also fill up if Elasticsearch runs out of disk space.
@@ -125,7 +123,7 @@ To alleviate this problem, you can try to:
 The target host running might be unreachable or the certificate may not be valid.
 To resolve your issue:
 
-* Make sure that server process on the target host is running and you can connect to it.
+* Make sure that the APM Server process on the target host is running and you can connect to it.
 First, try to ping the target host to verify that you can reach it from the host running {beatname_uc}.
 Then use either `nc` or `telnet` to make sure that the port is available. For example:
 +
@@ -159,8 +157,8 @@ This happens because your certificate is only valid for the hostname present in
 To resolve this problem, try one of these solutions:
 
-* Create a DNS entry for the hostname mapping it to the server's IP.
-* Create an entry in `/etc/hosts` for the hostname. Or on Windows add an entry to
+* Create a DNS entry for the hostname, mapping it to the server's IP.
+* Create an entry in `/etc/hosts` for the hostname. Or, on Windows, add an entry to
 `C:\Windows\System32\drivers\etc\hosts`.
 * Re-create the server certificate and add a SubjectAltName (SAN) for the IP address of the server. This makes the server's certificate valid for both the hostname and the IP address.
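+
+As a sketch of that last option, a self-signed certificate with a SAN for both the
+hostname and the IP address could be re-created with OpenSSL (assumes OpenSSL 1.1.1+
+for `-addext`; the hostname and IP address below are placeholders):
+
+[source,sh]
+----
+openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
+  -keyout apm-server.key -out apm-server.crt \
+  -subj "/CN=apm-server.example.com" \
+  -addext "subjectAltName=DNS:apm-server.example.com,IP:10.0.0.5"
+----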
@@ -213,6 +211,7 @@ you won't see a sign of failures as the APM server asynchronously sends the data However, the APM server and Elasticsearch log a warning like this: +[source,logs] ---- {\"type\":\"illegal_argument_exception\",\"reason\":\"Limit of total fields [1000] in index [apm-7.0.0-transaction-2017.05.30] has been exceeded\"} ---- @@ -226,12 +225,13 @@ especially when using a load balancer. You may see an error like the one below in the agent logs, and/or a similar error on the APM Server side: +[source,logs] ---------------------------------------------------------------------- [ElasticAPM] APM Server responded with an error: "read tcp 123.34.22.313:8200->123.34.22.40:41602: i/o timeout" ---------------------------------------------------------------------- -To fix this, ensure timeouts are incrementing from the {apm-agents-ref}[APM Agent], +To fix this, ensure timeouts are incrementing from the {apm-agents-ref}[APM agent], through your load balancer, to the <>. By default, the agent timeouts are set at 10 seconds, and the server timeout is set at 30 seconds. @@ -239,8 +239,9 @@ Your load balancer should be set somewhere between these numbers. For example: +[source,txt] ---------------------------------------------------------------------- -APM Agent --> Load Balancer --> APM Server +APM agent --> Load Balancer --> APM Server 10s 15s 30s ---------------------------------------------------------------------- @@ -260,19 +261,21 @@ and data will be lost. Some agents have internal queues or buffers that will temporarily store data if the APM Server goes down. As a general rule of thumb, queues fill up quickly. Assume data will be lost if APM Server goes down. -Adjusting these queues/buffers can increase the overhead of the agent, so use caution when updating default values. +Adjusting these queues/buffers can increase the agent's overhead, so use caution when updating default values. -* **Go Agent** - Circular buffer with configurable size: +* **Go agent** - Circular buffer with configurable size: {apm-go-ref}/configuration.html#config-api-buffer-size[`ELASTIC_APM_BUFFER_SIZE`]. -* **Java Agent** - Internal buffer with configurable size: +// * **iOS agent** - ?? +* **Java agent** - Internal buffer with configurable size: {apm-java-ref}/config-reporter.html#config-max-queue-size[`max_queue_size`]. -* **Node.js Agent** - No internal queue. Data is lost. -* **Python Agent** - Internal {apm-py-ref}/tuning-and-overhead.html#tuning-queue[Transaction queue] +* **Node.js agent** - No internal queue. Data is lost. +* **PHP agent** - No internal queue. Data is lost. +* **Python agent** - Internal {apm-py-ref}/tuning-and-overhead.html#tuning-queue[Transaction queue] with configurable size and time between flushes. -* **Ruby Agent** - Internal queue with configurable size: +* **Ruby agent** - Internal queue with configurable size: {apm-ruby-ref}/configuration.html#config-api-buffer-size[`api_buffer_size`]. -* **RUM Agent** - No internal queue. Data is lost. -* **.NET Agent** - No internal queue. Data is lost. +* **RUM agent** - No internal queue. Data is lost. +* **.NET agent** - No internal queue. Data is lost. 
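+
+For example, the buffer sizes listed above might be raised via each agent's usual
+configuration mechanism. A hypothetical sketch using environment variables (the
+values are illustrative only, and the Java variable name assumes the agent's
+standard `ELASTIC_APM_` prefix for `max_queue_size`):
+
+[source,sh]
+----
+# Go agent: enlarge the circular buffer
+export ELASTIC_APM_BUFFER_SIZE=50MB
+# Java agent: enlarge the internal event queue
+export ELASTIC_APM_MAX_QUEUE_SIZE=1024
+----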
[[server-resource-exists-not-alias]] [float] diff --git a/docs/configuration-anonymous.asciidoc b/docs/configuration-anonymous.asciidoc new file mode 100644 index 00000000000..aad3787b7d7 --- /dev/null +++ b/docs/configuration-anonymous.asciidoc @@ -0,0 +1,88 @@ +[[configuration-anonymous]] +== Anonymous auth configuration options + +++++ +Anonymous authentication +++++ + +Elastic APM agents can send unauthenticated (anonymous) events to the APM Server. +This is useful for agents that run on clients, like the Real User Monitoring (RUM) agent running in a browser, +or the iOS/Swift agent running in a user application. + +Example configuration: + +["source","yaml"] +---- +apm-server.auth.anonymous.enabled: true +apm-server.auth.anonymous.allow_agent: [rum-js] +apm-server.auth.anonymous.allow_service: [my_service_name] +apm-server.auth.anonymous.rate_limit.event_limit: 300 +apm-server.auth.anonymous.rate_limit.ip_limit: 1000 +---- + +[float] +[[config-auth-anon-rum]] +=== Real User Monitoring (RUM) + +Anonymous authentication must be enabled to collect RUM data. +For this reason, anonymous auth will be enabled automatically if <> +is set to `true`, and <> is not explicitly defined. + +See <> for additional RUM configuration options. + +[float] +[[config-auth-anon]] +=== Configuration reference + +Specify the following options in the `apm-server.auth.anonymous` section of the `apm-server.yml` config file: + +[float] +[[config-auth-anon-enabled]] +==== `enabled` + +Enable or disable anonymous authentication. + +Default: `false` (disabled) + +[float] +[[config-auth-anon-allow-agent]] +==== `allow_agent` +A list of permitted APM agent names for anonymous authentication. +Names in this list must match the agent's `agent.name`. + +Default: `[rum-js, js-base]` (only RUM agent events are accepted) + +[float] +[[config-auth-anon-allow-service]] +==== `allow_service` +A list of permitted service names for anonymous authentication. +Names in this list must match the agent's `service.name`. +This can be used to limit the number of service-specific indices or data streams created. + +Default: Not set (any service name is accepted) + +[float] +[[config-auth-anon-ip-limit]] +==== `rate_limit.ip_limit` +The number of unique IP addresses to track in an LRU cache. +IP addresses in the cache will be rate limited according to the <> setting. +Consider increasing this default if your application has many concurrent clients. + +Default: `1000` + +[float] +[[config-auth-anon-event-limit]] +==== `rate_limit.event_limit` +The maximum number of events allowed per second, per agent IP address. + +The APM Server first attempts to derive the IP address from proxy headers. The +supported headers are parsed in the following order: + +- `Forwarded` +- `X-Real-Ip` +- `X-Forwarded-For` + +If none of these headers is present, the remote address for the incoming +request is used. + +Default: `300` diff --git a/docs/configuration-process.asciidoc b/docs/configuration-process.asciidoc index c3f8feafdaa..c1cb83d8a26 100644 --- a/docs/configuration-process.asciidoc +++ b/docs/configuration-process.asciidoc @@ -90,7 +90,7 @@ Default value is 0, which means _unlimited_. [[config-secret-token]] [float] -==== `secret_token` +==== `auth.secret_token` Authorization token for sending data to the APM server. If a token is set, the agents must send it in the following format: Authorization: Bearer . @@ -99,6 +99,15 @@ The token is not used for RUM endpoints. 
 By default, no authorization token is set.
 It is recommended to use an authorization token in combination with SSL enabled.
 Read more about <> and the <>.
 
+[[config-secret-token-legacy]]
+[float]
+==== `secret_token`
+
+deprecated::[7.14.0, Replaced by `auth.secret_token`. See <>]
+
+In versions prior to 7.14.0, secret token authorization was known as `apm-server.secret_token`. In 7.14.0 this was renamed to `apm-server.auth.secret_token`.
+The old configuration will continue to work until 8.0.0, and the new configuration will take precedence.
+
 [[capture_personal_data]]
 [float]
 ==== `capture_personal_data`
@@ -106,6 +115,11 @@ If true,
 APM Server captures the IP of the instrumented service and its User Agent if any.
 Enabled by default.
 
+[[default_service_environment]]
+[float]
+==== `default_service_environment`
+Sets the default service environment to associate with data and requests received from agents that have no service environment defined.
+
 [[expvar.enabled]]
 [float]
 ==== `expvar.enabled`
@@ -170,3 +184,21 @@ Default value is 1 second.
 ==== `max_procs`
 Sets the maximum number of CPUs that can be executing simultaneously. The default
 is the number of logical CPUs available in the system.
+
+[float]
+=== Configuration options: `data_streams`
+
+[[data_streams.enabled]]
+[float]
+==== `enabled`
+Write events to Elasticsearch data streams instead of indices.
+Events will be written to `traces-*`, `logs-*`, and `metrics-*` data streams.
+Enabling data streams disables the setup of index templates, ILM policies, and ingest pipelines.
+Defaults to false.
+
+[[data_streams.wait_for_integration]]
+[float]
+==== `wait_for_integration`
+Wait for the `apm` Fleet integration to be installed by Kibana. Requires either <>
+or the <> to be configured.
+Defaults to true.
diff --git a/docs/configuration-rum.asciidoc b/docs/configuration-rum.asciidoc
index a02ad46c321..e5661fdc855 100644
--- a/docs/configuration-rum.asciidoc
+++ b/docs/configuration-rum.asciidoc
@@ -5,24 +5,23 @@
 Real User Monitoring (RUM)
 ++++
 
-Real User Monitoring captures user interaction with clients such as web browsers.
-The {apm-rum-ref-v}/index.html[JavaScript Agent] is Elastic's RUM Agent.
-
-By default, Real User Monitoring is disabled. To enable it,
-set `apm-server.rum.enabled` to `true` in your APM Server configuration file.
+The {apm-rum-ref-v}/index.html[Real User Monitoring (RUM) agent] captures user interactions with clients such as web browsers.
+These interactions are sent as events to the APM Server.
+Because the RUM agent runs on the client side, the connection between agent and server is unauthenticated.
+As a security precaution, RUM is therefore disabled by default.
+To enable it, set `apm-server.rum.enabled` to `true` in your APM Server configuration file.
 
 In addition, if APM Server is deployed in an origin different than the page’s origin,
 you will need to configure {apm-rum-ref-v}/configuring-cors.html[Cross-Origin Resource Sharing (CORS)] in the Agent.
 
-A full list of APM Server configuration options is available in the <>.
- Example config with RUM enabled: ["source","yaml"] ---- apm-server.rum.enabled: true -apm-server.rum.event_rate.limit: 300 -apm-server.rum.event_rate.lru_size: 1000 +apm-server.auth.anonymous.rate_limit.event_limit: 300 +apm-server.auth.anonymous.rate_limit.ip_limit: 1000 +apm-server.auth.anonymous.allow_service: [your_service_name] apm-server.rum.allow_origins: ['*'] apm-server.rum.allow_headers: ["header1", "header2"] apm-server.rum.library_pattern: "node_modules|bower_components|~" @@ -34,7 +33,9 @@ apm-server.rum.source_mapping.index_pattern: "apm-*-sourcemap*" [float] [[enable-rum-support]] -=== Configuration +=== Configuration reference + +Specify the following options in the `apm-server.rum` section of the `apm-server.yml` config file: [[rum-enable]] [float] @@ -42,18 +43,41 @@ apm-server.rum.source_mapping.index_pattern: "apm-*-sourcemap*" To enable RUM support, set `apm-server.rum.enabled` to `true`. By default this is disabled. +NOTE: Enabling RUM support automatically enables <>. +Anonymous access is required as the RUM agent runs in end users' browsers. + [float] [[event_rate.limit]] ==== `event_rate.limit` -Defines the maximum amount of events allowed to be sent to the APM Server RUM endpoint per ip per second. -Defaults to 300. + +deprecated::[7.15.0, Replaced by <>.] + +The maximum number of events allowed per second, per agent IP address. + +Default: `300` [float] ==== `event_rate.lru_size` -An LRU cache is used to keep a rate limit per IP for the most recently seen IPs. -This setting defines the number of unique IPs that can be tracked in the cache. -Sites with many concurrent clients should consider increasing this limit. -Defaults to 1000. + +deprecated::[7.15.0, Replaced by <>.] + +The number of unique IP addresses to track in an LRU cache. +IP addresses in the cache will be rate limited according to the <> setting. +Consider increasing this default if your site has many concurrent clients. + +Default: `1000` + +[float] +[[rum-allow-service-names]] +==== `allow_service_names` + +deprecated::[7.15.0, Replaced by <>.] +A list of permitted service names for RUM support. +Names in this list must match the agent's `service.name`. +This can be set to restrict RUM events to those with one of a set of known service names, +in order to limit the number of service-specific indices or data streams created. + +Default: Not set (any service name is accepted) [float] [[rum-allow-origins]] @@ -62,17 +86,30 @@ A list of permitted origins for RUM support. User-agents send an Origin header that will be validated against this list. This is done automatically by modern browsers as part of the https://www.w3.org/TR/cors/[CORS specification]. An origin is made of a protocol scheme, host and port, without the URL path. -Default value is set to `['*']`, which allows everything. + +Default: `['*']` (allows everything) [float] [[rum-allow-headers]] ==== `allow_headers` -By default, HTTP requests made from the RUM agent to the APM Server are limited in the HTTP headers they are allowed to have. +HTTP requests made from the RUM agent to the APM Server are limited in the HTTP headers they are allowed to have. If any other headers are added, the request will be rejected by the browser due to Cross-Origin Resource Sharing (CORS) restrictions. -If you need to add extra headers to these requests, you can use this configuration to allow additional headers. -The default list of values includes "Content-Type", "Content-Encoding", and "Accept"; +Use this setting to allow additional headers. 
+The default list of allowed headers includes "Content-Type", "Content-Encoding", and "Accept"; custom values configured here are appended to the default list and used as the value for the `Access-Control-Allow-Headers` header. +Default: `[]` + +[float] +[[rum-response-headers]] +==== `response_headers` +Custom HTTP headers to add to RUM responses. +This can be useful for security policy compliance. + +Values set for the same key will be concatenated. + +Default: Not set + [float] [[rum-library-pattern]] ==== `library_pattern` @@ -81,19 +118,22 @@ If the RegExp matches, the stacktrace frame is considered to be a library frame. When source mapping is applied, the `error.culprit` is set to reflect the _function_ and the _filename_ of the first non library frame. This aims to provide an entry point for identifying issues. -Default value is `"node_modules|bower_components|~"`. + +Default: `"node_modules|bower_components|~"` [float] ==== `exclude_from_grouping` RegExp to be matched against a stacktrace frame's `file_name`. If the RegExp matches, the stacktrace frame is excluded from being used for calculating error groups. -The default pattern excludes stacktrace frames that have a filename starting with `/webpack`. + +Default: `"^/webpack"` (excludes stacktrace frames that have a filename starting with `/webpack`) [[config-sourcemapping-enabled]] [float] ==== `source_mapping.enabled` Used to enable/disable <> for RUM events. -Defaults to `true`. + +Default: `true` [[config-sourcemapping-elasticsearch]] [float] @@ -109,13 +149,16 @@ If a source map has been uploaded to the APM Server, <> is automatically applied to documents sent to the RUM endpoint. Source maps are fetched from Elasticsearch and then kept in an in-memory cache for the configured time. Values configured without a time unit are treated as seconds. -Default value is 5 minutes. + +Default: `5m` (5 minutes) [float] ==== `source_mapping.index_pattern` Source maps are stored in a separate index `apm-%{[observer.version]}-sourcemap` by default. If changed, a matching index pattern needs to be specified here. +Default: `"apm-*-sourcemap*"` + [float] === Ingest pipelines diff --git a/docs/configure-kibana-endpoint.asciidoc b/docs/configure-kibana-endpoint.asciidoc index efa5e5dc5cb..606727b27b2 100644 --- a/docs/configure-kibana-endpoint.asciidoc +++ b/docs/configure-kibana-endpoint.asciidoc @@ -1,6 +1,10 @@ [[setup-kibana-endpoint]] == Configure the Kibana endpoint +++++ +Kibana endpoint +++++ + Configuring the Kibana endpoint is required for {kibana-ref}/agent-configuration.html[APM Agent configuration in Kibana]. You configure the endpoint in the `apm-server.kibana` section of the @@ -17,11 +21,13 @@ apm-server.kibana.host: "http://localhost:5601" [float] === Considerations -* If your setup uses a <> for Agent/Server communication, +* If your setup uses a <> for Agent/Server communication, the same token is used to secure this endpoint. * It's important to still set relevant defaults locally in each Agent's configuration. If APM Server is unreachable, slow to respond, returns an error, etc., defaults set in the agent will apply according to their precedence. +* APM Server needs sufficient Kibana privileges to manage central configuration. +See <> for a list of required privileges. 
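+
+For reference, a minimal `apm-server.kibana` section might look like this
+(a sketch; the host value is a placeholder for your {kib} URL):
+
+["source","yaml"]
+----
+apm-server.kibana.enabled: true
+apm-server.kibana.host: "http://localhost:5601"
+----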
 [float]
 === Kibana endpoint configuration options
diff --git a/docs/configuring-ingest.asciidoc b/docs/configuring-ingest.asciidoc
index 7e132db035c..2795920a8f0 100644
--- a/docs/configuring-ingest.asciidoc
+++ b/docs/configuring-ingest.asciidoc
@@ -4,17 +4,22 @@
 [[configuring-ingest-node]]
 == Parse data using ingest node pipelines
 
+deprecated::[7.16.0, Users should now use the <>, which includes ingest node pipeline management. See <>]
+
 You can configure APM Server to use an {ref}/ingest.html[ingest node]
-to pre-process documents before indexing them in Elasticsearch.
-A pipeline definition specifies the series of processors that will transform each document in a specific way.
+to pre-process documents before indexing them in {es}.
+A pipeline definition specifies the series of pipelines or processors that will transform each document in a specific way.
 For example, a pipeline might define one processor that removes a field,
 followed by another that renames a field.
 
+Pipelines can be used to ensure data security by removing or obfuscating sensitive information.
+See {apm-overview-ref-v}/data-security.html[data security] for an example.
+
 [[default-pipeline]]
 [float]
 === Default ingest pipeline
 
-By default, APM Server registers the `apm` pipeline definition to Elasticsearch on startup.
-The `apm` pipeline definition defines the following processors:
+By default, APM Server registers the `apm` pipeline definition to {es} on startup.
+The `apm` pipeline defines the following inner pipelines:
 
 [horizontal]
 `apm_user_agent`::
@@ -35,8 +40,19 @@ that are already available on the parent transaction.
 In previous versions of APM Server, this functionality was hardcoded internally.
 Switching metadata cleanup from an internal process to a processor allows you to keep any span metadata that is important in your architecture.
 
-See the complete pipeline definition by navigating to the APM Server's home directory,
-and then viewing `ingest/pipeline/definition.json`.
+`apm_error_grouping_name`::
+added:[7.13]
+Adds `error.grouping_name` to error documents for use in the {kibana-ref}/xpack-apm.html[Kibana APM UI].
+
+`apm_opentelemetry_metrics`::
+added:[7.13]
+Copies well-known OpenTelemetry metrics to their Elastic APM counterparts, for visualization in the {kibana-ref}/xpack-apm.html[Kibana APM UI].
+For example, the OpenTelemetry metric field `runtime.jvm.gc.time` is copied to the Elastic APM metric field `jvm.gc.time`.
++
+Metrics are duplicated so you can refer to them by either the OpenTelemetry or Elastic APM metric name.
+
+See the complete pipeline definition by navigating to the APM Server's home directory
+and viewing `ingest/pipeline/definition.json`.
 
 To disable this, or any other pipeline, set `output.elasticsearch.pipeline: _none`.
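+
+For example, you can inspect the registered pipeline definitions directly in {es},
+and opt out in `apm-server.yml` (a sketch based on the pipeline name and setting
+mentioned above):
+
+[source,console]
+----
+GET _ingest/pipeline/apm
+----
+
+["source","yaml"]
+----
+output.elasticsearch.pipeline: _none
+----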
diff --git a/docs/configuring.asciidoc b/docs/configuring.asciidoc index db82a8df267..10ba3e373c4 100644 --- a/docs/configuring.asciidoc +++ b/docs/configuring.asciidoc @@ -10,25 +10,51 @@ include::{libbeat-dir}/shared/configuring-intro.asciidoc[] * <> -* <> -* <> +* <> * <> * <> +* <> * <> -* <> +* <> * <> -* <> -* <> +* <> +* <> * <> +* <> +* <> +* <> * <> -- include::./configuration-process.asciidoc[] +include::./configuration-anonymous.asciidoc[] + +include::{libbeat-dir}/template-config.asciidoc[] + +include::./ilm-reference.asciidoc[] + +include::{libbeat-dir}/shared-instrumentation.asciidoc[] + +include::./jaeger-reference.asciidoc[] + +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + +include::./configure-kibana-endpoint.asciidoc[] + +include::{libbeat-dir}/loggingconfig.asciidoc[] + :no-redis-output: include::{libbeat-dir}/outputconfig.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] + +include::./configuration-rum.asciidoc[] + +// BEGIN SSL SECTION -------------------------------------------- [[configuration-ssl-landing]] == SSL/TLS settings @@ -49,24 +75,9 @@ include::{libbeat-dir}/shared-ssl-config.asciidoc[] :leveloffset: -1 include::ssl-input-settings.asciidoc[] +// END SSL SECTION -------------------------------------------- -See <> for more information. - -include::{libbeat-dir}/template-config.asciidoc[] - -include::./ilm-reference.asciidoc[] - -include::./jaeger-reference.asciidoc[] - -include::{libbeat-dir}/loggingconfig.asciidoc[] - -include::./configure-kibana-endpoint.asciidoc[] - -include::./configuration-rum.asciidoc[] - -include::{libbeat-dir}/shared-instrumentation.asciidoc[] - -include::{libbeat-dir}/shared-path-config.asciidoc[] +include::./transaction-metrics.asciidoc[] :standalone: include::{libbeat-dir}/shared-env-vars.asciidoc[] diff --git a/docs/copied-from-beats/docs/command-reference.asciidoc b/docs/copied-from-beats/docs/command-reference.asciidoc index a00a2baed24..fbce53a8421 100644 --- a/docs/copied-from-beats/docs/command-reference.asciidoc +++ b/docs/copied-from-beats/docs/command-reference.asciidoc @@ -704,12 +704,10 @@ the end of the file is reached. By default harvesters are closed after `close_inactive` is reached. endif::[] -ifeval::["{beatname_lc}"=="metricbeat"] *`--system.hostfs MOUNT_POINT`*:: -Specifies the mount point of the host's filesystem for use in monitoring a host -from within a container. -endif::[] +Specifies the mount point of the host's filesystem for use in monitoring a host. + ifeval::["{beatname_lc}"=="packetbeat"] *`-t`*:: @@ -1012,7 +1010,7 @@ default config file, +{beatname_lc}.yml+, is used. Enables debugging for the specified selectors. For the selectors, you can specify a comma-separated list of components, or you can use `-d "*"` to enable debugging for all -components. For example, `-d "publish"` displays all the "publish" related +components. For example, `-d "publisher"` displays all the publisher-related messages. *`-e, --e`*:: diff --git a/docs/copied-from-beats/docs/debugging.asciidoc b/docs/copied-from-beats/docs/debugging.asciidoc index abb5ed252d9..08cdc3f7152 100644 --- a/docs/copied-from-beats/docs/debugging.asciidoc +++ b/docs/copied-from-beats/docs/debugging.asciidoc @@ -27,12 +27,12 @@ platform). You can use a different configuration file by specifying the `-c` fla ------------------------------------------------------------ You can increase the verbosity of debug messages by enabling one or more debug -selectors. 
For example, to view the published transactions, you can start {beatname_uc} -with the `publish` selector like this: +selectors. For example, to view publisher-related messages, start {beatname_uc} +with the `publisher` selector: ["source","sh",subs="attributes"] ------------------------------------------------------------ -{beatname_lc} -e -d "publish" +{beatname_lc} -e -d "publisher" ------------------------------------------------------------ If you want all the debugging output (fair warning, it's quite a lot), you can diff --git a/docs/copied-from-beats/docs/howto/load-index-templates.asciidoc b/docs/copied-from-beats/docs/howto/load-index-templates.asciidoc index bd5e249b90c..885432a2671 100644 --- a/docs/copied-from-beats/docs/howto/load-index-templates.asciidoc +++ b/docs/copied-from-beats/docs/howto/load-index-templates.asciidoc @@ -1,7 +1,7 @@ [id="{beatname_lc}-template"] == Load the {es} index template -{es} uses {ref}/indices-templates.html[index templates] to define: +{es} uses {ref}/index-templates.html[index templates] to define: * Settings that control the behavior of your indices. The settings include the lifecycle policy used to manage indices as they grow and age. @@ -48,6 +48,10 @@ If the template already exists, it’s not overwritten unless you configure [[overwrite-template]] === Overwrite an existing index template +WARNING: Do not enable this option for more than one instance of {beatname_uc}. If you start +multiple instances at the same time, it can overload your {es} with too many +template update requests. + To overwrite a template that's already loaded into {es}, set: [source,yaml] diff --git a/docs/copied-from-beats/docs/loggingconfig.asciidoc b/docs/copied-from-beats/docs/loggingconfig.asciidoc index 6c535cfdedc..dabece8e98f 100644 --- a/docs/copied-from-beats/docs/loggingconfig.asciidoc +++ b/docs/copied-from-beats/docs/loggingconfig.asciidoc @@ -68,7 +68,7 @@ messages related to event publishing: ["source","yaml",subs="attributes"] ---- logging.level: debug -logging.selectors: ["publish"] +logging.selectors: ["publisher"] ---- The logs generated by {beatname_uc} are written to the CloudWatch log group for @@ -143,20 +143,38 @@ published. Also logs any warnings, errors, or critical errors. ==== `logging.selectors` The list of debugging-only selector tags used by different {beatname_uc} components. -Use `*` to enable debug output for all components. For example add `publish` to display -all the debug messages related to event publishing. +Use `*` to enable debug output for all components. Use `publisher` to display +debug messages related to event publishing. + +[TIP] +===== +The list of available selectors may change between releases, so avoid creating +tests that depend on specific selectors. + +To see which selectors are available, run {beatname_uc} in debug mode +(set `logging.level: debug` in the configuration). The selector name appears +after the log level and is enclosed in brackets. +===== + +To configure multiple selectors, use the following {beats-ref}/config-file-format.html[YAML list syntax]: +["source","yaml",subs="attributes"] +---- +logging.selectors: [ harvester, input ] +---- + ifndef::serverless[] -When starting {beatname_lc}, selectors can be overwritten using the `-d` command -line option (`-d` also sets the debug log level). +To override selectors at the command line, use the `-d` global flag (`-d` also +sets the debug log level). For more information, see <>. 
endif::serverless[]
 
 [float]
 ==== `logging.metrics.enabled`
 
-If enabled, {beatname_uc} periodically logs its internal metrics that have
+By default, {beatname_uc} periodically logs its internal metrics that have
 changed in the last period. For each metric that changed, the delta from the
 value at the beginning of the period is logged. Also, the total values for all
-non-zero internal metrics are logged on shutdown. The default is true.
+non-zero internal metrics are logged on shutdown. Set this to false to disable
+this behavior. The default is true.
 
 Here is an example log line:
diff --git a/docs/copied-from-beats/docs/monitoring/monitoring-beats.asciidoc b/docs/copied-from-beats/docs/monitoring/monitoring-beats.asciidoc
index ecad33a4a04..6f31c73aa2d 100644
--- a/docs/copied-from-beats/docs/monitoring/monitoring-beats.asciidoc
+++ b/docs/copied-from-beats/docs/monitoring/monitoring-beats.asciidoc
@@ -28,7 +28,6 @@ ifndef::serverless[]
 and sends it directly to your monitoring cluster.
 endif::[]
 
-
 //Commenting out this link temporarily until the general monitoring docs can be
 //updated.
 //To learn about monitoring in general, see
diff --git a/docs/copied-from-beats/docs/monitoring/monitoring-metricbeat.asciidoc b/docs/copied-from-beats/docs/monitoring/monitoring-metricbeat.asciidoc
index d0cf30c9ed5..47168cc1870 100644
--- a/docs/copied-from-beats/docs/monitoring/monitoring-metricbeat.asciidoc
+++ b/docs/copied-from-beats/docs/monitoring/monitoring-metricbeat.asciidoc
@@ -83,6 +83,32 @@ For more information, see <>.
 --
 
+. Configure the host (optional).
+
++
+--
+// tag::set-http-host[]
+If you intend to get metrics using {metricbeat} installed on another server, you need to bind {beatname_uc} to the host's IP:
+
+[source,yaml]
+----------------------------------
+http.host: xxx.xxx.xxx.xxx
+----------------------------------
+// end::set-http-host[]
+--
+
+. Configure the cluster UUID (optional).
+
++
+--
+// tag::set-cluster-uuid[]
+To see the Beats monitoring section in Kibana if you have a cluster, you need to associate {beatname_uc} with the cluster UUID:
+
+[source,yaml]
+----------------------------------
+monitoring.cluster_uuid: "cluster-uuid"
+----------------------------------
+// end::set-cluster-uuid[]
+--
+
 ifndef::serverless[]
 . Start {beatname_uc}.
endif::[] diff --git a/docs/copied-from-beats/docs/outputs-list.asciidoc b/docs/copied-from-beats/docs/outputs-list.asciidoc index bd3b2878aa6..4181c10f64f 100644 --- a/docs/copied-from-beats/docs/outputs-list.asciidoc +++ b/docs/copied-from-beats/docs/outputs-list.asciidoc @@ -83,9 +83,5 @@ ifdef::requires_xpack[] endif::[] include::{libbeat-outputs-dir}/codec/docs/codec.asciidoc[] endif::[] -ifndef::no_kerberos[] -include::{libbeat-dir}/shared-kerberos-config.asciidoc[] -endif::[] - //# end::outputs-include[] diff --git a/docs/copied-from-beats/docs/repositories.asciidoc b/docs/copied-from-beats/docs/repositories.asciidoc index a7104414465..1b27a6c0b44 100644 --- a/docs/copied-from-beats/docs/repositories.asciidoc +++ b/docs/copied-from-beats/docs/repositories.asciidoc @@ -122,7 +122,7 @@ sudo apt-get update && sudo apt-get install {beatname_pkg} -------------------------------------------------- sudo systemctl enable {beatname_pkg} -------------------------------------------------- - ++ If your system does not use `systemd` then run: + ["source","sh",subs="attributes"] @@ -224,7 +224,7 @@ sudo yum install {beatname_pkg} -------------------------------------------------- sudo systemctl enable {beatname_pkg} -------------------------------------------------- - ++ If your system does not use `systemd` then run: + ["source","sh",subs="attributes"] @@ -233,4 +233,3 @@ sudo chkconfig --add {beatname_pkg} -------------------------------------------------- endif::[] - diff --git a/docs/copied-from-beats/docs/security/api-keys.asciidoc b/docs/copied-from-beats/docs/security/api-keys.asciidoc deleted file mode 100644 index 403fd011122..00000000000 --- a/docs/copied-from-beats/docs/security/api-keys.asciidoc +++ /dev/null @@ -1,121 +0,0 @@ -[role="xpack"] -[[beats-api-keys]] -== Grant access using API keys - -Instead of using usernames and passwords, you can use API keys to grant -access to {es} resources. You can set API keys to expire at a certain time, -and you can explicitly invalidate them. Any user with the `manage_api_key` -or `manage_own_api_key` cluster privilege can create API keys. - -{beatname_uc} instances typically send both collected data and monitoring -information to {es}. If you are sending both to the same cluster, you can use the same -API key. For different clusters, you need to use an API key per cluster. - -NOTE: For security reasons, we recommend using a unique API key per {beatname_uc} instance. -You can create as many API keys per user as necessary. 
- -[float] -[[beats-api-key-publish]] -=== Create an API key for publishing -To create an API key to use for writing data to {es}, use the -{ref}/security-api-create-api-key.html[Create API key API], for example: - -[source,console,subs="attributes,callouts"] ------------------------------------------------------------- -POST /_security/api_key -{ - "name": "{beat_default_index_prefix}_host001", <1> - "role_descriptors": { - "{beat_default_index_prefix}_writer": { <2> - "cluster": ["monitor", "read_ilm"], - "index": [ - { - "names": ["{beat_default_index_prefix}-*"], - "privileges": ["view_index_metadata", "create_doc"] - } - ] - } - } -} ------------------------------------------------------------- -<1> Name of the API key -<2> Granted privileges, see <> - -The return value will look something like this: - -[source,console-result,subs="attributes,callouts"] --------------------------------------------------- -{ - "id":"TiNAGG4BaaMdaH1tRfuU", <1> - "name":"{beat_default_index_prefix}_host001", - "api_key":"KnR6yE41RrSowb0kQ0HWoA" <2> -} --------------------------------------------------- -<1> Unique id for this API key -<2> Generated API key - -You can now use this API key in your +{beatname_lc}.yml+ configuration file like this: -["source","yaml"] --------------------- -output.elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1> --------------------- -<1> Format is `id:api_key` (as returned by {ref}/security-api-create-api-key.html[Create API key]) - -[float] -[[beats-api-key-monitor]] -=== Create an API key for monitoring -To create an API key to use for sending monitoring data to {es}, use the -{ref}/security-api-create-api-key.html[Create API key API], for example: - -[source,console,subs="attributes,callouts"] ------------------------------------------------------------- -POST /_security/api_key -{ - "name": "{beat_default_index_prefix}_host001", <1> - "role_descriptors": { - "{beat_default_index_prefix}_monitoring": { <2> - "cluster": ["monitor"], - "index": [ - { - "names": [".monitoring-beats-*"], - "privileges": ["create_index", "create"] - } - ] - } - } -} ------------------------------------------------------------- -<1> Name of the API key -<2> Granted privileges, see <> - -The return value will look something like this: - -[source,console-result,subs="attributes,callouts"] --------------------------------------------------- -{ - "id":"TiNAGG4BaaMdaH1tRfuU", <1> - "name":"{beat_default_index_prefix}_host001", - "api_key":"KnR6yE41RrSowb0kQ0HWoA" <2> -} --------------------------------------------------- -<1> Unique id for this API key -<2> Generated API key - -You can now use this API key in your +{beatname_lc}.yml+ configuration file like this: -["source","yml",subs="attributes"] --------------------- -monitoring.elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1> --------------------- -<1> Format is `id:api_key` (as returned by {ref}/security-api-create-api-key.html[Create API key]) - -[[learn-more-api-keys]] -[float] -=== Learn more about API keys - -See the {es} API key documentation for more information: - -* {ref}/security-api-create-api-key.html[Create API key] -* {ref}/security-api-get-api-key.html[Get API key information] -* {ref}/security-api-invalidate-api-key.html[Invalidate API key] diff --git a/docs/copied-from-beats/docs/shared-docker.asciidoc b/docs/copied-from-beats/docs/shared-docker.asciidoc index b943a58785f..6b73a6d90a7 100644 --- a/docs/copied-from-beats/docs/shared-docker.asciidoc +++ 
b/docs/copied-from-beats/docs/shared-docker.asciidoc @@ -5,13 +5,13 @@ Docker images for {beatname_uc} are available from the Elastic Docker registry. The base image is https://hub.docker.com/_/centos/[centos:7]. A list of all published Docker images and tags is available at -https://www.docker.elastic.co[www.docker.elastic.co]. +https://www.docker.elastic.co[www.docker.elastic.co]. -These images are free to use under the Elastic license. They contain open source -and free commercial features and access to paid commercial features. -{kibana-ref}/managing-licenses.html[Start a 30-day trial] to try out all of the -paid commercial features. See the -https://www.elastic.co/subscriptions[Subscriptions] page for information about +These images are free to use under the Elastic license. They contain open source +and free commercial features and access to paid commercial features. +{kibana-ref}/managing-licenses.html[Start a 30-day trial] to try out all of the +paid commercial features. See the +https://www.elastic.co/subscriptions[Subscriptions] page for information about Elastic license levels. ==== Pull the image @@ -34,8 +34,8 @@ docker pull {dockerimage} ------------------------------------------------ Alternatively, you can download other Docker images that contain only features -available under the Apache 2.0 license. To download the images, go to -https://www.docker.elastic.co[www.docker.elastic.co]. +available under the Apache 2.0 license. To download the images, go to +https://www.docker.elastic.co[www.docker.elastic.co]. endif::[] @@ -129,7 +129,7 @@ endif::apm-server[] ==== Configure {beatname_uc} on Docker The Docker image provides several methods for configuring {beatname_uc}. The -conventional approach is to provide a configuration file via a volume mount, but +conventional approach is to provide a configuration file via a volume mount, but it's also possible to create a custom image with your configuration included. @@ -244,6 +244,7 @@ ifeval::["{beatname_lc}"=="apm-server"] ["source", "sh", subs="attributes"] -------------------------------------------- docker run -d \ + -p 8200:8200 \ --name={beatname_lc} \ --user={beatname_lc} \ --volume="$(pwd)/{beatname_lc}.docker.yml:/usr/share/{beatname_lc}/{beatname_lc}.yml:ro" \ diff --git a/docs/copied-from-beats/docs/shared-path-config.asciidoc b/docs/copied-from-beats/docs/shared-path-config.asciidoc index e6264dbca6f..33390ca7faa 100644 --- a/docs/copied-from-beats/docs/shared-path-config.asciidoc +++ b/docs/copied-from-beats/docs/shared-path-config.asciidoc @@ -106,3 +106,20 @@ Example: ------------------------------------------------------------------------------ path.logs: /var/log/beats ------------------------------------------------------------------------------ + +[float] +==== `system.hostfs` + +Specifies the mount point of the host's filesystem for use in monitoring a host. +This can either be set in the config, or with the `--system.hostfs` CLI flag. This is used for cgroup self-monitoring. +ifeval::["{beatname_lc}"=="metricbeat"] +This is also used by the system module to read files from `/proc` and `/sys`. 
+endif::[] + + +Example: + +[source,yaml] +------------------------------------------------------------------------------ +system.hostfs: /mount/rootfs +------------------------------------------------------------------------------ diff --git a/docs/copied-from-beats/docs/shared-securing-beat.asciidoc b/docs/copied-from-beats/docs/shared-securing-beat.asciidoc index b8dcc3b1957..e9889c89e16 100644 --- a/docs/copied-from-beats/docs/shared-securing-beat.asciidoc +++ b/docs/copied-from-beats/docs/shared-securing-beat.asciidoc @@ -29,11 +29,13 @@ For secure communication between APM Server and APM Agents, see <> endif::[] +endif::[] // APM HTTPS information ifdef::beat-specific-security[] @@ -42,18 +44,19 @@ endif::[] -- -// APM privileges ifdef::apm-server[] +// APM privileges include::{docdir}/feature-roles.asciidoc[] +// APM API keys +include::{docdir}/api-keys.asciidoc[] endif::[] -// Beat privileges ifndef::apm-server[] +// Beat privileges include::./security/users.asciidoc[] -endif::[] - -// API Keys +// Beat API keys include::./security/api-keys.asciidoc[] +endif::[] // APM Agent security ifdef::apm-server[] @@ -70,5 +73,7 @@ endif::[] // Linux Seccomp ifndef::serverless[] +ifndef::win_only[] include::./security/linux-seccomp.asciidoc[] endif::[] +endif::[] diff --git a/docs/copied-from-beats/docs/shared-ssl-config.asciidoc b/docs/copied-from-beats/docs/shared-ssl-config.asciidoc index f850aeedd68..ec0690397a5 100644 --- a/docs/copied-from-beats/docs/shared-ssl-config.asciidoc +++ b/docs/copied-from-beats/docs/shared-ssl-config.asciidoc @@ -87,49 +87,34 @@ Example module with SSL enabled: ---- endif::[] -[float] -=== Configuration options - -You can specify the following options in the `ssl` section of the +{beatname_lc}.yml+ config file: - -[float] -==== `enabled` +There are a number of SSL configuration options available to you: -The `enabled` setting can be used to disable the ssl configuration by setting -it to `false`. The default value is `true`. +* <> +* <> +* <> -NOTE: SSL settings are disabled if either `enabled` is set to `false` or the -`ssl` section is missing. - -[float] -==== `certificate_authorities` +[discrete] +[[ssl-common-config]] +=== Common configuration options -The list of root certificates for server verifications. If `certificate_authorities` is empty or not set, the trusted certificate authorities of the host system are used. +Common SSL configuration options can be used in both client and server configurations. +You can specify the following options in the `ssl` section of each subsystem that +supports SSL. [float] -[[certificate]] -==== `certificate: "/etc/pki/client/cert.pem"` - -The path to the certificate for SSL client authentication. If the certificate -is not specified, client authentication is not available. The connection -might fail if the server requests client authentication. If the SSL server does not -require client authentication, the certificate will be loaded, but not requested or used -by the server. - -When this option is configured, the <> option is also required. - -[float] -[[key]] -==== `key: "/etc/pki/client/cert.key"` - -The client certificate key used for client authentication. This option is required if <> is specified. +[[enabled]] +==== `enabled` -[float] -==== `key_passphrase` +To disable SSL configuration, set the value to `false`. The default value is `true`. -The passphrase used to decrypt an encrypted key stored in the configured `key` file. 
+[NOTE]
+=====
+SSL settings are disabled if either `enabled` is set to `false` or the
+`ssl` section is missing.
+=====
 
 [float]
+[[supported-protocols]]
 ==== `supported_protocols`
 
 List of allowed SSL/TLS versions. If SSL/TLS server decides for protocol versions
@@ -141,17 +126,7 @@ setting is a list of allowed protocol versions:
 The default value is `[TLSv1.1, TLSv1.2, TLSv1.3]`.
 
 [float]
-==== `verification_mode`
-
-This option controls whether the client verifies server certificates and host
-names. Valid values are `none` and `full`. If `verification_mode` is set
-to `none`, all server host names and certificates are accepted. In this mode,
-TLS-based connections are susceptible to man-in-the-middle attacks. Use this
-option for testing only.
-
-The default is `full`.
-
-[float]
+[[cipher-suites]]
 ==== `cipher_suites`
 
 The list of cipher suites to use. The first entry has the highest priority.
@@ -220,6 +195,7 @@ Here is a list of acronyms used in defining the cipher suites:
 // end::cipher_suites[]
 
 [float]
+[[curve-types]]
 ==== `curve_types`
 
 The list of curve types for ECDHE (Elliptic Curve Diffie-Hellman ephemeral key exchange).
@@ -232,17 +208,7 @@ The following elliptic curve types are available:
 * X25519
 
 [float]
-==== `renegotiation`
-
-This configures what types of TLS renegotiation are supported. The valid options
-are `never`, `once`, and `freely`. The default value is never.
-
-* `never` - Disables renegotiation.
-* `once` - Allows a remote server to request renegotiation once per connection.
-* `freely` - Allows a remote server to repeatedly request renegotiation.
-
-
-[float]
+[[ca-sha256]]
 ==== `ca_sha256`
 
 This configures a certificate pin that you can use to ensure that a specific certificate is part of the verified chain.
@@ -253,18 +219,352 @@ NOTE: This check is not a replacement for the normal SSL validation, but it adds
 If this option is used with `verification_mode` set to `none`, the check will
 always fail because it will not receive any verified chains.
 
+[discrete]
+[[ssl-client-config]]
+=== Client configuration options
+
+You can specify the following options in the `ssl` section of each subsystem that
+supports SSL.
+
+[float]
+[[client-certificate-authorities]]
+==== `certificate_authorities`
+
+The list of root certificates used for server verification. If `certificate_authorities` is empty or not set, the
+system keystore is used. If `certificate_authorities` is self-signed, the host system
+needs to trust that CA cert as well.
+
+By default you can specify a list of files that +{beatname_lc}+ will read, but you
+can also embed a certificate directly in the `YAML` configuration:
+
+[source,yaml]
+----
+certificate_authorities:
+  - |
+    -----BEGIN CERTIFICATE-----
+    MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF
+    ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2
+    MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB
+    BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n
+    fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl
+    94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t
+    /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP
+    PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41
+    CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O
+    BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux
+    8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D
+    874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw
+    3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA
+    H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu
+    8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0
+    yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk
+    sxSmbIUfc2SGJGCJD4I=
+    -----END CERTIFICATE-----
+----
+
+[float]
+[[client-certificate]]
+==== `certificate: "/etc/pki/client/cert.pem"`
+
+The path to the certificate for SSL client authentication is only required if
+`client_authentication` is specified. If the certificate
+is not specified, client authentication is not available. The connection
+might fail if the server requests client authentication. If the SSL server does not
+require client authentication, the certificate will be loaded, but not requested or used
+by the server.
+
+When this option is configured, the <> option is also required.
+The certificate option supports embedding of the certificate:
+
+[source,yaml]
+----
+certificate: |
+  -----BEGIN CERTIFICATE-----
+  MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF
+  ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2
+  MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB
+  BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n
+  fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl
+  94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t
+  /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP
+  PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41
+  CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O
+  BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux
+  8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D
+  874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw
+  3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA
+  H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu
+  8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0
+  yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk
+  sxSmbIUfc2SGJGCJD4I=
+  -----END CERTIFICATE-----
+----
+
+[float]
+[[client-key]]
+==== `key: "/etc/pki/client/cert.key"`
+
+The client certificate key is used for client authentication and is only required
+if `client_authentication` is configured.
The key option support embedding of the private key: + +[source,yaml] +---- +key: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDXHufGPycpCOfI + sjl6cRn8NP4DLxdIVEAHFK0jMRDup32UQOPW+DleEsFpgN9/ebi9ngdjQfMvKnUP + Zrl1HTwVhOJfazGeoJn7vdDeQebhJfeDXHwX2DiotXyUPYu1ioU45UZDAoAZFj5F + KJLwWRUbfEbRe8yO+wUhKKxxkApPbfw+wUtBicn1RIX7W1nBRABt1UXKDIRe5FM2 + MKfqhEqK4hUWC3g1r+vGTrxu3qFpzz7L2UrRFRIpo7yuTUhEhEGvcVsiTppTil4Z + HcprXFHf5158elEwhYJ5IM0nU1leNQiOgemifbLwkyNkLqCKth8V/4sezr1tYblZ + nMh1cclBAgMBAAECggEBAKdP5jyOicqknoG9/G564RcDsDyRt64NuO7I6hBg7SZx + Jn7UKWDdFuFP/RYtoabn6QOxkVVlydp5Typ3Xu7zmfOyss479Q/HIXxmmbkD0Kp0 + eRm2KN3y0b6FySsS40KDRjKGQCuGGlNotW3crMw6vOvvsLTlcKgUHF054UVCHoK/ + Piz7igkDU7NjvJeha53vXL4hIjb10UtJNaGPxIyFLYRZdRPyyBJX7Yt3w8dgz8WM + epOPu0dq3bUrY3WQXcxKZo6sQjE1h7kdl4TNji5jaFlvD01Y8LnyG0oThOzf0tve + Gaw+kuy17gTGZGMIfGVcdeb+SlioXMAAfOps+mNIwTECgYEA/gTO8W0hgYpOQJzn + BpWkic3LAoBXWNpvsQkkC3uba8Fcps7iiEzotXGfwYcb5Ewf5O3Lrz1EwLj7GTW8 + VNhB3gb7bGOvuwI/6vYk2/dwo84bwW9qRWP5hqPhNZ2AWl8kxmZgHns6WTTxpkRU + zrfZ5eUrBDWjRU2R8uppgRImsxMCgYEA2MxuL/C/Ko0d7XsSX1kM4JHJiGpQDvb5 + GUrlKjP/qVyUysNF92B9xAZZHxxfPWpdfGGBynhw7X6s+YeIoxTzFPZVV9hlkpAA + 5igma0n8ZpZEqzttjVdpOQZK8o/Oni/Q2S10WGftQOOGw5Is8+LY30XnLvHBJhO7 + TKMurJ4KCNsCgYAe5TDSVmaj3dGEtFC5EUxQ4nHVnQyCpxa8npL+vor5wSvmsfUF + hO0s3GQE4sz2qHecnXuPldEd66HGwC1m2GKygYDk/v7prO1fQ47aHi9aDQB9N3Li + e7Vmtdn3bm+lDjtn0h3Qt0YygWj+wwLZnazn9EaWHXv9OuEMfYxVgYKpdwKBgEze + Zy8+WDm5IWRjn8cI5wT1DBT/RPWZYgcyxABrwXmGZwdhp3wnzU/kxFLAl5BKF22T + kRZ+D+RVZvVutebE9c937BiilJkb0AXLNJwT9pdVLnHcN2LHHHronUhV7vetkop+ + kGMMLlY0lkLfoGq1AxpfSbIea9KZam6o6VKxEnPDAoGAFDCJm+ZtsJK9nE5GEMav + NHy+PwkYsHhbrPl4dgStTNXLenJLIJ+Ke0Pcld4ZPfYdSyu/Tv4rNswZBNpNsW9K + 0NwJlyMBfayoPNcJKXrH/csJY7hbKviAHr1eYy9/8OL0dHf85FV+9uY5YndLcsDc + nygO9KTJuUiBrLr0AHEnqko= + -----END PRIVATE KEY----- +---- + +[float] +[[client-key-passphrase]] +==== `key_passphrase` + +The passphrase used to decrypt an encrypted key stored in the configured `key` file. + + +[float] +[[client-verification-mode]] +==== `verification_mode` + +Controls the verification of server certificates. Valid values are: + +`full`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. + +`strict`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. If the Subject Alternative +Name is empty, it returns an error. + +`certificate`:: +Verifies that the provided certificate is signed by a +trusted authority (CA), but does not perform any hostname verification. + +`none`:: +Performs _no verification_ of the server's certificate. This +mode disables many of the security benefits of SSL/TLS and should only be used +after cautious consideration. It is primarily intended as a temporary +diagnostic mechanism when attempting to resolve TLS errors; its use in +production environments is strongly discouraged. ++ +The default value is `full`. + +[discrete] +[[ssl-server-config]] +=== Server configuration options + +You can specify the following options in the `ssl` section of each subsystem that +supports SSL. + +[float] +[[server-certificate-authorities]] +==== `certificate_authorities` + +The list of root certificates for client verifications is only required if +`client_authentication` is configured. 
If `certificate_authorities` is empty or not set, and +`client_authentication` is configured, the system keystore is used. + +If `certificate_authorities` is self-signed, the host system needs to trust that CA cert as well. +By default you can specify a list of files that +{beatname_lc}+ will read, but you can also embed a certificate +directly in the `YAML` configuration: + +[source,yaml] +---- +certificate_authorities: + - | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF + ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2 + MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB + BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n + fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl + 94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t + /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP + PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41 + CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O + BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux + 8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D + 874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw + 3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA + H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu + 8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0 + yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk + sxSmbIUfc2SGJGCJD4I= + -----END CERTIFICATE----- +---- + +[float] +[[server-certificate]] +==== `certificate: "/etc/pki/server/cert.pem"` + +For server authentication, the path to the SSL authentication certificate must +be specified for TLS. If the certificate is not specified, startup will fail. + +When this option is configured, the <> option is also required. +The certificate option support embedding of the certificate: + +[source,yaml] +---- +certificate: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF + ADAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwIBcNMTkwNzIyMTkyOTA0WhgPMjExOTA2 + MjgxOTI5MDRaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB + BQADggEPADCCAQoCggEBANce58Y/JykI58iyOXpxGfw0/gMvF0hUQAcUrSMxEO6n + fZRA49b4OV4SwWmA3395uL2eB2NB8y8qdQ9muXUdPBWE4l9rMZ6gmfu90N5B5uEl + 94NcfBfYOKi1fJQ9i7WKhTjlRkMCgBkWPkUokvBZFRt8RtF7zI77BSEorHGQCk9t + /D7BS0GJyfVEhftbWcFEAG3VRcoMhF7kUzYwp+qESoriFRYLeDWv68ZOvG7eoWnP + PsvZStEVEimjvK5NSESEQa9xWyJOmlOKXhkdymtcUd/nXnx6UTCFgnkgzSdTWV41 + CI6B6aJ9svCTI2QuoIq2HxX/ix7OvW1huVmcyHVxyUECAwEAAaNTMFEwHQYDVR0O + BBYEFPwN1OceFGm9v6ux8G+DZ3TUDYxqMB8GA1UdIwQYMBaAFPwN1OceFGm9v6ux + 8G+DZ3TUDYxqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG5D + 874A4YI7YUwOVsVAdbWtgp1d0zKcPRR+r2OdSbTAV5/gcS3jgBJ3i1BN34JuDVFw + 3DeJSYT3nxy2Y56lLnxDeF8CUTUtVQx3CuGkRg1ouGAHpO/6OqOhwLLorEmxi7tA + H2O8mtT0poX5AnOAhzVy7QW0D/k4WaoLyckM5hUa6RtvgvLxOwA0U+VGurCDoctu + 8F4QOgTAWyh8EZIwaKCliFRSynDpv3JTUwtfZkxo6K6nce1RhCWFAsMvDZL8Dgc0 + yvgJ38BRsFOtkRuAGSf6ZUwTO8JJRRIFnpUzXflAnGivK9M13D5GEQMmIl6U9Pvk + sxSmbIUfc2SGJGCJD4I= + -----END CERTIFICATE----- +---- + +[float] +[[server-key]] +==== `key: "/etc/pki/server/cert.key"` + +The server certificate key used for authentication is required. 
+The key option support embedding of the private key: + +[source,yaml] +---- +key: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDXHufGPycpCOfI + sjl6cRn8NP4DLxdIVEAHFK0jMRDup32UQOPW+DleEsFpgN9/ebi9ngdjQfMvKnUP + Zrl1HTwVhOJfazGeoJn7vdDeQebhJfeDXHwX2DiotXyUPYu1ioU45UZDAoAZFj5F + KJLwWRUbfEbRe8yO+wUhKKxxkApPbfw+wUtBicn1RIX7W1nBRABt1UXKDIRe5FM2 + MKfqhEqK4hUWC3g1r+vGTrxu3qFpzz7L2UrRFRIpo7yuTUhEhEGvcVsiTppTil4Z + HcprXFHf5158elEwhYJ5IM0nU1leNQiOgemifbLwkyNkLqCKth8V/4sezr1tYblZ + nMh1cclBAgMBAAECggEBAKdP5jyOicqknoG9/G564RcDsDyRt64NuO7I6hBg7SZx + Jn7UKWDdFuFP/RYtoabn6QOxkVVlydp5Typ3Xu7zmfOyss479Q/HIXxmmbkD0Kp0 + eRm2KN3y0b6FySsS40KDRjKGQCuGGlNotW3crMw6vOvvsLTlcKgUHF054UVCHoK/ + Piz7igkDU7NjvJeha53vXL4hIjb10UtJNaGPxIyFLYRZdRPyyBJX7Yt3w8dgz8WM + epOPu0dq3bUrY3WQXcxKZo6sQjE1h7kdl4TNji5jaFlvD01Y8LnyG0oThOzf0tve + Gaw+kuy17gTGZGMIfGVcdeb+SlioXMAAfOps+mNIwTECgYEA/gTO8W0hgYpOQJzn + BpWkic3LAoBXWNpvsQkkC3uba8Fcps7iiEzotXGfwYcb5Ewf5O3Lrz1EwLj7GTW8 + VNhB3gb7bGOvuwI/6vYk2/dwo84bwW9qRWP5hqPhNZ2AWl8kxmZgHns6WTTxpkRU + zrfZ5eUrBDWjRU2R8uppgRImsxMCgYEA2MxuL/C/Ko0d7XsSX1kM4JHJiGpQDvb5 + GUrlKjP/qVyUysNF92B9xAZZHxxfPWpdfGGBynhw7X6s+YeIoxTzFPZVV9hlkpAA + 5igma0n8ZpZEqzttjVdpOQZK8o/Oni/Q2S10WGftQOOGw5Is8+LY30XnLvHBJhO7 + TKMurJ4KCNsCgYAe5TDSVmaj3dGEtFC5EUxQ4nHVnQyCpxa8npL+vor5wSvmsfUF + hO0s3GQE4sz2qHecnXuPldEd66HGwC1m2GKygYDk/v7prO1fQ47aHi9aDQB9N3Li + e7Vmtdn3bm+lDjtn0h3Qt0YygWj+wwLZnazn9EaWHXv9OuEMfYxVgYKpdwKBgEze + Zy8+WDm5IWRjn8cI5wT1DBT/RPWZYgcyxABrwXmGZwdhp3wnzU/kxFLAl5BKF22T + kRZ+D+RVZvVutebE9c937BiilJkb0AXLNJwT9pdVLnHcN2LHHHronUhV7vetkop+ + kGMMLlY0lkLfoGq1AxpfSbIea9KZam6o6VKxEnPDAoGAFDCJm+ZtsJK9nE5GEMav + NHy+PwkYsHhbrPl4dgStTNXLenJLIJ+Ke0Pcld4ZPfYdSyu/Tv4rNswZBNpNsW9K + 0NwJlyMBfayoPNcJKXrH/csJY7hbKviAHr1eYy9/8OL0dHf85FV+9uY5YndLcsDc + nygO9KTJuUiBrLr0AHEnqko= + -----END PRIVATE KEY----- +---- + +[float] +[[server-key-passphrase]] +==== `key_passphrase` + +The passphrase is used to decrypt an encrypted key stored in the configured `key` file. + +[float] +[[server-verification-mode]] +==== `verification_mode` + +Controls the verification of client certificates. Valid values are: + +`full`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. + +`strict`:: +Verifies that the provided certificate is signed by a trusted +authority (CA) and also verifies that the server's hostname (or IP address) +matches the names identified within the certificate. If the Subject Alternative +Name is empty, it returns an error. + +`certificate`:: +Verifies that the provided certificate is signed by a +trusted authority (CA), but does not perform any hostname verification. + +`none`:: +Performs _no verification_ of the server's certificate. This +mode disables many of the security benefits of SSL/TLS and should only be used +after cautious consideration. It is primarily intended as a temporary +diagnostic mechanism when attempting to resolve TLS errors; its use in +production environments is strongly discouraged. ++ +The default value is `full`. + +[float] +[[server-renegotiation]] +==== `renegotiation` + +This configures what types of TLS renegotiation are supported. The valid options +are: + +`never`:: +Disables renegotiation. + +`once`:: +Allows a remote server to request renegotiation once per connection. + +`freely`:: +Allows a remote server to request renegotiation repeatedly. ++ +The default value is `never`. 
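+
+For example, a server-side `ssl` section combining the options above might look
+like the following sketch (the `apm-server` namespace, the file paths, and the
+passphrase shown are illustrative):
+
+[source,yaml]
+----
+apm-server:
+  ssl:
+    enabled: true
+    # Illustrative paths and passphrase; substitute your own PEM files.
+    certificate: "/etc/pki/server/cert.pem"
+    key: "/etc/pki/server/cert.key"
+    key_passphrase: "changeme"
+    supported_protocols: [TLSv1.2, TLSv1.3]
+----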
ifeval::["{beatname_lc}" == "filebeat"] [float] +[[server-client-renegotiation]] ==== `client_authentication` -This configures what types of client authentication are supported. The valid options -are `none`, `optional`, and `required`. When `certificate_authorities` is set it will -default to `required` otherwise it will be set to `none`. +The type of client authentication mode. When `certificate_authorities` is set, it +defaults to `required`. Otherwise, it defaults to `none`. + +The valid options are: + +`none`:: +Disables client authentication. -NOTE: This option is only valid with the TCP or the Syslog input. +`optional`:: +When a client certificate is supplied, the server will verify it. -* `none` - Disables client authentication. -* `optional` - When a client certificate is given, the server will verify it. -* `required` - Will require clients to provide a valid certificate. +`required`:: +Will require clients to provide a valid certificate. endif::[] diff --git a/docs/copied-from-beats/docs/template-config.asciidoc b/docs/copied-from-beats/docs/template-config.asciidoc index 3271d567c2a..5699a46dd76 100644 --- a/docs/copied-from-beats/docs/template-config.asciidoc +++ b/docs/copied-from-beats/docs/template-config.asciidoc @@ -7,7 +7,7 @@ ++++ The `setup.template` section of the +{beatname_lc}.yml+ config file specifies -the {ref}/indices-templates.html[index template] to use for setting +the {ref}/index-templates.html[index template] to use for setting mappings in Elasticsearch. If template loading is enabled (the default), {beatname_uc} loads the index template automatically after successfully connecting to Elasticsearch. @@ -23,9 +23,16 @@ endif::[] You can adjust the following settings to load your own template or overwrite an existing one. -*`setup.template.enabled`*:: Set to false to disable template loading. If set this to false, +*`setup.template.enabled`*:: Set to false to disable template loading. If this is set to false, you must <>. +ifndef::apm-server[] +*`setup.template.type`*:: The type of template to use. Available options: `legacy` (default), index templates +before Elasticsearch v7.8. Use this to avoid breaking existing deployments. New options are `component` +and `index`. Selecting `component` loads a component template which can be included in new index templates. +The option `index` loads the new index template. +endif::[] + *`setup.template.name`*:: The name of the template. The default is +{beatname_lc}+. The {beatname_uc} version is always appended to the given name, so the final name is +{beatname_lc}-%{[{beat_version_key}]}+. @@ -55,7 +62,8 @@ relative path is set, it is considered relative to the config path. See the <Kafka ++++ -[IMPORTANT] -.Known issue in version 7.8.0 -==== -The Kafka output fails to connect when using multiple TLS brokers. We advise -not to upgrade to {beatname_uc} 7.8.0 if you're using the Kafka output in this -configuration. -==== - The Kafka output sends events to Apache Kafka. To use this output, edit the {beatname_uc} configuration file to disable the {es} @@ -79,12 +71,29 @@ See <> for information on supported versions. ===== `username` The username for connecting to Kafka. If username is configured, the password -must be configured as well. Only SASL/PLAIN is supported. +must be configured as well. ===== `password` The password for connecting to Kafka. +===== `sasl.mechanism` + +beta[] + +The SASL mechanism to use when connecting to Kafka. It can be one of: + +* `PLAIN` for SASL/PLAIN. +* `SCRAM-SHA-256` for SCRAM-SHA-256. 
+* `SCRAM-SHA-512` for SCRAM-SHA-512.
+
+If `sasl.mechanism` is not set, `PLAIN` is used if `username` and `password`
+are provided. Otherwise, SASL authentication is disabled.
+
+To use the `GSSAPI` mechanism to authenticate with Kerberos, you must leave this
+field empty, and use the <<kerberos-option-kafka,`kerberos`>> options.
+
+
 [[topic-option-kafka]]
 ===== `topic`

@@ -232,6 +241,19 @@ Set `max_retries` to a value less than 0 to retry until all events are published
 The default is 3.
 endif::[]

+===== `backoff.init`
+
+The number of seconds to wait before trying to republish to Kafka
+after a network error. After waiting `backoff.init` seconds, {beatname_uc}
+tries to republish. If the attempt fails, the backoff timer is increased
+exponentially up to `backoff.max`. After a successful publish, the backoff
+timer is reset. The default is 1s.
+
+===== `backoff.max`
+
+The maximum number of seconds to wait before attempting to republish to
+Kafka after a network error. The default is 60s.
+
 ===== `bulk_max_size`

 The maximum number of events to bulk in a single Kafka request. The default is 2048.

@@ -261,6 +283,12 @@ The keep-alive period for an active network connection. If 0s, keep-alives are d
 Sets the output compression codec. Must be one of `none`, `snappy`, `lz4` and `gzip`. The default is `gzip`.

+[IMPORTANT]
+.Known issue with Azure Event Hub for Kafka
+====
+When targeting Azure Event Hub for Kafka, set `compression` to `none` as the provided codecs are not supported.
+====
+
 ===== `compression_level`

 Sets the compression level used by gzip. Setting this value to 0 disables compression.

@@ -281,6 +309,12 @@ The ACK reliability level required from broker. 0=no response, 1=wait for local
 Note: If set to 0, no ACKs are returned by Kafka. Messages might be lost silently on error.

+===== `enable_krb5_fast`
+
+beta[]
+
+Enable Kerberos FAST authentication. This may conflict with some Active Directory installations. It is separate from the standard Kerberos settings because this flag only applies to the Kafka output. The default is `false`.
+
 ===== `ssl`

 Configuration options for SSL parameters like the root CA for Kafka connections.
@@ -288,3 +322,12 @@ Configuration options for SSL parameters like the root CA for Kafka connections.
 `-keyalg RSA` argument to ensure it uses a cipher supported by
 https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-cant-sarama-connect-to-my-kafka-cluster-using-ssl[Filebeat's Kafka library].
 See <> for more information.
+
+[[kerberos-option-kafka]]
+===== `kerberos`
+
+beta[]
+
+Configuration options for Kerberos authentication.
+
+See <> for more information.
diff --git a/docs/data-ingestion.asciidoc b/docs/data-ingestion.asciidoc
index 924bd56af1e..f658c41d3b7 100644
--- a/docs/data-ingestion.asciidoc
+++ b/docs/data-ingestion.asciidoc
@@ -72,13 +72,14 @@ Read more in the {apm-agents-ref}/index.html[agents documentation].

 [[adjust-event-rate]]
 [float]
-==== Adjust RUM event rate limit
+==== Adjust anonymous auth rate limit

-Agents make use of long running requests and flush as many events over a single request as possible. Thus, the rate limiter for RUM is bound to the number of _events_ sent per second, per IP.
+Agents make use of long-running requests and flush as many events over a single request as possible.
+Thus, the rate limiter for anonymous authentication is bound to the number of _events_ sent per second, per IP.

-If the rate limit is hit while events on an established request are sent, the request is not immediately terminated.
The intake of events is only throttled to <>, which means that events are queued and processed slower. Only when the allowed buffer queue is also full, does the request get terminated with a `429 - rate limit exceeded` HTTP response. If an agent tries to establish a new request, but the rate limit is already hit, a `429` will be sent immediately. +If the event rate limit is hit while events on an established request are sent, the request is not immediately terminated. The intake of events is only throttled to <>, which means that events are queued and processed slower. Only when the allowed buffer queue is also full, does the request get terminated with a `429 - rate limit exceeded` HTTP response. If an agent tries to establish a new request, but the rate limit is already hit, a `429` will be sent immediately. -Increasing the <> default value will help avoid `rate limit exceeded` errors. +Increasing the <> default value will help avoid `rate limit exceeded` errors. [[tune-es]] === Tune Elasticsearch diff --git a/docs/data/elasticsearch/generated/errors.json b/docs/data/elasticsearch/generated/errors.json index 041a2eebf68..c3a79d58b14 100644 --- a/docs/data/elasticsearch/generated/errors.json +++ b/docs/data/elasticsearch/generated/errors.json @@ -9,10 +9,11 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", + "grouping_name": "Cannot read property 'baz' of undefined", "id": "0f0e9d67c1854d21a6f44673ed561ec8", "log": { "level": "custom log level", @@ -40,6 +41,7 @@ "tag1": "one", "tag2": 2 }, + "message": "Cannot read property 'baz' of undefined", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", @@ -95,6 +97,7 @@ "geo": { "continent_name": "North America", "country_iso_code": "US", + "country_name": "United States", "location": { "lat": 37.751, "lon": -97.822 @@ -106,7 +109,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "culprit": "my.module.function_name", @@ -194,6 +197,7 @@ } ], "grouping_key": "50f62f37edffc4630c6655ba3ecfcf46", + "grouping_name": "My service could not talk to the database named foobar", "id": "5f0e9d64c1854d21a6f44673ed561ec8", "log": { "level": "warning", @@ -216,7 +220,6 @@ "exclude_from_grouping": false, "filename": "/webpack/file/name.py", "function": "foo", - "library_frame": false, "line": { "column": 4, "context": "line3", @@ -261,10 +264,6 @@ } } ] - }, - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" } }, "event": { @@ -279,9 +278,7 @@ }, "http": { "request": { - "body": { - "original": "Hello World" - }, + "body.original": "Hello World", "cookies": { "c1": "v1", "c2": "v2" @@ -309,12 +306,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -340,6 +333,7 @@ "tag1": "one", "tag2": 2 }, + "message": "My service could not talk to the database named foobar", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", @@ -420,7 +414,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -429,6 +423,7 @@ } ], "grouping_key": "18f82051862e494727fa20e0adc15711", + "grouping_name": null, "id": 
"7f0e9d68c1854d21a6f44673ed561ec8" }, "event": { @@ -507,7 +502,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -517,6 +512,7 @@ } ], "grouping_key": "f6b5a2877d9b00d5b32b44c9db039f11", + "grouping_name": "foo is not defined", "id": "8f0e9d68c1854d21a6f44673ed561ec8" }, "event": { @@ -540,6 +536,7 @@ "tag1": "one", "tag2": 2 }, + "message": "foo is not defined", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", diff --git a/docs/data/elasticsearch/generated/metricsets.json b/docs/data/elasticsearch/generated/metricsets.json deleted file mode 100644 index 3d7bd1ffa4d..00000000000 --- a/docs/data/elasticsearch/generated/metricsets.json +++ /dev/null @@ -1,235 +0,0 @@ -[ - { - "@timestamp": "2017-05-30T18:53:41.364Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-04-22T14:55:05.425020Z" - }, - "go": { - "memstats": { - "heap": { - "sys": { - "bytes": 6520832.0 - } - } - } - }, - "host": { - "ip": "127.0.0.1" - }, - "labels": { - "tag1": "one", - "tag2": 2 - }, - "observer": { - "ephemeral_id": "8785cbe1-7f89-4279-84c2-6c33979531fb", - "hostname": "ix.lan", - "id": "b0cfe4b7-76c9-4159-95ff-e558db368cbe", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - }, - { - "@timestamp": "2017-05-30T18:53:41.366Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-09-08T15:57:10.396695Z" - }, - "host": { - "ip": "127.0.0.1" - }, - "labels": { - "tag1": "one", - "tag2": 2 - }, - "observer": { - "ephemeral_id": "2f30050f-81e6-491a-a54f-e7d94eec17b5", - "hostname": "simmac.net", - "id": "02f6cb38-c1ce-4382-9478-4c8b4cdbda9c", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "system": { - "process": { - "cgroup": { - "memory": { - "mem": { - "limit": { - "bytes": 2048 - }, - "usage": { - "bytes": 1024 - } - }, - "stats": { - "inactive_file": { - "bytes": 48 - } - } - } - } - } - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - }, - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "byte_counter": 1, - "dotted": { - "float": { - "gauge": 6.12 - } - }, - "double_gauge": 3.141592653589793, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-04-22T14:55:05.368308Z" - }, - "float_gauge": 9.16, - "host": { - "ip": "127.0.0.1" - }, - "integer_gauge": 42767, - "labels": { - "code": 200, - "some": "abc", - "success": true, - "tag1": "one", - "tag2": 2 - }, - "long_gauge": 3147483648.0, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } - }, - "observer": { - "ephemeral_id": "8785cbe1-7f89-4279-84c2-6c33979531fb", - "hostname": "ix.lan", - "id": "b0cfe4b7-76c9-4159-95ff-e558db368cbe", - 
"type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "short_counter": 227, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 633.288 - } - }, - "subtype": "mysql", - "type": "db" - }, - "transaction": { - "breakdown": { - "count": 12 - }, - "duration": { - "count": 2, - "sum": { - "us": 12 - } - }, - "name": "GET /", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } - }, - "type": "request" - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - } -] \ No newline at end of file diff --git a/docs/data/elasticsearch/generated/spans.json b/docs/data/elasticsearch/generated/spans.json index 788cc4795b5..95620eef2af 100644 --- a/docs/data/elasticsearch/generated/spans.json +++ b/docs/data/elasticsearch/generated/spans.json @@ -6,11 +6,19 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "status_code": 200 + } + }, "labels": { "span_tag": "something" }, @@ -47,14 +55,12 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "status_code": 200 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "0aaaaaaaaaaaaaaa", "name": "SELECT FROM product_types", "stacktrace": [ @@ -108,6 +114,9 @@ }, "transaction": { "id": "945254c567a5417e" + }, + "url": { + "original": "http://localhost:8000" } }, { @@ -122,7 +131,7 @@ "port": 5432 }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -191,7 +200,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -244,7 +253,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -303,7 +312,7 @@ ] }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" diff --git a/docs/data/elasticsearch/generated/transactions.json b/docs/data/elasticsearch/generated/transactions.json index 024ab9d4273..741ab42b26c 100644 --- a/docs/data/elasticsearch/generated/transactions.json +++ b/docs/data/elasticsearch/generated/transactions.json @@ -9,7 +9,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391451Z", @@ -105,7 +105,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391568Z", @@ -179,7 +179,6 @@ "id": "85925e55b43f4341", "name": "GET /api/types", "result": "200", - "sampled": false, "span_count": { "started": 0 }, @@ -201,7 +200,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391639Z", @@ -298,6 +297,7 @@ "geo": { "continent_name": "North America", "country_iso_code": "US", + "country_name": "United States", "location": { "lat": 37.751, "lon": -97.822 @@ -309,7 +309,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.338986Z", @@ -324,14 +324,12 @@ }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" 
- }, - "str": "hello world" - } + "body.original": { + "additional": { + "bar": 123, + "req": "additional information" + }, + "str": "hello world" }, "cookies": { "c1": "v1", @@ -360,12 +358,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -468,10 +462,6 @@ } }, "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "result": "success", "sampled": true, "span_count": { diff --git a/docs/data/elasticsearch/metricset.json b/docs/data/elasticsearch/metricset.json new file mode 100644 index 00000000000..f83e51d9480 --- /dev/null +++ b/docs/data/elasticsearch/metricset.json @@ -0,0 +1,70 @@ +{ + "container": { + "id": "a47ed147c6ee269400f7ea4e296b3d01ec7398471bb2951907e4ea12f028bc69" + }, + "kubernetes": { + "pod": { + "uid": "b0cb3baa-4619-4b82-bef5-84cc87b5f853", + "name": "opbeans-java-7c68f48dc6-n6mzc" + } + }, + "process": { + "pid": 8, + "title": "/opt/java/openjdk/bin/java", + "ppid": 1 + }, + "agent": { + "name": "java", + "ephemeral_id": "29a27947-ed3a-4d87-b2e6-28f7a940ec2d", + "version": "1.25.1-SNAPSHOT.UNKNOWN" + }, + "jvm.gc.time": 11511, + "processor": { + "name": "metric", + "event": "metric" + }, + "labels": { + "name": "Copy" + }, + "metricset.name": "app", + "observer": { + "hostname": "3c5ac040e8f9", + "name": "instance-0000000002", + "id": "6657d6e6-f3e8-4ce4-aa22-e7fe2ad77b5e", + "type": "apm-server", + "ephemeral_id": "b7f21735-d283-4945-ab80-ce8df494a207", + "version": "7.15.0", + "version_major": 7 + }, + "@timestamp": "2021-09-14T09:52:49.454Z", + "ecs": { + "version": "1.11.0" + }, + "service": { + "node": { + "name": "a47ed147c6ee269400f7ea4e296b3d01ec7398471bb2951907e4ea12f028bc69" + }, + "environment": "production", + "name": "opbeans-java", + "runtime": { + "name": "Java", + "version": "11.0.11" + }, + "language": { + "name": "Java", + "version": "11.0.11" + }, + "version": "2021-09-08 03:55:06" + }, + "jvm.gc.count": 2224, + "host": { + "os": { + "platform": "Linux" + }, + "ip": "35.240.52.17", + "architecture": "amd64" + }, + "event": { + "ingested": "2021-09-14T09:53:00.834276431Z" + } +} diff --git a/docs/data/intake-api/generated/rum_v3_events.ndjson b/docs/data/intake-api/generated/rum_v3_events.ndjson index 88bcbe51df8..9bca2a7d11c 100644 --- a/docs/data/intake-api/generated/rum_v3_events.ndjson +++ b/docs/data/intake-api/generated/rum_v3_events.ndjson @@ -1,3 +1,2 @@ -{"m": {"se": {"n": "apm-a-rum-test-e2e-general-usecase","ve": "0.0.1","en": "prod","a": {"n": "js-base","ve": "4.8.1"},"ru": {"n": "v8","ve": "8.0"},"la": {"n": "javascript","ve": "6"},"fw": {"n": "angular","ve": "2"}},"u": {"id": 123,"em": "user@email.com","un": "John Doe"},"l": {"testTagKey": "testTagValue"}}} -{"x": {"id": "ec2e280be8345240","tid": "286ac3ad697892c406528f13c82e0ce1","pid": "1ef08ac234fca23b455d9e27c660f1ab","n": "general-usecase-initial-p-load","t": "p-load","d": 295,"me": [{"sa": {"xdc": {"v": 1},"xds": {"v": 295},"xbc": {"v": 1}}},{"y": {"t": "Request"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}},{"y": {"t": "Response"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}}],"y": [{"id": "bbd8bcc3be14d814","n": "Requesting and receiving the document","t": "hard-navigation","su": "browser-timing","s": 4,"d": 2},{"id": "fc546e87a90a774f","n": "Parsing the document, 
executing sy. scripts","t": "hard-navigation","su": "browser-timing","s": 14,"d": 106},{"id": "fb8f717930697299","n": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js","t": "rc","su": "script","s": 22.53499999642372,"d": 35.060000023804605,"c": {"h": {"url": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED","r": {"ts": 677175,"ebs": 676864,"dbs": 676864}},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "rc"},"ad": "localhost","po": 8000}}},{"id": "9b80535c4403c9fb","n": "OpenTracing y","t": "cu","s": 96.92999999970198,"d": 198.07000000029802},{"id": "5ecb8ee030749715","n": "GET /test/e2e/common/data.json","t": "external","su": "h","sy": true,"s": 98.94000005442649,"d": 6.72499998472631,"c": {"h": {"mt": "GET","url": "http://localhost:8000/test/e2e/common/data.json?test=hamid","sc": 200},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "external"},"ad": "localhost","po": 8000}}},{"id": "27f45fd274f976d4","n": "POST http://localhost:8003/data","t": "external","su": "h","sy": true,"s": 106.52000003028661,"d": 11.584999971091747,"c": {"h": {"mt": "POST","url": "http://localhost:8003/data","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "a3c043330bc2015e","pi": 0,"n": "POST http://localhost:8003/fetch","t": "external","su": "h","ac": "action","sy": false,"s": 119.93500008247793,"d": 15.949999913573265,"c": {"h": {"mt": "POST","url": "http://localhost:8003/fetch","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "bc7665dc25629379","st": [{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "generateError","li": 7662,"co": 9},{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "","li": 7666,"co": 3}],"n": "Fire \"DOMContentLoaded\" event","t": "hard-navigation","su": "browser-timing","s": 120,"d": 2,"o":"success"}],"c": {"p": {"rf": "http://localhost:8000/test/e2e/","url": "http://localhost:8000/test/e2e/general-usecase/"},"r": {"sc": 200,"ts": 983,"ebs": 690,"dbs": 690,"he": {"Content-Type": "application/json"}},"q": {"he": {"Accept": "application/json"},"hve": "1.1","mt": "GET"},"u": {"id": "uId","un": "un","em": "em"},"cu": {"testContext": "testContext"},"g": {"testTagKey": "testTagValue"}},"k": {"a": {"lp": 131.03000004775822,"fb": 5,"di": 120,"dc": 138,"ds": 100,"de": 110,"fp": 70.82500003930181},"nt": {"fs": 0,"ls": 0,"le": 0,"cs": 0,"ce": 0,"qs": 4,"rs": 5,"re": 6,"dl": 14,"di": 120,"ds": 120,"de": 122,"dc": 138,"es": 138,"ee": 138}},"yc": {"sd": 8,"dd": 1},"sm": true,"exp":{"cls":1,"fid":2.0,"tbt":3.4,"ignored":5,"also":"ignored"}}} -{"me": {"y": {"t": "Processing","su": "subtype"},"sa": {"ysc": {"v": 1},"yss": {"v": 124}},"g": {"tag1": "value1"}}} +{"m": {"se": {"n": "apm-a-rum-test-e2e-general-usecase","ve": "0.0.1","en": "prod","a": {"n": "js-base","ve": "4.8.1"},"ru": {"n": "v8","ve": "8.0"},"la": {"n": "javascript","ve": "6"},"fw": {"n": "angular","ve": "2"}},"u": {"id": 123,"em": "user@email.com","un": "John Doe"},"l": {"testTagKey": "testTagValue"},"n":{"c":{"t":"5G"}}}} +{"x": {"id": "ec2e280be8345240","tid": "286ac3ad697892c406528f13c82e0ce1","pid": "1ef08ac234fca23b455d9e27c660f1ab","n": 
"general-usecase-initial-p-load","t": "p-load","d": 295,"me": [{"sa": {"xdc": {"v": 1},"xds": {"v": 295},"xbc": {"v": 1}}},{"y": {"t": "Request"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}},{"y": {"t": "Response"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}}],"y": [{"id": "bbd8bcc3be14d814","n": "Requesting and receiving the document","t": "hard-navigation","su": "browser-timing","s": 4,"d": 2},{"id": "fc546e87a90a774f","n": "Parsing the document, executing sy. scripts","t": "hard-navigation","su": "browser-timing","s": 14,"d": 106},{"id": "fb8f717930697299","n": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js","t": "rc","su": "script","s": 22.53499999642372,"d": 35.060000023804605,"c": {"h": {"url": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED","r": {"ts": 677175,"ebs": 676864,"dbs": 676864}},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "rc"},"ad": "localhost","po": 8000}}},{"id": "9b80535c4403c9fb","n": "OpenTracing y","t": "cu","s": 96.92999999970198,"d": 198.07000000029802},{"id": "5ecb8ee030749715","n": "GET /test/e2e/common/data.json","t": "external","su": "h","sy": true,"s": 98.94000005442649,"d": 6.72499998472631,"c": {"h": {"mt": "GET","url": "http://localhost:8000/test/e2e/common/data.json?test=hamid","sc": 200},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "external"},"ad": "localhost","po": 8000}}},{"id": "27f45fd274f976d4","n": "POST http://localhost:8003/data","t": "external","su": "h","sy": true,"s": 106.52000003028661,"d": 11.584999971091747,"c": {"h": {"mt": "POST","url": "http://localhost:8003/data","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "a3c043330bc2015e","pi": 0,"n": "POST http://localhost:8003/fetch","t": "external","su": "h","ac": "action","sy": false,"s": 119.93500008247793,"d": 15.949999913573265,"c": {"h": {"mt": "POST","url": "http://localhost:8003/fetch","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "bc7665dc25629379","st": [{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "generateError","li": 7662,"co": 9},{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "","li": 7666,"co": 3}],"n": "Fire \"DOMContentLoaded\" event","t": "hard-navigation","su": "browser-timing","s": 120,"d": 2,"o":"success"}],"c": {"p": {"rf": "http://localhost:8000/test/e2e/","url": "http://localhost:8000/test/e2e/general-usecase/"},"r": {"sc": 200,"ts": 983,"ebs": 690,"dbs": 690,"he": {"Content-Type": "application/json"}},"q": {"he": {"Accept": "application/json"},"hve": "1.1","mt": "GET"},"u": {"id": "uId","un": "un","em": "em"},"cu": {"testContext": "testContext"},"g": {"testTagKey": "testTagValue"}},"k": {"a": {"lp": 131.03000004775822,"fb": 5,"di": 120,"dc": 138,"ds": 100,"de": 110,"fp": 70.82500003930181},"nt": {"fs": 0,"ls": 0,"le": 0,"cs": 0,"ce": 0,"qs": 4,"rs": 5,"re": 6,"dl": 14,"di": 120,"ds": 120,"de": 122,"dc": 138,"es": 138,"ee": 138}},"yc": {"sd": 8,"dd": 1},"sm": true,"exp":{"cls":1,"fid":2.0,"tbt":3.4,"ignored":5,"also":"ignored","lt":{"count":3,"sum":2.5,"max":1}}}} diff --git a/docs/error-api.asciidoc b/docs/error-api.asciidoc index ad3415e121e..ae6af959ff1 100644 
--- a/docs/error-api.asciidoc +++ b/docs/error-api.asciidoc @@ -7,9 +7,10 @@ An error or a logged error message captured by an agent occurring in a monitored [[error-schema]] ==== Error Schema -The APM Server uses JSON Schema for validating requests. The specification for errors is defined below: +APM Server uses JSON Schema to validate requests. The specification for errors is defined on +{github_repo_link}/docs/spec/v2/error.json[GitHub] and included below: [source,json] ---- -include::./spec/errors/error.json[] +include::./spec/v2/error.json[] ---- diff --git a/docs/example-intake-events.asciidoc b/docs/example-intake-events.asciidoc index 9ed12743437..cb4c4ba941b 100644 --- a/docs/example-intake-events.asciidoc +++ b/docs/example-intake-events.asciidoc @@ -1,7 +1,7 @@ [[example-intake-events]] === Example Request Body -See a request body example containing one event for all currently supported event types. +A request body example containing one event for all currently supported event types. [source,json] ---- diff --git a/docs/fields.asciidoc b/docs/fields.asciidoc index 4841c868fb0..078bcc58c9f 100644 --- a/docs/fields.asciidoc +++ b/docs/fields.asciidoc @@ -12,7 +12,7 @@ This file is generated! See _meta/fields.yml and scripts/generate_fields_docs.py This document describes the fields that are exported by Apm-Server. They are grouped in the following categories: -* <> +* <> * <> * <> * <> @@ -31,12 +31,24 @@ grouped in the following categories: * <> -- -[[exported-fields-apm]] -== General APM fields +[[exported-fields-apm-application-metrics]] +== APM Application Metrics fields -Fields common to various APM events. +APM application metrics. +*`histogram`*:: ++ +-- +type: histogram + +-- + +[[exported-fields-apm-error]] +== APM Error fields + +Error-specific data for APM + *`processor.name`*:: + @@ -67,6 +79,17 @@ type: long -- +*`message`*:: ++ +-- +The original error message. + +type: text + +{yes-icon} {ecs-ref}[ECS] field. + +-- + [float] === url @@ -82,6 +105,8 @@ The protocol of the request, e.g. "https:". type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.full`*:: @@ -92,6 +117,8 @@ The full, possibly agent-assembled URL of the request, e.g https://example.com:4 type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.domain`*:: @@ -102,6 +129,8 @@ The hostname of the request, e.g. "example.com". type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.port`*:: @@ -112,6 +141,8 @@ The port of the request, e.g. 443. type: long +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.path`*:: @@ -122,6 +153,8 @@ The path of the request, e.g. "/search". type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.query`*:: @@ -132,6 +165,8 @@ The query string of the request, e.g. "q=elasticsearch". type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`url.fragment`*:: @@ -142,6 +177,8 @@ A fragment specifying a location in a web page , e.g. "top". type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -153,6 +190,8 @@ The http version of the request leading to this event. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -164,6 +203,8 @@ The http method of the request leading to this event. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`http.request.headers`*:: @@ -185,6 +226,8 @@ Referrer for this HTTP request. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -196,6 +239,8 @@ The status code of the HTTP response. type: long +{yes-icon} {ecs-ref}[ECS] field. 
+ -- *`http.response.finished`*:: @@ -228,6 +273,8 @@ A flat mapping of user-defined labels with string, boolean or number values. type: object +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -245,6 +292,8 @@ Immutable name of the service emitting this event. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`service.version`*:: @@ -255,6 +304,8 @@ Version of the service emitting this event. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`service.environment`*:: @@ -276,6 +327,8 @@ Unique meaningful name of the service node. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -350,6 +403,8 @@ The transaction ID. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`transaction.sampled`*:: @@ -390,105 +445,96 @@ type: text -- -*`transaction.duration.count`*:: +*`trace.id`*:: + -- -type: long +The ID of the trace to which the event belongs to. --- +type: keyword -*`transaction.duration.sum.us`*:: -+ --- -type: long +{yes-icon} {ecs-ref}[ECS] field. -- -[float] -=== self_time - -Portion of the transaction's duration where no direct child was running - - -*`transaction.self_time.count`*:: +*`parent.id`*:: + -- -type: long - --- +The ID of the parent event. -*`transaction.self_time.sum.us`*:: -+ --- -type: long +type: keyword -- -[float] -=== breakdown -Counter for collected breakdowns for the transaction +*`agent.name`*:: ++ +-- +Name of the agent used. +type: keyword -*`transaction.breakdown.count`*:: -+ --- -type: long +{yes-icon} {ecs-ref}[ECS] field. -- - -*`span.type`*:: +*`agent.version`*:: + -- -Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). +Version of the agent used. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`span.subtype`*:: +*`agent.ephemeral_id`*:: + -- -A further sub-division of the type (e.g. postgresql, elasticsearch) +The Ephemeral ID identifies a running process. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- [float] -=== self_time +=== container -Portion of the span's duration where no direct child was running +Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. -*`span.self_time.count`*:: +*`container.id`*:: + -- -type: long +Unique container id. --- +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -*`span.self_time.sum.us`*:: -+ -- -type: long --- +[float] +=== kubernetes + +Kubernetes metadata reported by agents -*`trace.id`*:: + +*`kubernetes.namespace`*:: + -- -The ID of the trace to which the event belongs to. +Kubernetes namespace type: keyword @@ -496,10 +542,10 @@ type: keyword -- -*`parent.id`*:: +*`kubernetes.node.name`*:: + -- -The ID of the parent event. +Kubernetes node name type: keyword @@ -507,47 +553,54 @@ type: keyword -- -*`agent.name`*:: +*`kubernetes.pod.name`*:: + -- -Name of the agent used. +Kubernetes pod name type: keyword -- -*`agent.version`*:: +*`kubernetes.pod.uid`*:: + -- -Version of the agent used. +Kubernetes Pod UID type: keyword -- -*`agent.ephemeral_id`*:: -+ --- -The Ephemeral ID identifies a running process. +[float] +=== network +Optional network fields -type: keyword --- [float] -=== container +=== connection -Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. +Network connection details -*`container.id`*:: +*`network.connection.type`*:: + -- -Unique container id. 
+Network connection type, eg. "wifi", "cell" + + +type: keyword + +-- + +*`network.connection.subtype`*:: ++ +-- +Detailed network connection sub-type, e.g. "LTE", "CDMA" type: keyword @@ -555,48 +608,46 @@ type: keyword -- [float] -=== kubernetes +=== carrier -Kubernetes metadata reported by agents +Network operator -*`kubernetes.namespace`*:: +*`network.carrier.name`*:: + -- -Kubernetes namespace +Carrier name, eg. Vodafone, T-Mobile, etc. type: keyword -- - -*`kubernetes.node.name`*:: +*`network.carrier.mcc`*:: + -- -Kubernetes node name +Mobile country code type: keyword -- - -*`kubernetes.pod.name`*:: +*`network.carrier.mnc`*:: + -- -Kubernetes pod name +Mobile network code type: keyword -- -*`kubernetes.pod.uid`*:: +*`network.carrier.icc`*:: + -- -Kubernetes Pod UID +ISO country code, eg. US type: keyword @@ -618,6 +669,8 @@ The architecture of the host the event was recorded on. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`host.hostname`*:: @@ -628,6 +681,8 @@ The hostname of the host the event was recorded on. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`host.name`*:: @@ -638,6 +693,8 @@ Name of the host the event was recorded on. It can contain same information as h type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`host.ip`*:: @@ -648,6 +705,8 @@ IP of the host that records the event. type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -665,6 +724,8 @@ The platform of the host the event was recorded on. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -677,12 +738,13 @@ Information pertaining to the running process where the data was collected *`process.args`*:: + -- -Process arguments. -May be filtered to protect sensitive information. +Process arguments. May be filtered to protect sensitive information. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`process.pid`*:: @@ -693,6 +755,8 @@ Numeric process ID of the service process. type: long +{yes-icon} {ecs-ref}[ECS] field. + -- *`process.ppid`*:: @@ -703,6 +767,8 @@ Numeric ID of the service's parent process. type: long +{yes-icon} {ecs-ref}[ECS] field. + -- *`process.title`*:: @@ -713,6 +779,8 @@ Service process title. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -734,6 +802,8 @@ Hostname of the APM Server. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`observer.version`*:: @@ -744,6 +814,8 @@ APM Server version. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`observer.version_major`*:: @@ -762,6 +834,28 @@ type: byte The type will be set to `apm-server`. +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.id`*:: ++ +-- +Unique identifier of the APM Server. + + +type: keyword + +-- + +*`observer.ephemeral_id`*:: ++ +-- +Ephemeral identifier of the APM Server. + + type: keyword -- @@ -775,6 +869,20 @@ The username of the logged in user. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.domain`*:: ++ +-- +Domain of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + -- *`user.id`*:: @@ -785,6 +893,8 @@ Identifier of the logged in user. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`user.email`*:: @@ -795,8 +905,22 @@ Email of the logged in user. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`client.domain`*:: ++ -- +Client domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. +-- *`client.ip`*:: + @@ -806,8 +930,34 @@ IP address of the client of a recorded event. This is typically obtained from a type: ip +{yes-icon} {ecs-ref}[ECS] field. 
+ +-- + +*`client.port`*:: ++ +-- +Port of the client. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`source.domain`*:: ++ -- +Source domain. + + +type: keyword +{yes-icon} {ecs-ref}[ECS] field. + +-- *`source.ip`*:: + @@ -817,6 +967,20 @@ IP address of the source of a recorded event. This is typically obtained from a type: ip +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.port`*:: ++ +-- +Port of the source. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -829,21 +993,23 @@ Destination fields are usually populated in conjunction with source fields. *`destination.address`*:: + -- -Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`destination.ip`*:: + -- -IP addess of the destination. -Can be one of multiple IPv4 or IPv6 addresses. +IP addess of the destination. Can be one of multiple IPv4 or IPv6 addresses. type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- *`destination.port`*:: @@ -855,6 +1021,8 @@ type: long format: string +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -874,6 +1042,8 @@ type: keyword example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.original.text`*:: @@ -896,6 +1066,8 @@ type: keyword example: Safari +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.version`*:: @@ -908,6 +1080,8 @@ type: keyword example: 12.0 +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -927,6 +1101,8 @@ type: keyword example: iPhone +{yes-icon} {ecs-ref}[ECS] field. + -- [float] @@ -946,6 +1122,8 @@ type: keyword example: darwin +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.os.name`*:: @@ -958,6 +1136,8 @@ type: keyword example: Mac OS X +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.os.full`*:: @@ -970,6 +1150,8 @@ type: keyword example: Mac OS Mojave +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.os.family`*:: @@ -982,6 +1164,8 @@ type: keyword example: debian +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.os.version`*:: @@ -994,6 +1178,8 @@ type: keyword example: 10.14.1 +{yes-icon} {ecs-ref}[ECS] field. + -- *`user_agent.os.kernel`*:: @@ -1006,14 +1192,7 @@ type: keyword example: 4.4.0-112-generic --- - -*`experimental`*:: -+ --- -Additional experimental data sent by the agents. - -type: object +{yes-icon} {ecs-ref}[ECS] field. -- @@ -1032,6 +1211,8 @@ Cloud account ID type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.account.name`*:: @@ -1041,6 +1222,8 @@ Cloud account name type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.availability_zone`*:: @@ -1052,6 +1235,8 @@ type: keyword example: us-east1-a +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -1062,6 +1247,8 @@ Cloud instance/machine ID type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.instance.name`*:: @@ -1071,6 +1258,8 @@ Cloud instance/machine name type: keyword +{yes-icon} {ecs-ref}[ECS] field. 
+ -- @@ -1083,6 +1272,8 @@ type: keyword example: t2.medium +{yes-icon} {ecs-ref}[ECS] field. + -- @@ -1093,6 +1284,8 @@ Cloud project ID type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.project.name`*:: @@ -1102,6 +1295,8 @@ Cloud project name type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.provider`*:: @@ -1113,6 +1308,8 @@ type: keyword example: gcp +{yes-icon} {ecs-ref}[ECS] field. + -- *`cloud.region`*:: @@ -1124,27 +1321,21 @@ type: keyword example: us-east1 +{yes-icon} {ecs-ref}[ECS] field. + -- -*`event.outcome`*:: +*`cloud.service.name`*:: + -- -`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. +Cloud service name, intended to distinguish services running on different platforms within a provider. type: keyword -example: success - -- -[[exported-fields-apm-error]] -== APM Error fields - -Error-specific data for APM - - [float] === error @@ -1160,6 +1351,8 @@ The ID of the error. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- *`error.culprit`*:: @@ -1174,7 +1367,17 @@ type: keyword *`error.grouping_key`*:: + -- -GroupingKey of the logged error for use in grouping. +Hash of select properties of the logged error for grouping purposes. + + +type: keyword + +-- + +*`error.grouping_name`*:: ++ +-- +Name to associate with an error group. Errors belonging to the same group (same grouping_key) may have differing values for grouping_name. Consumers may choose one arbitrarily. type: keyword @@ -1218,6 +1421,8 @@ type: keyword *`error.exception.type`*:: + -- +The type of the original error, e.g. the Java exception class name. + type: keyword -- @@ -1281,942 +1486,13730 @@ type: keyword Profiling-specific data for APM. - -*`profile.id`*:: +*`processor.name`*:: + -- -Unique ID for the profile. -All samples within a profile will have the same profile ID. - +Processor name. type: keyword -- -*`profile.duration`*:: +*`processor.event`*:: + -- -Duration of the profile, in microseconds. -All samples within a profile will have the same duration. To aggregate durations, you should first group by the profile ID. - +Processor event. -type: long +type: keyword -- -*`profile.cpu.ns`*:: +*`timestamp.us`*:: + -- -Amount of CPU time profiled, in nanoseconds. +Timestamp of the event in microseconds since Unix epoch. type: long -- - -*`profile.samples.count`*:: +*`labels`*:: + -- -Number of profile samples for the profiling period. +A flat mapping of user-defined labels with string, boolean or number values. -type: long +type: object + +{yes-icon} {ecs-ref}[ECS] field. -- +[float] +=== service + +Service fields. -*`profile.alloc_objects.count`*:: + + +*`service.name`*:: + -- -Number of objects allocated since the process started. +Immutable name of the service emitting this event. -type: long +type: keyword --- +{yes-icon} {ecs-ref}[ECS] field. +-- -*`profile.alloc_space.bytes`*:: +*`service.version`*:: + -- -Amount of memory allocated, in bytes, since the process started. +Version of the service emitting this event. -type: long +type: keyword --- +{yes-icon} {ecs-ref}[ECS] field. +-- -*`profile.inuse_objects.count`*:: +*`service.environment`*:: + -- -Number of objects allocated and currently in use. +Service environment. -type: long +type: keyword -- -*`profile.inuse_space.bytes`*:: +*`service.node.name`*:: + -- -Amount of memory allocated, in bytes, and currently in use. +Unique meaningful name of the service node. -type: long +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. 
-- -*`profile.top.id`*:: +*`service.language.name`*:: + -- -Unique ID for the top stack frame in the context of its callers. +Name of the programming language used. type: keyword -- -*`profile.top.function`*:: +*`service.language.version`*:: + -- -Function name for the top stack frame. +Version of the programming language used. type: keyword -- -*`profile.top.filename`*:: + +*`service.runtime.name`*:: + -- -Source code filename for the top stack frame. +Name of the runtime used. type: keyword -- -*`profile.top.line`*:: +*`service.runtime.version`*:: + -- -Source code line number for the top stack frame. +Version of the runtime used. -type: long +type: keyword -- -*`profile.stack.id`*:: +*`service.framework.name`*:: + -- -Unique ID for a stack frame in the context of its callers. +Name of the framework used. type: keyword -- -*`profile.stack.function`*:: +*`service.framework.version`*:: + -- -Function name for a stack frame. +Version of the framework used. type: keyword -- -*`profile.stack.filename`*:: + +*`agent.name`*:: + -- -Source code filename for a stack frame. +Name of the agent used. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`profile.stack.line`*:: +*`agent.version`*:: + -- -Source code line number for a stack frame. +Version of the agent used. -type: long +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -[[exported-fields-apm-sourcemap]] -== APM Sourcemap fields +*`agent.ephemeral_id`*:: ++ +-- +The Ephemeral ID identifies a running process. -Sourcemap files enriched with metadata +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. +-- [float] -=== service +=== container -Service fields. +Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. -*`sourcemap.service.name`*:: +*`container.id`*:: + -- -The name of the service this sourcemap belongs to. +Unique container id. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`sourcemap.service.version`*:: +[float] +=== network + +Optional network fields + + + +[float] +=== connection + +Network connection details + + + +*`network.connection.type`*:: + -- -Service version. +Network connection type, eg. "wifi", "cell" type: keyword -- -*`sourcemap.bundle_filepath`*:: +*`network.connection.subtype`*:: + -- -Location of the sourcemap relative to the file requesting it. +Detailed network connection sub-type, e.g. "LTE", "CDMA" type: keyword -- -[[exported-fields-apm-span]] -== APM Span fields - -Span-specific data for APM. - +[float] +=== carrier -*`view spans`*:: -+ --- -format: url +Network operator --- -*`child.id`*:: +*`network.carrier.name`*:: + -- -The ID(s)s of the child event(s). +Carrier name, eg. Vodafone, T-Mobile, etc. type: keyword -- - -*`span.id`*:: +*`network.carrier.mcc`*:: + -- -The ID of the span stored as hex encoded string. +Mobile country code type: keyword -- -*`span.name`*:: +*`network.carrier.mnc`*:: + -- -Generic designation of a span in the scope of a transaction. +Mobile network code type: keyword -- -*`span.action`*:: +*`network.carrier.icc`*:: + -- -The specific kind of event within the sub-type represented by the span (e.g. query, connect) +ISO country code, eg. US type: keyword -- +[float] +=== kubernetes -*`span.start.us`*:: -+ --- -Offset relative to the transaction's timestamp identifying the start of the span, in microseconds. 
- - -type: long +Kubernetes metadata reported by agents --- -*`span.duration.us`*:: +*`kubernetes.namespace`*:: + -- -Duration of the span, in microseconds. +Kubernetes namespace -type: long +type: keyword -- -*`span.sync`*:: + +*`kubernetes.node.name`*:: + -- -Indicates whether the span was executed synchronously or asynchronously. +Kubernetes node name -type: boolean +type: keyword -- -*`span.db.link`*:: +*`kubernetes.pod.name`*:: + -- -Database link. +Kubernetes pod name type: keyword -- -*`span.db.rows_affected`*:: +*`kubernetes.pod.uid`*:: + -- -Number of rows affected by the database statement. +Kubernetes Pod UID -type: long +type: keyword -- - [float] -=== service +=== host -Destination service context +Optional host fields. -*`span.destination.service.type`*:: + +*`host.architecture`*:: + -- -Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type. +The architecture of the host the event was recorded on. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`span.destination.service.name`*:: +*`host.hostname`*:: + -- -Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq') +The hostname of the host the event was recorded on. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`span.destination.service.resource`*:: +*`host.name`*:: + -- -Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') +Name of the host the event was recorded on. It can contain same information as host.hostname or a name specified by the user. type: keyword --- - +{yes-icon} {ecs-ref}[ECS] field. +-- -*`span.message.queue.name`*:: +*`host.ip`*:: + -- -Name of the message queue or topic where the message is published or received. - +IP of the host that records the event. -type: keyword --- +type: ip +{yes-icon} {ecs-ref}[ECS] field. -*`span.message.age.ms`*:: -+ -- -Age of a message in milliseconds. +[float] +=== os -type: long +The OS fields contain information about the operating system. --- -[[exported-fields-apm-span-metrics-xpack]] -== APM Span Metrics fields -APM span metrics are used for showing rate of requests and latency between instrumented services. +*`host.os.platform`*:: ++ +-- +The platform of the host the event was recorded on. +type: keyword -*`metricset.period`*:: -+ --- -type: long +{yes-icon} {ecs-ref}[ECS] field. -- +[float] +=== process +Information pertaining to the running process where the data was collected -*`span.destination.service.response_time.count`*:: -+ --- -type: long --- -*`span.destination.service.response_time.sum.us`*:: +*`process.args`*:: + -- -type: long +Process arguments. May be filtered to protect sensitive information. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -[[exported-fields-apm-transaction]] -== APM Transaction fields +*`process.pid`*:: ++ +-- +Numeric process ID of the service process. -Transaction-specific data for APM +type: long +{yes-icon} {ecs-ref}[ECS] field. +-- -*`transaction.duration.us`*:: +*`process.ppid`*:: + -- -Total duration of this transaction, in microseconds. +Numeric ID of the service's parent process. type: long +{yes-icon} {ecs-ref}[ECS] field. + -- -*`transaction.result`*:: +*`process.title`*:: + -- -The result of the transaction. HTTP status code for HTTP-related transactions. +Service process title. type: keyword +{yes-icon} {ecs-ref}[ECS] field. 
+ -- -*`transaction.marks`*:: + +*`observer.listening`*:: + -- -A user-defined mapping of groups of marks in milliseconds. +Address the server is listening on. -type: object +type: keyword -- -*`transaction.marks.*.*`*:: +*`observer.hostname`*:: + -- -type: object - --- +Hostname of the APM Server. -*`transaction.experience.cls`*:: -+ --- -The Cumulative Layout Shift metric +type: keyword -type: scaled_float +{yes-icon} {ecs-ref}[ECS] field. -- -*`transaction.experience.fid`*:: +*`observer.version`*:: + -- -The First Input Delay metric +APM Server version. -type: scaled_float + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -*`transaction.experience.tbt`*:: +*`observer.version_major`*:: + -- -The Total Blocking Time metric +Major version number of the observer -type: scaled_float --- +type: byte +-- -*`transaction.span_count.dropped`*:: +*`observer.type`*:: + -- -The total amount of dropped spans for this transaction. +The type will be set to `apm-server`. -type: long --- +type: keyword +{yes-icon} {ecs-ref}[ECS] field. +-- -*`transaction.message.queue.name`*:: +*`observer.id`*:: + -- -Name of the message queue or topic where the message is published or received. +Unique identifier of the APM Server. type: keyword -- - -*`transaction.message.age.ms`*:: +*`observer.ephemeral_id`*:: + -- -Age of a message in milliseconds. +Ephemeral identifier of the APM Server. -type: long +type: keyword -- -[[exported-fields-apm-transaction-metrics]] -== APM Transaction Metrics fields - -APM transaction metrics, and transaction metrics-specific properties, such as transaction.root. - - - -*`transaction.root`*:: +*`user.name`*:: + -- -Identifies metrics for root transactions. This can be used for calculating metrics for traces. +The username of the logged in user. -type: boolean +type: keyword --- +{yes-icon} {ecs-ref}[ECS] field. -[[exported-fields-apm-transaction-metrics-xpack]] -== APM Transaction Metrics fields +-- -APM transaction metrics, and transaction metrics-specific properties, requiring licensed features such as the histogram field type. +*`user.id`*:: ++ +-- +Identifier of the logged in user. +type: keyword +{yes-icon} {ecs-ref}[ECS] field. +-- -*`transaction.duration.histogram`*:: +*`user.email`*:: + -- -Pre-aggregated histogram of transaction durations. - - -type: histogram +Email of the logged in user. --- -[[exported-fields-beat-common]] -== Beat fields +type: keyword -Contains common beat fields available in all event types. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`agent.hostname`*:: +*`client.domain`*:: + -- -Deprecated - use agent.name or agent.id to identify an agent. +Client domain. -type: alias +type: keyword -alias to: agent.name +{yes-icon} {ecs-ref}[ECS] field. -- -*`beat.timezone`*:: +*`client.ip`*:: + -- -type: alias +IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. -alias to: event.timezone + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. -- -*`fields`*:: +*`client.port`*:: + -- -Contains user configurable fields. +Port of the client. -type: object +type: long + +{yes-icon} {ecs-ref}[ECS] field. -- -*`beat.name`*:: + +*`source.domain`*:: + -- -type: alias +Source domain. -alias to: host.name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -*`beat.hostname`*:: +*`source.ip`*:: + -- -type: alias +IP address of the source of a recorded event. 
This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. -alias to: agent.name + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. -- -*`timeseries.instance`*:: +*`source.port`*:: + -- -Time series instance id +Port of the source. -type: keyword --- +type: long -[[exported-fields-cloud]] -== Cloud provider metadata fields +{yes-icon} {ecs-ref}[ECS] field. -Metadata from cloud providers added by the add_cloud_metadata processor. +-- + +[float] +=== destination +Destination fields describe details about the destination of a packet/event. +Destination fields are usually populated in conjunction with source fields. -*`cloud.project.id`*:: +*`destination.address`*:: + -- -Name of the project in Google Cloud. +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +type: keyword -example: project-x +{yes-icon} {ecs-ref}[ECS] field. -- -*`cloud.image.id`*:: +*`destination.ip`*:: + -- -Image ID for the cloud instance. +IP addess of the destination. Can be one of multiple IPv4 or IPv6 addresses. +type: ip -example: ami-abcd1234 +{yes-icon} {ecs-ref}[ECS] field. -- -*`meta.cloud.provider`*:: +*`destination.port`*:: + -- -type: alias +Port of the destination. -alias to: cloud.provider +type: long --- +format: string + +{yes-icon} {ecs-ref}[ECS] field. -*`meta.cloud.instance_id`*:: -+ -- -type: alias -alias to: cloud.instance.id +[float] +=== user_agent --- +The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. -*`meta.cloud.instance_name`*:: + + +*`user_agent.original`*:: + -- -type: alias +Unparsed version of the user_agent. -alias to: cloud.instance.name --- +type: keyword -*`meta.cloud.machine_type`*:: -+ --- -type: alias +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 -alias to: cloud.machine.type +{yes-icon} {ecs-ref}[ECS] field. -- -*`meta.cloud.availability_zone`*:: +*`user_agent.original.text`*:: + -- -type: alias +Software agent acting in behalf of a user, eg. a web browser / OS combination. -alias to: cloud.availability_zone + +type: text -- -*`meta.cloud.project_id`*:: +*`user_agent.name`*:: + -- -type: alias +Name of the user agent. -alias to: cloud.project.id + +type: keyword + +example: Safari + +{yes-icon} {ecs-ref}[ECS] field. -- -*`meta.cloud.region`*:: +*`user_agent.version`*:: + -- -type: alias +Version of the user agent. -alias to: cloud.region --- +type: keyword -[[exported-fields-docker-processor]] -== Docker fields +example: 12.0 -Docker stats collected from Docker. +{yes-icon} {ecs-ref}[ECS] field. +-- + +[float] +=== device + +Information concerning the device. + + + +*`user_agent.device.name`*:: ++ +-- +Name of the device. + + +type: keyword + +example: iPhone + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`user_agent.os.platform`*:: ++ +-- +Operating system platform (such centos, ubuntu, windows). + + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.name`*:: ++ +-- +Operating system name, without the version. 
+ + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.full`*:: ++ +-- +Operating system name, including the version or code name. + + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.version`*:: ++ +-- +Operating system version as a raw string. + + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== cloud + +Cloud metadata reported by agents + + + + +*`cloud.account.id`*:: ++ +-- +Cloud account ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.account.name`*:: ++ +-- +Cloud account name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.availability_zone`*:: ++ +-- +Cloud availability zone name + +type: keyword + +example: us-east1-a + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.instance.id`*:: ++ +-- +Cloud instance/machine ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.name`*:: ++ +-- +Cloud instance/machine name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.machine.type`*:: ++ +-- +Cloud instance/machine type + +type: keyword + +example: t2.medium + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.project.id`*:: ++ +-- +Cloud project ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.name`*:: ++ +-- +Cloud project name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.provider`*:: ++ +-- +Cloud provider name + +type: keyword + +example: gcp + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.region`*:: ++ +-- +Cloud region name + +type: keyword + +example: us-east1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.service.name`*:: ++ +-- +Cloud service name, intended to distinguish services running on different platforms within a provider. + + +type: keyword + +-- + + +*`profile.id`*:: ++ +-- +Unique ID for the profile. All samples within a profile will have the same profile ID. + + +type: keyword + +-- + +*`profile.duration`*:: ++ +-- +Duration of the profile, in nanoseconds. All samples within a profile will have the same duration. To aggregate durations, you should first group by the profile ID. + + +type: long + +-- + + +*`profile.cpu.ns`*:: ++ +-- +Amount of CPU time profiled, in nanoseconds. + + +type: long + +-- + + +*`profile.wall.us`*:: ++ +-- +Amount of wall time profiled, in microseconds. + + +type: long + +-- + + +*`profile.samples.count`*:: ++ +-- +Number of profile samples for the profiling period. + + +type: long + +-- + + +*`profile.alloc_objects.count`*:: ++ +-- +Number of objects allocated since the process started. + + +type: long + +-- + + +*`profile.alloc_space.bytes`*:: ++ +-- +Amount of memory allocated, in bytes, since the process started. + + +type: long + +-- + + +*`profile.inuse_objects.count`*:: ++ +-- +Number of objects allocated and currently in use. + + +type: long + +-- + + +*`profile.inuse_space.bytes`*:: ++ +-- +Amount of memory allocated, in bytes, and currently in use. + + +type: long + +-- + + +*`profile.top.id`*:: ++ +-- +Unique ID for the top stack frame in the context of its callers. 
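To make "in the context of its callers" concrete: one way to build such an ID is to hash the frame together with its caller chain, so the same function reached from two different call sites gets two different IDs. A hypothetical sketch, not the apm-server implementation:

[source,python]
----
import hashlib

# Hypothetical sketch: derive a stable ID for a stack frame "in the
# context of its callers" by hashing the frame plus its caller chain.
# This illustrates the idea in the description; it is not agent or
# apm-server code.
def frame_id(frames):
    # frames: outermost caller first, top frame last,
    # each as (function, filename, line)
    h = hashlib.sha1()
    for fn, filename, line in frames:
        h.update(f"{fn}|{filename}|{line};".encode())
    return h.hexdigest()

stack = [("main", "main.go", 10), ("handler", "http.go", 42)]
print(frame_id(stack))  # ID of "handler" as called from "main"
----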
+ + +type: keyword + +-- + +*`profile.top.function`*:: ++ +-- +Function name for the top stack frame. + + +type: keyword + +-- + +*`profile.top.filename`*:: ++ +-- +Source code filename for the top stack frame. + + +type: keyword + +-- + +*`profile.top.line`*:: ++ +-- +Source code line number for the top stack frame. + + +type: long + +-- + + +*`profile.stack.id`*:: ++ +-- +Unique ID for a stack frame in the context of its callers. + + +type: keyword + +-- + +*`profile.stack.function`*:: ++ +-- +Function name for a stack frame. + + +type: keyword + +-- + +*`profile.stack.filename`*:: ++ +-- +Source code filename for a stack frame. + + +type: keyword + +-- + +*`profile.stack.line`*:: ++ +-- +Source code line number for a stack frame. + + +type: long + +-- + +[[exported-fields-apm-sourcemap]] +== APM Sourcemap fields + +Sourcemap files enriched with metadata + + + +[float] +=== service + +Service fields. + + + +*`sourcemap.service.name`*:: ++ +-- +The name of the service this sourcemap belongs to. + + +type: keyword + +-- + +*`sourcemap.service.version`*:: ++ +-- +Service version. + + +type: keyword + +-- + +*`sourcemap.bundle_filepath`*:: ++ +-- +Location of the sourcemap relative to the file requesting it. + + +type: keyword + +-- + +[[exported-fields-apm-span]] +== APM Span fields + +Span-specific data for APM. + + +*`processor.name`*:: ++ +-- +Processor name. + +type: keyword + +-- + +*`processor.event`*:: ++ +-- +Processor event. + +type: keyword + +-- + + +*`timestamp.us`*:: ++ +-- +Timestamp of the event in microseconds since Unix epoch. + + +type: long + +-- + +*`labels`*:: ++ +-- +A flat mapping of user-defined labels with string, boolean or number values. + + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== service + +Service fields. + + + +*`service.name`*:: ++ +-- +Immutable name of the service emitting this event. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`service.version`*:: ++ +-- +Version of the service emitting this event. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`service.environment`*:: ++ +-- +Service environment. + + +type: keyword + +-- + + +*`service.node.name`*:: ++ +-- +Unique meaningful name of the service node. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`service.language.name`*:: ++ +-- +Name of the programming language used. + + +type: keyword + +-- + +*`service.language.version`*:: ++ +-- +Version of the programming language used. + + +type: keyword + +-- + + +*`service.runtime.name`*:: ++ +-- +Name of the runtime used. + + +type: keyword + +-- + +*`service.runtime.version`*:: ++ +-- +Version of the runtime used. + + +type: keyword + +-- + + +*`service.framework.name`*:: ++ +-- +Name of the framework used. + + +type: keyword + +-- + +*`service.framework.version`*:: ++ +-- +Version of the framework used. + + +type: keyword + +-- + + +*`transaction.id`*:: ++ +-- +The transaction ID. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`transaction.sampled`*:: ++ +-- +Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + + +type: boolean + +-- + +*`transaction.type`*:: ++ +-- +Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) + + +type: keyword + +-- + +*`transaction.name`*:: ++ +-- +Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). 
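Agents generally report a route template rather than the raw URL so that this field stays low-cardinality. An illustrative sketch of that normalization (the regex and helper are assumptions, not agent code):

[source,python]
----
import re

# Hypothetical sketch: collapse numeric path segments into a route
# template so transaction.name stays low-cardinality. Real agents use
# the framework's own route definitions where available.
def transaction_name(method, path):
    template = re.sub(r"/\d+(?=/|$)", "/:id", path)
    return f"{method} {template}"

print(transaction_name("GET", "/users/42"))  # "GET /users/:id"
----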
+ + +type: keyword + +-- + +*`transaction.name.text`*:: ++ +-- +type: text + +-- + + +*`trace.id`*:: ++ +-- +The ID of the trace to which the event belongs to. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`parent.id`*:: ++ +-- +The ID of the parent event. + + +type: keyword + +-- + + +*`agent.name`*:: ++ +-- +Name of the agent used. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.version`*:: ++ +-- +Version of the agent used. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.ephemeral_id`*:: ++ +-- +The Ephemeral ID identifies a running process. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== container + +Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. + + + +*`container.id`*:: ++ +-- +Unique container id. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== kubernetes + +Kubernetes metadata reported by agents + + + +*`kubernetes.namespace`*:: ++ +-- +Kubernetes namespace + + +type: keyword + +-- + + +*`kubernetes.node.name`*:: ++ +-- +Kubernetes node name + + +type: keyword + +-- + + +*`kubernetes.pod.name`*:: ++ +-- +Kubernetes pod name + + +type: keyword + +-- + +*`kubernetes.pod.uid`*:: ++ +-- +Kubernetes Pod UID + + +type: keyword + +-- + +[float] +=== network + +Optional network fields + + + +[float] +=== connection + +Network connection details + + + +*`network.connection.type`*:: ++ +-- +Network connection type, eg. "wifi", "cell" + + +type: keyword + +-- + +*`network.connection.subtype`*:: ++ +-- +Detailed network connection sub-type, e.g. "LTE", "CDMA" + + +type: keyword + +-- + +[float] +=== carrier + +Network operator + + + +*`network.carrier.name`*:: ++ +-- +Carrier name, eg. Vodafone, T-Mobile, etc. + + +type: keyword + +-- + +*`network.carrier.mcc`*:: ++ +-- +Mobile country code + + +type: keyword + +-- + +*`network.carrier.mnc`*:: ++ +-- +Mobile network code + + +type: keyword + +-- + +*`network.carrier.icc`*:: ++ +-- +ISO country code, eg. US + + +type: keyword + +-- + +[float] +=== host + +Optional host fields. + + + +*`host.architecture`*:: ++ +-- +The architecture of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.hostname`*:: ++ +-- +The hostname of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.name`*:: ++ +-- +Name of the host the event was recorded on. It can contain same information as host.hostname or a name specified by the user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.ip`*:: ++ +-- +IP of the host that records the event. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`host.os.platform`*:: ++ +-- +The platform of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== process + +Information pertaining to the running process where the data was collected + + + +*`process.args`*:: ++ +-- +Process arguments. May be filtered to protect sensitive information. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pid`*:: ++ +-- +Numeric process ID of the service process. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.ppid`*:: ++ +-- +Numeric ID of the service's parent process. 
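For illustration, the pair of values an agent might report here can be read from the standard library; only the mapping onto these field names is assumed:

[source,python]
----
import os

# os.getpid()/os.getppid() are standard-library calls; mapping them onto
# process.pid and process.ppid is the illustrative part.
print({"process.pid": os.getpid(), "process.ppid": os.getppid()})
----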
+ + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.title`*:: ++ +-- +Service process title. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`observer.listening`*:: ++ +-- +Address the server is listening on. + + +type: keyword + +-- + +*`observer.hostname`*:: ++ +-- +Hostname of the APM Server. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version`*:: ++ +-- +APM Server version. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version_major`*:: ++ +-- +Major version number of the observer + + +type: byte + +-- + +*`observer.type`*:: ++ +-- +The type will be set to `apm-server`. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.id`*:: ++ +-- +Unique identifier of the APM Server. + + +type: keyword + +-- + +*`observer.ephemeral_id`*:: ++ +-- +Ephemeral identifier of the APM Server. + + +type: keyword + +-- + + +*`user.name`*:: ++ +-- +The username of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.domain`*:: ++ +-- +Domain of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.id`*:: ++ +-- +Identifier of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.email`*:: ++ +-- +Email of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`client.domain`*:: ++ +-- +Client domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.ip`*:: ++ +-- +IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.port`*:: ++ +-- +Port of the client. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`source.domain`*:: ++ +-- +Source domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.ip`*:: ++ +-- +IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.port`*:: ++ +-- +Port of the source. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== destination + +Destination fields describe details about the destination of a packet/event. +Destination fields are usually populated in conjunction with source fields. + + +*`destination.address`*:: ++ +-- +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.ip`*:: ++ +-- +IP addess of the destination. Can be one of multiple IPv4 or IPv6 addresses. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.port`*:: ++ +-- +Port of the destination. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== user_agent + +The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + + + +*`user_agent.original`*:: ++ +-- +Unparsed version of the user_agent. 
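This raw header is what server-side processing decomposes into the structured `user_agent.*` fields that follow. A sketch using the example values quoted in this section; the parse result is shown precomputed rather than produced by a parser library:

[source,python]
----
import json

# Example values below are taken from this reference section itself;
# the raw-header-to-fields parsing is done server side and is shown
# here as a precomputed result.
raw = ("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) "
       "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 "
       "Mobile/15E148 Safari/604.1")
parsed = {
    "user_agent": {
        "original": raw,
        "name": "Safari",
        "version": "12.0",
        "device": {"name": "iPhone"},
    }
}
print(json.dumps(parsed, indent=2))
----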
+ + +type: keyword + +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.original.text`*:: ++ +-- +Software agent acting in behalf of a user, eg. a web browser / OS combination. + + +type: text + +-- + +*`user_agent.name`*:: ++ +-- +Name of the user agent. + + +type: keyword + +example: Safari + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.version`*:: ++ +-- +Version of the user agent. + + +type: keyword + +example: 12.0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== device + +Information concerning the device. + + + +*`user_agent.device.name`*:: ++ +-- +Name of the device. + + +type: keyword + +example: iPhone + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`user_agent.os.platform`*:: ++ +-- +Operating system platform (such centos, ubuntu, windows). + + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.name`*:: ++ +-- +Operating system name, without the version. + + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.full`*:: ++ +-- +Operating system name, including the version or code name. + + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.version`*:: ++ +-- +Operating system version as a raw string. + + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== cloud + +Cloud metadata reported by agents + + + + +*`cloud.account.id`*:: ++ +-- +Cloud account ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.account.name`*:: ++ +-- +Cloud account name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.availability_zone`*:: ++ +-- +Cloud availability zone name + +type: keyword + +example: us-east1-a + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.instance.id`*:: ++ +-- +Cloud instance/machine ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.name`*:: ++ +-- +Cloud instance/machine name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.machine.type`*:: ++ +-- +Cloud instance/machine type + +type: keyword + +example: t2.medium + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.project.id`*:: ++ +-- +Cloud project ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.name`*:: ++ +-- +Cloud project name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.provider`*:: ++ +-- +Cloud provider name + +type: keyword + +example: gcp + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.region`*:: ++ +-- +Cloud region name + +type: keyword + +example: us-east1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.service.name`*:: ++ +-- +Cloud service name, intended to distinguish services running on different platforms within a provider. 
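Taken together, the `cloud.*` fields form one object per event, typically collected from the provider's metadata endpoint. A minimal sketch assembled from the example values in this section; the `service.name` value is hypothetical:

[source,python]
----
# Minimal cloud metadata object using example values from this section
# (provider "gcp", region "us-east1", zone "us-east1-a"). The service
# name "cloud-run" is a hypothetical placeholder.
cloud = {
    "provider": "gcp",
    "region": "us-east1",
    "availability_zone": "us-east1-a",
    "service": {"name": "cloud-run"},
}
print(cloud)
----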
+ + +type: keyword + +-- + + +*`event.outcome`*:: ++ +-- +`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. + + +type: keyword + +example: success + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`child.id`*:: ++ +-- +The ID(s) of the child event(s). + + +type: keyword + +-- + + +*`span.type`*:: ++ +-- +Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). + + +type: keyword + +-- + +*`span.subtype`*:: ++ +-- +A further sub-division of the type (e.g. postgresql, elasticsearch) + + +type: keyword + +-- + +*`span.id`*:: ++ +-- +The ID of the span stored as hex encoded string. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`span.name`*:: ++ +-- +Generic designation of a span in the scope of a transaction. + + +type: keyword + +-- + +*`span.action`*:: ++ +-- +The specific kind of event within the sub-type represented by the span (e.g. query, connect) + + +type: keyword + +-- + + +*`span.start.us`*:: ++ +-- +Offset relative to the transaction's timestamp identifying the start of the span, in microseconds. + + +type: long + +-- + + +*`span.duration.us`*:: ++ +-- +Duration of the span, in microseconds. + + +type: long + +-- + +*`span.sync`*:: ++ +-- +Indicates whether the span was executed synchronously or asynchronously. + + +type: boolean + +-- + + +*`span.db.link`*:: ++ +-- +Database link. + + +type: keyword + +-- + +*`span.db.rows_affected`*:: ++ +-- +Number of rows affected by the database statement. + + +type: long + +-- + + +[float] +=== service + +Destination service context + + +*`span.destination.service.type`*:: ++ +-- +Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release + + +type: keyword + +-- + +*`span.destination.service.name`*:: ++ +-- +Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq') DEPRECATED: this field will be removed in a future release + + +type: keyword + +-- + +*`span.destination.service.resource`*:: ++ +-- +Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') + + +type: keyword + +-- + + + +*`span.message.queue.name`*:: ++ +-- +Name of the message queue or topic where the message is published or received. + + +type: keyword + +-- + + +*`span.message.age.ms`*:: ++ +-- +Age of a message in milliseconds. + + +type: long + +-- + + +*`span.composite.count`*:: ++ +-- +Number of compressed spans the composite span represents. + + +type: long + +-- + + +*`span.composite.sum.us`*:: ++ +-- +Sum of the durations of the compressed spans, in microseconds. + + +type: long + +-- + +*`span.composite.compression_strategy`*:: ++ +-- +The compression strategy that was used. + + +type: keyword + +-- + +[[exported-fields-apm-span-metrics-xpack]] +== APM Span Metrics fields + +APM span metrics are used for showing rate of requests and latency between instrumented services. + + + +*`metricset.period`*:: ++ +-- +Current data collection period for this event in milliseconds. + +type: long + +-- + + + +*`span.destination.service.response_time.count`*:: ++ +-- +Number of aggregated outgoing requests. + +type: long + +-- + +*`span.destination.service.response_time.sum.us`*:: ++ +-- +Aggregated duration of outgoing requests, in microseconds. 
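Because these are pre-aggregated values, average outgoing-request latency is derived by dividing the sum by the count. A small sketch with invented numbers:

[source,python]
----
# Deriving average outgoing-request latency from the two aggregated
# fields above. Field names come from this section; the sample numbers
# are made up.
def avg_response_time_ms(sum_us, count):
    return (sum_us / count) / 1000 if count else None

doc = {"span.destination.service.response_time.sum.us": 1_500_000,
       "span.destination.service.response_time.count": 30}
print(avg_response_time_ms(
    doc["span.destination.service.response_time.sum.us"],
    doc["span.destination.service.response_time.count"]))  # 50.0 ms
----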
+
+type: long
+
+--
+
+[[exported-fields-apm-transaction]]
+== APM Transaction fields
+
+Transaction-specific data for APM
+
+
+*`processor.name`*::
++
+--
+Processor name.
+
+type: keyword
+
+--
+
+*`processor.event`*::
++
+--
+Processor event.
+
+type: keyword
+
+--
+
+
+*`timestamp.us`*::
++
+--
+Timestamp of the event in microseconds since Unix epoch.
+
+
+type: long
+
+--
+
+[float]
+=== url
+
+A complete URL, with scheme, host and path.
+
+
+
+*`url.scheme`*::
++
+--
+The protocol of the request, e.g. "https:".
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.full`*::
++
+--
+The full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.domain`*::
++
+--
+The hostname of the request, e.g. "example.com".
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.port`*::
++
+--
+The port of the request, e.g. 443.
+
+
+type: long
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.path`*::
++
+--
+The path of the request, e.g. "/search".
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.query`*::
++
+--
+The query string of the request, e.g. "q=elasticsearch".
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`url.fragment`*::
++
+--
+A fragment specifying a location in a web page, e.g. "top".
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+
+*`http.version`*::
++
+--
+The HTTP version of the request leading to this event.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+
+*`http.request.method`*::
++
+--
+The HTTP method of the request leading to this event.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`http.request.headers`*::
++
+--
+The canonical headers of the monitored HTTP request.
+
+
+type: object
+
+Object is not enabled.
+
+--
+
+*`http.request.referrer`*::
++
+--
+Referrer for this HTTP request.
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+
+*`http.response.status_code`*::
++
+--
+The status code of the HTTP response.
+
+
+type: long
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`http.response.finished`*::
++
+--
+Used by the Node agent to indicate when in the response life cycle an error has occurred.
+
+
+type: boolean
+
+--
+
+*`http.response.headers`*::
++
+--
+The canonical headers of the monitored HTTP response.
+
+
+type: object
+
+Object is not enabled.
+
+--
+
+*`labels`*::
++
+--
+A flat mapping of user-defined labels with string, boolean or number values.
+
+
+type: object
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+[float]
+=== service
+
+Service fields.
+
+
+
+*`service.name`*::
++
+--
+Immutable name of the service emitting this event.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`service.version`*::
++
+--
+Version of the service emitting this event.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`service.environment`*::
++
+--
+Service environment.
+
+
+type: keyword
+
+--
+
+
+*`service.node.name`*::
++
+--
+Unique meaningful name of the service node.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+
+*`service.language.name`*::
++
+--
+Name of the programming language used.
+
+
+type: keyword
+
+--
+
+*`service.language.version`*::
++
+--
+Version of the programming language used.
+
+
+type: keyword
+
+--
+
+
+*`service.runtime.name`*::
++
+--
+Name of the runtime used.
+
+
+type: keyword
+
+--
+
+*`service.runtime.version`*::
++
+--
+Version of the runtime used.
+
+
+type: keyword
+
+--
+
+
+*`service.framework.name`*::
++
+--
+Name of the framework used.
+
+
+type: keyword
+
+--
+
+*`service.framework.version`*::
++
+--
+Version of the framework used.
+
+
+type: keyword
+
+--
+
+
+*`session.id`*::
++
+--
+The ID of the session to which the event belongs.
+
+
+type: keyword
+
+--
+
+*`session.sequence`*::
++
+--
+The sequence number of the event within the session to which the event belongs.
+
+
+type: long
+
+--
+
+
+
+*`transaction.duration.us`*::
++
+--
+Total duration of this transaction, in microseconds.
+
+
+type: long
+
+--
+
+*`transaction.result`*::
++
+--
+The result of the transaction. HTTP status code for HTTP-related transactions.
+
+
+type: keyword
+
+--
+
+*`transaction.marks`*::
++
+--
+A user-defined mapping of groups of marks in milliseconds.
+
+
+type: object
+
+--
+
+*`transaction.marks.*.*`*::
++
+--
+A user-defined mapping of groups of marks in milliseconds.
+
+
+type: object
+
+--
+
+
+*`transaction.experience.cls`*::
++
+--
+The Cumulative Layout Shift metric
+
+type: scaled_float
+
+--
+
+*`transaction.experience.fid`*::
++
+--
+The First Input Delay metric
+
+type: scaled_float
+
+--
+
+*`transaction.experience.tbt`*::
++
+--
+The Total Blocking Time metric
+
+type: scaled_float
+
+--
+
+[float]
+=== longtask
+
+Longtask duration/count metrics
+
+
+*`transaction.experience.longtask.count`*::
++
+--
+The total number of longtasks
+
+type: long
+
+--
+
+*`transaction.experience.longtask.sum`*::
++
+--
+The sum of longtask durations
+
+type: scaled_float
+
+--
+
+*`transaction.experience.longtask.max`*::
++
+--
+The max longtask duration
+
+type: scaled_float
+
+--
+
+
+*`transaction.span_count.dropped`*::
++
+--
+The total number of dropped spans for this transaction.
+
+type: long
+
+--
+
+
+
+*`transaction.message.queue.name`*::
++
+--
+Name of the message queue or topic where the message is published or received.
+
+
+type: keyword
+
+--
+
+
+*`transaction.message.age.ms`*::
++
+--
+Age of a message in milliseconds.
+
+
+type: long
+
+--
+
+
+*`trace.id`*::
++
+--
+The ID of the trace to which the event belongs.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+
+*`parent.id`*::
++
+--
+The ID of the parent event.
+
+
+type: keyword
+
+--
+
+
+*`agent.name`*::
++
+--
+Name of the agent used.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`agent.version`*::
++
+--
+Version of the agent used.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+*`agent.ephemeral_id`*::
++
+--
+The Ephemeral ID identifies a running process.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
+
+--
+
+[float]
+=== container
+
+Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based on containers from any runtime.
+
+
+
+*`container.id`*::
++
+--
+Unique container id.
+
+
+type: keyword
+
+{yes-icon} {ecs-ref}[ECS] field.
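On Linux, agents commonly recover this ID by scanning `/proc/self/cgroup` for a 64-character hex token; the details vary by container runtime. An assumption-laden sketch of that heuristic, not taken from any particular agent:

[source,python]
----
import re

# Heuristic sketch: look for a 64-character hex token in the cgroup file,
# which is how the container ID often appears under Docker-like runtimes.
def container_id(cgroup_text):
    m = re.search(r"[0-9a-f]{64}", cgroup_text)
    return m.group(0) if m else None

line = "12:devices:/docker/" + "ab" * 32  # synthetic cgroup line
print(container_id(line))
----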
+ +-- + +[float] +=== kubernetes + +Kubernetes metadata reported by agents + + + +*`kubernetes.namespace`*:: ++ +-- +Kubernetes namespace + + +type: keyword + +-- + + +*`kubernetes.node.name`*:: ++ +-- +Kubernetes node name + + +type: keyword + +-- + + +*`kubernetes.pod.name`*:: ++ +-- +Kubernetes pod name + + +type: keyword + +-- + +*`kubernetes.pod.uid`*:: ++ +-- +Kubernetes Pod UID + + +type: keyword + +-- + +[float] +=== network + +Optional network fields + + + +[float] +=== connection + +Network connection details + + + +*`network.connection.type`*:: ++ +-- +Network connection type, eg. "wifi", "cell" + + +type: keyword + +-- + +*`network.connection.subtype`*:: ++ +-- +Detailed network connection sub-type, e.g. "LTE", "CDMA" + + +type: keyword + +-- + +[float] +=== carrier + +Network operator + + + +*`network.carrier.name`*:: ++ +-- +Carrier name, eg. Vodafone, T-Mobile, etc. + + +type: keyword + +-- + +*`network.carrier.mcc`*:: ++ +-- +Mobile country code + + +type: keyword + +-- + +*`network.carrier.mnc`*:: ++ +-- +Mobile network code + + +type: keyword + +-- + +*`network.carrier.icc`*:: ++ +-- +ISO country code, eg. US + + +type: keyword + +-- + +[float] +=== host + +Optional host fields. + + + +*`host.architecture`*:: ++ +-- +The architecture of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.hostname`*:: ++ +-- +The hostname of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.name`*:: ++ +-- +Name of the host the event was recorded on. It can contain same information as host.hostname or a name specified by the user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.ip`*:: ++ +-- +IP of the host that records the event. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`host.os.platform`*:: ++ +-- +The platform of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== process + +Information pertaining to the running process where the data was collected + + + +*`process.args`*:: ++ +-- +Process arguments. May be filtered to protect sensitive information. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pid`*:: ++ +-- +Numeric process ID of the service process. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.ppid`*:: ++ +-- +Numeric ID of the service's parent process. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.title`*:: ++ +-- +Service process title. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`observer.listening`*:: ++ +-- +Address the server is listening on. + + +type: keyword + +-- + +*`observer.hostname`*:: ++ +-- +Hostname of the APM Server. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version`*:: ++ +-- +APM Server version. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version_major`*:: ++ +-- +Major version number of the observer + + +type: byte + +-- + +*`observer.type`*:: ++ +-- +The type will be set to `apm-server`. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.id`*:: ++ +-- +Unique identifier of the APM Server. + + +type: keyword + +-- + +*`observer.ephemeral_id`*:: ++ +-- +Ephemeral identifier of the APM Server. + + +type: keyword + +-- + + +*`user.domain`*:: ++ +-- +The domain of the logged in user. 
+ + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.name`*:: ++ +-- +The username of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.id`*:: ++ +-- +Identifier of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.email`*:: ++ +-- +Email of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`client.domain`*:: ++ +-- +Client domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.ip`*:: ++ +-- +IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.port`*:: ++ +-- +Port of the client. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`source.domain`*:: ++ +-- +Source domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.ip`*:: ++ +-- +IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.port`*:: ++ +-- +Port of the source. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== destination + +Destination fields describe details about the destination of a packet/event. +Destination fields are usually populated in conjunction with source fields. + + +*`destination.address`*:: ++ +-- +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.ip`*:: ++ +-- +IP addess of the destination. Can be one of multiple IPv4 or IPv6 addresses. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.port`*:: ++ +-- +Port of the destination. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== user_agent + +The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + + + +*`user_agent.original`*:: ++ +-- +Unparsed version of the user_agent. + + +type: keyword + +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.original.text`*:: ++ +-- +Software agent acting in behalf of a user, eg. a web browser / OS combination. + + +type: text + +-- + +*`user_agent.name`*:: ++ +-- +Name of the user agent. + + +type: keyword + +example: Safari + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.version`*:: ++ +-- +Version of the user agent. + + +type: keyword + +example: 12.0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== device + +Information concerning the device. + + + +*`user_agent.device.name`*:: ++ +-- +Name of the device. + + +type: keyword + +example: iPhone + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`user_agent.os.platform`*:: ++ +-- +Operating system platform (such centos, ubuntu, windows). 
+ + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.name`*:: ++ +-- +Operating system name, without the version. + + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.full`*:: ++ +-- +Operating system name, including the version or code name. + + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.version`*:: ++ +-- +Operating system version as a raw string. + + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== cloud + +Cloud metadata reported by agents + + + + +*`cloud.account.id`*:: ++ +-- +Cloud account ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.account.name`*:: ++ +-- +Cloud account name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.availability_zone`*:: ++ +-- +Cloud availability zone name + +type: keyword + +example: us-east1-a + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.instance.id`*:: ++ +-- +Cloud instance/machine ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.name`*:: ++ +-- +Cloud instance/machine name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.machine.type`*:: ++ +-- +Cloud instance/machine type + +type: keyword + +example: t2.medium + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.project.id`*:: ++ +-- +Cloud project ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.name`*:: ++ +-- +Cloud project name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.provider`*:: ++ +-- +Cloud provider name + +type: keyword + +example: gcp + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.region`*:: ++ +-- +Cloud region name + +type: keyword + +example: us-east1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.service.name`*:: ++ +-- +Cloud service name, intended to distinguish services running on different platforms within a provider. + + +type: keyword + +-- + + +*`event.outcome`*:: ++ +-- +`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. + + +type: keyword + +example: success + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[[exported-fields-apm-transaction-metrics]] +== APM Transaction Metrics fields + +APM transaction metrics, and transaction metrics-specific properties, such as transaction.root. + + + +*`processor.name`*:: ++ +-- +Processor name. + +type: keyword + +-- + +*`processor.event`*:: ++ +-- +Processor event. + +type: keyword + +-- + +*`timeseries.instance`*:: ++ +-- +Time series instance ID + +type: keyword + +-- + + +*`timestamp.us`*:: ++ +-- +Timestamp of the event in microseconds since Unix epoch. + + +type: long + +-- + +*`labels`*:: ++ +-- +A flat mapping of user-defined labels with string, boolean or number values. + + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`metricset.name`*:: ++ +-- +Name of the set of metrics. + + +type: keyword + +example: transaction + +-- + +[float] +=== service + +Service fields. 
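These service fields matter here because transaction metrics documents are aggregated per service identity. A toy sketch of that grouping idea with invented events (the real aggregation key used by APM Server is broader than the two fields shown):

[source,python]
----
from collections import defaultdict

# Toy illustration: group events by service identity and accumulate
# count/sum, the shape that metrics documents in this section take.
events = [
    {"service.name": "opbeans", "service.environment": "prod", "duration_us": 900},
    {"service.name": "opbeans", "service.environment": "prod", "duration_us": 1100},
]
groups = defaultdict(lambda: {"count": 0, "sum_us": 0})
for e in events:
    key = (e["service.name"], e["service.environment"])
    groups[key]["count"] += 1
    groups[key]["sum_us"] += e["duration_us"]
print(dict(groups))
----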
+ + + +*`service.name`*:: ++ +-- +Immutable name of the service emitting this event. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`service.version`*:: ++ +-- +Version of the service emitting this event. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`service.environment`*:: ++ +-- +Service environment. + + +type: keyword + +-- + + +*`service.node.name`*:: ++ +-- +Unique meaningful name of the service node. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`service.language.name`*:: ++ +-- +Name of the programming language used. + + +type: keyword + +-- + +*`service.language.version`*:: ++ +-- +Version of the programming language used. + + +type: keyword + +-- + + +*`service.runtime.name`*:: ++ +-- +Name of the runtime used. + + +type: keyword + +-- + +*`service.runtime.version`*:: ++ +-- +Version of the runtime used. + + +type: keyword + +-- + + +*`service.framework.name`*:: ++ +-- +Name of the framework used. + + +type: keyword + +-- + +*`service.framework.version`*:: ++ +-- +Version of the framework used. + + +type: keyword + +-- + + +*`transaction.id`*:: ++ +-- +The transaction ID. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`transaction.sampled`*:: ++ +-- +Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + + +type: boolean + +-- + +*`transaction.type`*:: ++ +-- +Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) + + +type: keyword + +-- + +*`transaction.name`*:: ++ +-- +Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + + +type: keyword + +-- + +*`transaction.name.text`*:: ++ +-- +type: text + +-- + + +*`transaction.duration.count`*:: ++ +-- +Number of aggregated transactions. + +type: long + +-- + + +*`transaction.duration.sum.us`*:: ++ +-- +Aggregated transaction duration, in microseconds. + +type: long + +-- + +[float] +=== self_time + +Portion of the transaction's duration where no direct child was running + + + +*`transaction.self_time.count`*:: ++ +-- +Number of aggregated transactions. + +type: long + +-- + + +*`transaction.self_time.sum.us`*:: ++ +-- +Aggregated transaction duration, excluding the time periods where a direct child was running, in microseconds. + + +type: long + +-- + + +*`transaction.breakdown.count`*:: ++ +-- +Counter for collected breakdowns for the transaction + + +type: long + +-- + +*`transaction.root`*:: ++ +-- +Identifies metrics for root transactions. This can be used for calculating metrics for traces. + + +type: boolean + +-- + +*`transaction.result`*:: ++ +-- +The result of the transaction. HTTP status code for HTTP-related transactions. + + +type: keyword + +-- + + +*`span.type`*:: ++ +-- +Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). + + +type: keyword + +-- + +*`span.subtype`*:: ++ +-- +A further sub-division of the type (e.g. postgresql, elasticsearch) + + +type: keyword + +-- + +[float] +=== self_time + +Portion of the span's duration where no direct child was running + + + +*`span.self_time.count`*:: ++ +-- +Number of aggregated spans. + +type: long + +-- + + +*`span.self_time.sum.us`*:: ++ +-- +Aggregated span duration, excluding the time periods where a direct child was running, in microseconds. 
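As with the other pre-aggregated count/sum pairs in this section, the useful quantity is the ratio of the two fields. A one-line sketch with invented values:

[source,python]
----
# Average self time per aggregated group: sum.us / count, converted to ms.
# Sample values are invented.
self_time = {"count": 4, "sum.us": 8_000}
print(self_time["sum.us"] / self_time["count"] / 1000, "ms")  # 2.0 ms
----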
+ + +type: long + +-- + + +[float] +=== service + +Destination service context + + +*`span.destination.service.resource`*:: ++ +-- +Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') + + +type: keyword + +-- + + +*`agent.name`*:: ++ +-- +Name of the agent used. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.version`*:: ++ +-- +Version of the agent used. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.ephemeral_id`*:: ++ +-- +The Ephemeral ID identifies a running process. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== container + +Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. + + + +*`container.id`*:: ++ +-- +Unique container id. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== kubernetes + +Kubernetes metadata reported by agents + + + +*`kubernetes.namespace`*:: ++ +-- +Kubernetes namespace + + +type: keyword + +-- + + +*`kubernetes.node.name`*:: ++ +-- +Kubernetes node name + + +type: keyword + +-- + + +*`kubernetes.pod.name`*:: ++ +-- +Kubernetes pod name + + +type: keyword + +-- + +*`kubernetes.pod.uid`*:: ++ +-- +Kubernetes Pod UID + + +type: keyword + +-- + +[float] +=== network + +Optional network fields + + + +[float] +=== connection + +Network connection details + + + +*`network.connection.type`*:: ++ +-- +Network connection type, eg. "wifi", "cell" + + +type: keyword + +-- + +*`network.connection.subtype`*:: ++ +-- +Detailed network connection sub-type, e.g. "LTE", "CDMA" + + +type: keyword + +-- + +[float] +=== carrier + +Network operator + + + +*`network.carrier.name`*:: ++ +-- +Carrier name, eg. Vodafone, T-Mobile, etc. + + +type: keyword + +-- + +*`network.carrier.mcc`*:: ++ +-- +Mobile country code + + +type: keyword + +-- + +*`network.carrier.mnc`*:: ++ +-- +Mobile network code + + +type: keyword + +-- + +*`network.carrier.icc`*:: ++ +-- +ISO country code, eg. US + + +type: keyword + +-- + +[float] +=== host + +Optional host fields. + + + +*`host.architecture`*:: ++ +-- +The architecture of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.hostname`*:: ++ +-- +The hostname of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.name`*:: ++ +-- +Name of the host the event was recorded on. It can contain same information as host.hostname or a name specified by the user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.ip`*:: ++ +-- +IP of the host that records the event. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`host.os.platform`*:: ++ +-- +The platform of the host the event was recorded on. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== process + +Information pertaining to the running process where the data was collected + + + +*`process.args`*:: ++ +-- +Process arguments. May be filtered to protect sensitive information. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pid`*:: ++ +-- +Numeric process ID of the service process. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.ppid`*:: ++ +-- +Numeric ID of the service's parent process. 
+ + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.title`*:: ++ +-- +Service process title. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`observer.listening`*:: ++ +-- +Address the server is listening on. + + +type: keyword + +-- + +*`observer.hostname`*:: ++ +-- +Hostname of the APM Server. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version`*:: ++ +-- +APM Server version. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version_major`*:: ++ +-- +Major version number of the observer + + +type: byte + +-- + +*`observer.type`*:: ++ +-- +The type will be set to `apm-server`. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.id`*:: ++ +-- +Unique identifier of the APM Server. + + +type: keyword + +-- + +*`observer.ephemeral_id`*:: ++ +-- +Ephemeral identifier of the APM Server. + + +type: keyword + +-- + + +*`user.name`*:: ++ +-- +The username of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.id`*:: ++ +-- +Identifier of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user.email`*:: ++ +-- +Email of the logged in user. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`client.domain`*:: ++ +-- +Client domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.ip`*:: ++ +-- +IP address of the client of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.port`*:: ++ +-- +Port of the client. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`source.domain`*:: ++ +-- +Source domain. + + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.ip`*:: ++ +-- +IP address of the source of a recorded event. This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`source.port`*:: ++ +-- +Port of the source. + + +type: long + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== destination + +Destination fields describe details about the destination of a packet/event. +Destination fields are usually populated in conjunction with source fields. + + +*`destination.address`*:: ++ +-- +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.ip`*:: ++ +-- +IP addess of the destination. Can be one of multiple IPv4 or IPv6 addresses. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.port`*:: ++ +-- +Port of the destination. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== user_agent + +The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + + + +*`user_agent.original`*:: ++ +-- +Unparsed version of the user_agent. 
+ + +type: keyword + +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.original.text`*:: ++ +-- +Software agent acting on behalf of a user, e.g. a web browser / OS combination. + + +type: text + +-- + +*`user_agent.name`*:: ++ +-- +Name of the user agent. + + +type: keyword + +example: Safari + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.version`*:: ++ +-- +Version of the user agent. + + +type: keyword + +example: 12.0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== device + +Information concerning the device. + + + +*`user_agent.device.name`*:: ++ +-- +Name of the device. + + +type: keyword + +example: iPhone + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + + +*`user_agent.os.platform`*:: ++ +-- +Operating system platform (such as centos, ubuntu, windows). + + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.name`*:: ++ +-- +Operating system name, without the version. + + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.full`*:: ++ +-- +Operating system name, including the version or code name. + + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.version`*:: ++ +-- +Operating system version as a raw string. + + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`user_agent.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== cloud + +Cloud metadata reported by agents + + + + +*`cloud.account.id`*:: ++ +-- +Cloud account ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.account.name`*:: ++ +-- +Cloud account name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.availability_zone`*:: ++ +-- +Cloud availability zone name + +type: keyword + +example: us-east1-a + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.instance.id`*:: ++ +-- +Cloud instance/machine ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.name`*:: ++ +-- +Cloud instance/machine name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.machine.type`*:: ++ +-- +Cloud instance/machine type + +type: keyword + +example: t2.medium + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.project.id`*:: ++ +-- +Cloud project ID + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.name`*:: ++ +-- +Cloud project name + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.provider`*:: ++ +-- +Cloud provider name + +type: keyword + +example: gcp + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.region`*:: ++ +-- +Cloud region name + +type: keyword + +example: us-east1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + + +*`cloud.service.name`*:: ++ +-- +Cloud service name, intended to distinguish services running on different platforms within a provider.
+ + +type: keyword + +-- + + +*`event.outcome`*:: ++ +-- +`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. + + +type: keyword + +example: success + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[[exported-fields-apm-transaction-metrics-xpack]] +== APM Transaction Metrics fields + +APM transaction metrics, and transaction metrics-specific properties, requiring licensed features such as the histogram field type. + + + + + +*`transaction.duration.histogram`*:: ++ +-- +Pre-aggregated histogram of transaction durations. + + +type: histogram + +-- + +[[exported-fields-beat-common]] +== Beat fields + +Contains common beat fields available in all event types. + + + +*`agent.hostname`*:: ++ +-- +Deprecated - use agent.name or agent.id to identify an agent. + + +type: alias + +alias to: agent.name + +-- + +*`beat.timezone`*:: ++ +-- +type: alias + +alias to: event.timezone + +-- + +*`fields`*:: ++ +-- +Contains user configurable fields. + + +type: object + +-- + +*`beat.name`*:: ++ +-- +type: alias + +alias to: host.name + +-- + +*`beat.hostname`*:: ++ +-- +type: alias + +alias to: agent.name + +-- + +*`timeseries.instance`*:: ++ +-- +Time series instance id + +type: keyword + +-- + +[[exported-fields-cloud]] +== Cloud provider metadata fields + +Metadata from cloud providers added by the add_cloud_metadata processor. + + + +*`cloud.image.id`*:: ++ +-- +Image ID for the cloud instance. + + +example: ami-abcd1234 + +-- + +*`meta.cloud.provider`*:: ++ +-- +type: alias + +alias to: cloud.provider + +-- + +*`meta.cloud.instance_id`*:: ++ +-- +type: alias + +alias to: cloud.instance.id + +-- + +*`meta.cloud.instance_name`*:: ++ +-- +type: alias + +alias to: cloud.instance.name + +-- + +*`meta.cloud.machine_type`*:: ++ +-- +type: alias + +alias to: cloud.machine.type + +-- + +*`meta.cloud.availability_zone`*:: ++ +-- +type: alias + +alias to: cloud.availability_zone + +-- + +*`meta.cloud.project_id`*:: ++ +-- +type: alias + +alias to: cloud.project.id + +-- + +*`meta.cloud.region`*:: ++ +-- +type: alias + +alias to: cloud.region + +-- + +[[exported-fields-docker-processor]] +== Docker fields + +Docker stats collected from Docker. + + + + +*`docker.container.id`*:: ++ +-- +type: alias + +alias to: container.id + +-- + +*`docker.container.image`*:: ++ +-- +type: alias + +alias to: container.image.name + +-- + +*`docker.container.name`*:: ++ +-- +type: alias + +alias to: container.name + +-- + +*`docker.container.labels`*:: ++ +-- +Image labels. + + +type: object + +-- + +[[exported-fields-ecs]] +== ECS fields + + +This section defines Elastic Common Schema (ECS) fields—a common set of fields +to be used when storing event data in {es}. + +This is an exhaustive list, and fields listed here are not necessarily used by {beatname_uc}. +The goal of ECS is to enable and encourage users of {es} to normalize their event data, +so that they can better analyze, visualize, and correlate the data represented in their events. + +See the {ecs-ref}[ECS reference] for more information. + +*`@timestamp`*:: ++ +-- +Date/time when the event originated. +This is the date/time extracted from the event, typically representing when the event was generated by the source. +If the event source has no original timestamp, this value is typically populated by the first time the event was received by the pipeline. +Required field for all events. 
+ +type: date + +example: 2016-05-23T08:05:34.853Z + +required: True + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`labels`*:: ++ +-- +Custom key/value pairs. +Can be used to add meta information to events. Should not contain nested objects. All values are stored as keyword. +Example: `docker` and `k8s` labels. + +type: object + +example: {"application": "foo-bar", "env": "production"} + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`message`*:: ++ +-- +For log events the message field contains the log message, optimized for viewing in a log viewer. +For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. +If multiple messages exist, they can be combined into one message. + +type: text + +example: Hello World + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`tags`*:: ++ +-- +List of keywords used to tag each event. + +type: keyword + +example: ["production", "env2"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== agent + +The agent fields contain the data about the software entity, if any, that collects, detects, or observes events on a host, or takes measurements on a host. +Examples include Beats. Agents may also run on observers. ECS agent.* fields shall be populated with details of the agent running on the host or observer where the event happened or the measurement was taken. + + +*`agent.build.original`*:: ++ +-- +Extended build information for the agent. +This field is intended to contain any build information that a data source may provide, no specific formatting is required. + +type: keyword + +example: metricbeat version 7.6.0 (amd64), libbeat 7.6.0 [6a23e8f8f30f5001ba344e4e54d8d9cb82cb107c built 2020-02-05 23:10:10 +0000 UTC] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.ephemeral_id`*:: ++ +-- +Ephemeral identifier of this agent (if one exists). +This id normally changes across restarts, but `agent.id` does not. + +type: keyword + +example: 8a4f500f + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.id`*:: ++ +-- +Unique identifier of this agent (if one exists). +Example: For Beats this would be beat.id. + +type: keyword + +example: 8a4f500d + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.name`*:: ++ +-- +Custom name of the agent. +This is a name that can be given to an agent. This can be helpful if for example two Filebeat instances are running on the same host but a human readable separation is needed on which Filebeat instance data is coming from. +If no name is given, the name is often left empty. + +type: keyword + +example: foo + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.type`*:: ++ +-- +Type of the agent. +The agent type always stays the same and should be given by the agent used. In case of Filebeat the agent would always be Filebeat also if two Filebeat instances are run on the same machine. + +type: keyword + +example: filebeat + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`agent.version`*:: ++ +-- +Version of the agent. + +type: keyword + +example: 6.0.0-rc2 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== as + +An autonomous system (AS) is a collection of connected Internet Protocol (IP) routing prefixes under the control of one or more network operators on behalf of a single administrative entity or domain that presents a common, clearly defined routing policy to the internet. + + +*`as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. 
+ +type: long + +example: 15169 + +-- + +*`as.organization.name`*:: ++ +-- +Organization name. + +type: keyword + +example: Google LLC + +-- + +*`as.organization.name.text`*:: ++ +-- +type: text + +-- + +[float] +=== client + +A client is defined as the initiator of a network connection for events regarding sessions, connections, or bidirectional flow records. +For TCP events, the client is the initiator of the TCP connection that sends the SYN packet(s). For other protocols, the client is generally the initiator or requestor in the network transaction. Some systems use the term "originator" to refer to the client in TCP connections. The client fields describe details about the system acting as the client in the network event. Client fields are usually populated in conjunction with server fields. Client fields are generally not populated for packet-level events. +Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. + + +*`client.address`*:: ++ +-- +Some event client addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. + +type: long + +example: 15169 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.as.organization.name`*:: ++ +-- +Organization name. + +type: keyword + +example: Google LLC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.as.organization.name.text`*:: ++ +-- +type: text + +-- + +*`client.bytes`*:: ++ +-- +Bytes sent from the client to the server. + +type: long + +example: 184 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.domain`*:: ++ +-- +Client domain. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.continent_code`*:: ++ +-- +Two-letter code representing the continent's name. + +type: keyword + +example: NA + +-- + +*`client.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.postal_code`*:: ++ +-- +Postal code associated with the location.
+Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`client.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`client.ip`*:: ++ +-- +IP address of the client (IPv4 or IPv6). + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.mac`*:: ++ +-- +MAC address of the client. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. + +type: keyword + +example: 00-00-5E-00-53-23 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.nat.ip`*:: ++ +-- +Translated IP of source-based NAT sessions (e.g. internal client to internet). +Typically connections traversing load balancers, firewalls, or routers. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.nat.port`*:: ++ +-- +Translated port of source-based NAT sessions (e.g. internal client to internet). +Typically connections traversing load balancers, firewalls, or routers. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.packets`*:: ++ +-- +Packets sent from the client to the server. + +type: long + +example: 12 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.port`*:: ++ +-- +Port of the client. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.registered_domain`*:: ++ +-- +The highest registered client domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.subdomain`*:: ++ +-- +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example, the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. + +type: keyword + +example: east + +-- + +*`client.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.domain`*:: ++ +-- +Name of the directory the user is a member of.
+For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.email`*:: ++ +-- +User email address. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.full_name`*:: ++ +-- +User's full name, if available. + +type: keyword + +example: Albert Einstein + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.full_name.text`*:: ++ +-- +type: text + +-- + +*`client.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.group.name`*:: ++ +-- +Name of the group. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.hash`*:: ++ +-- +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.id`*:: ++ +-- +Unique identifier of the user. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`client.user.name.text`*:: ++ +-- +type: text + +-- + +*`client.user.roles`*:: ++ +-- +Array of user roles at the time of the event. + +type: keyword + +example: ["kibana_admin", "reporting_user"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== cloud + +Fields related to the cloud or infrastructure the events are coming from. + + +*`cloud.account.id`*:: ++ +-- +The cloud account or organization id used to identify different entities in a multi-tenant environment. +Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. + +type: keyword + +example: 666777888999 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.account.name`*:: ++ +-- +The cloud account name or alias used to identify different entities in a multi-tenant environment. +Examples: AWS account name, Google Cloud ORG display name. + +type: keyword + +example: elastic-dev + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.availability_zone`*:: ++ +-- +Availability zone in which this host, resource, or service is located. + +type: keyword + +example: us-east-1c + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.id`*:: ++ +-- +Instance ID of the host machine. + +type: keyword + +example: i-1234567890abcdef0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.instance.name`*:: ++ +-- +Instance name of the host machine. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.machine.type`*:: ++ +-- +Machine type of the host machine. + +type: keyword + +example: t2.medium + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.id`*:: ++ +-- +The cloud project identifier. +Examples: Google Cloud Project id, Azure Project id. + +type: keyword + +example: my-project + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.project.name`*:: ++ +-- +The cloud project name. +Examples: Google Cloud Project name, Azure Project name. + +type: keyword + +example: my project + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.provider`*:: ++ +-- +Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. 
+ +type: keyword + +example: aws + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.region`*:: ++ +-- +Region in which this host, resource, or service is located. + +type: keyword + +example: us-east-1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`cloud.service.name`*:: ++ +-- +The cloud service name is intended to distinguish services running on different platforms within a provider, e.g. AWS EC2 vs Lambda, GCP GCE vs App Engine, Azure VM vs App Server. +Examples: app engine, app service, cloud run, fargate, lambda. + +type: keyword + +example: lambda + +-- + +[float] +=== code_signature + +These fields contain information about binary code signatures. + + +*`code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +-- + +*`code_signature.signing_id`*:: ++ +-- +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. + +type: keyword + +example: com.apple.xpc.proxy + +-- + +*`code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +-- + +*`code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +-- + +*`code_signature.team_id`*:: ++ +-- +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. + +type: keyword + +example: EQHXZ8M8AV + +-- + +*`code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +-- + +*`code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +-- + +[float] +=== container + +Container fields are used for meta information about the specific container that is the source of information. +These fields help correlate data based on containers from any runtime. + + +*`container.id`*:: ++ +-- +Unique container id. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`container.image.name`*:: ++ +-- +Name of the image the container was built on. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`container.image.tag`*:: ++ +-- +Container image tags. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`container.labels`*:: ++ +-- +Image labels. + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`container.name`*:: ++ +-- +Container name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`container.runtime`*:: ++ +-- +Runtime managing this container. + +type: keyword + +example: docker + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== data_stream + +The data_stream fields take part in defining the new data stream naming scheme. +In the new data stream naming scheme the values of the data stream fields combine to form the name of the actual data stream in the following manner: `{data_stream.type}-{data_stream.dataset}-{data_stream.namespace}`.
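+
+As a minimal illustration (using the example values given for the individual fields below), composing the name is a plain join of the three values:
+
+[source,python]
+----
+# Sketch: compose a data stream name from the three data_stream fields.
+fields = {"type": "logs", "dataset": "nginx.access", "namespace": "production"}
+name = "{type}-{dataset}-{namespace}".format(**fields)
+print(name)  # logs-nginx.access-production
+----
+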
This means the fields can only contain characters that are valid as part of names of data streams. More details about this can be found in this https://www.elastic.co/blog/an-introduction-to-the-elastic-data-stream-naming-scheme[blog post]. +An Elasticsearch data stream consists of one or more backing indices, and a data stream name forms part of the backing indices names. Due to this convention, data streams must also follow index naming restrictions. For example, data stream names cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`. Please see the Elasticsearch reference for additional https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params[restrictions]. + + +*`data_stream.dataset`*:: ++ +-- +The field can contain anything that makes sense to signify the source of the data. +Examples include `nginx.access`, `prometheus`, `endpoint` etc. For data streams that otherwise fit, but that do not have dataset set we use the value "generic" for the dataset value. `event.dataset` should have the same value as `data_stream.dataset`. +Beyond the Elasticsearch data stream naming criteria noted above, the `dataset` value has additional restrictions: + * Must not contain `-` + * No longer than 100 characters + +type: constant_keyword + +example: nginx.access + +-- + +*`data_stream.namespace`*:: ++ +-- +A user defined namespace. Namespaces are useful to allow grouping of data. +Many users already organize their indices this way, and the data stream naming scheme now provides this best practice as a default. Many users will populate this field with `default`. If no value is used, it falls back to `default`. +Beyond the Elasticsearch index naming criteria noted above, `namespace` value has the additional restrictions: + * Must not contain `-` + * No longer than 100 characters + +type: constant_keyword + +example: production + +-- + +*`data_stream.type`*:: ++ +-- +An overarching type for the data stream. +Currently allowed values are "logs" and "metrics". We expect to also add "traces" and "synthetics" in the near future. + +type: constant_keyword + +example: logs + +-- + +[float] +=== destination + +Destination fields capture details about the receiver of a network exchange/packet. These fields are populated from a network event, packet, or other event containing details of a network transaction. +Destination fields are usually populated in conjunction with source fields. The source and destination fields are considered the baseline and should always be filled if an event contains source and destination details from a network transaction. If the event also contains identification of the client and server roles, then the client and server fields should also be populated. + + +*`destination.address`*:: ++ +-- +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. + +type: long + +example: 15169 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.as.organization.name`*:: ++ +-- +Organization name. 
+ +type: keyword + +example: Google LLC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.as.organization.name.text`*:: ++ +-- +type: text + +-- + +*`destination.bytes`*:: ++ +-- +Bytes sent from the destination to the source. + +type: long + +example: 184 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.domain`*:: ++ +-- +Destination domain. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.continent_code`*:: ++ +-- +Two-letter code representing the continent's name. + +type: keyword + +example: NA + +-- + +*`destination.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`destination.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`destination.ip`*:: ++ +-- +IP address of the destination (IPv4 or IPv6). + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.mac`*:: ++ +-- +MAC address of the destination. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. + +type: keyword + +example: 00-00-5E-00-53-23 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.nat.ip`*:: ++ +-- +Translated IP of destination-based NAT sessions (e.g. internet to private DMZ). +Typically used with load balancers, firewalls, or routers. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.nat.port`*:: ++ +-- +Port the source session is translated to by the NAT device. +Typically used with load balancers, firewalls, or routers. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.packets`*:: ++ +-- +Packets sent from the destination to the source. + +type: long + +example: 12 + +{yes-icon} {ecs-ref}[ECS] field.
+ +-- + +*`destination.port`*:: ++ +-- +Port of the destination. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.registered_domain`*:: ++ +-- +The highest registered destination domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.subdomain`*:: ++ +-- +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example, the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. + +type: keyword + +example: east + +-- + +*`destination.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.domain`*:: ++ +-- +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.email`*:: ++ +-- +User email address. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.full_name`*:: ++ +-- +User's full name, if available. + +type: keyword + +example: Albert Einstein + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.full_name.text`*:: ++ +-- +type: text + +-- + +*`destination.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.group.name`*:: ++ +-- +Name of the group. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.hash`*:: ++ +-- +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.id`*:: ++ +-- +Unique identifier of the user. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`destination.user.name.text`*:: ++ +-- +type: text + +-- + +*`destination.user.roles`*:: ++ +-- +Array of user roles at the time of the event.
+ +type: keyword + +example: ["kibana_admin", "reporting_user"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== dll + +These fields contain information about code libraries dynamically loaded into processes. + +Many operating systems refer to "shared code libraries" with different names, but this field set refers to all of the following: +* Dynamic-link library (`.dll`) commonly used on Windows +* Shared Object (`.so`) commonly used on Unix-like operating systems +* Dynamic library (`.dylib`) commonly used on macOS + + +*`dll.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.code_signature.signing_id`*:: ++ +-- +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. + +type: keyword + +example: com.apple.xpc.proxy + +-- + +*`dll.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.code_signature.team_id`*:: ++ +-- +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. + +type: keyword + +example: EQHXZ8M8AV + +-- + +*`dll.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.hash.ssdeep`*:: ++ +-- +SSDEEP hash. + +type: keyword + +-- + +*`dll.name`*:: ++ +-- +Name of the library. +This generally maps to the name of the file on disk. + +type: keyword + +example: kernel32.dll + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.path`*:: ++ +-- +Full file path of the library. + +type: keyword + +example: C:\Windows\System32\kernel32.dll + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.architecture`*:: ++ +-- +CPU architecture target for the file. + +type: keyword + +example: x64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. 
+ +-- + +*`dll.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.imphash`*:: ++ +-- +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dll.pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== dns + +Fields describing DNS queries and answers. +DNS events should either represent a single DNS query prior to getting answers (`dns.type:query`) or they should represent a full exchange and contain the query details as well as all of the answers that were provided for this query (`dns.type:answer`). + + +*`dns.answers`*:: ++ +-- +An array containing an object for each answer section returned by the server. +The main keys that should be present in these objects are defined by ECS. Records that have more information may contain more keys than what ECS defines. +Not all DNS data sources give all details about DNS answers. At minimum, answer objects must contain the `data` key. If more information is available, map as much of it to ECS as possible, and add any additional fields to the answer objects as custom fields. + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.answers.class`*:: ++ +-- +The class of DNS data contained in this resource record. + +type: keyword + +example: IN + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.answers.data`*:: ++ +-- +The data describing the resource. +The meaning of this data depends on the type and class of the resource record. + +type: keyword + +example: 10.10.10.10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.answers.name`*:: ++ +-- +The domain name to which this resource record pertains. +If a chain of CNAME is being resolved, each answer's `name` should be the one that corresponds with the answer's `data`. It should not simply be the original `question.name` repeated. + +type: keyword + +example: www.example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.answers.ttl`*:: ++ +-- +The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. + +type: long + +example: 180 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.answers.type`*:: ++ +-- +The type of data contained in this resource record. + +type: keyword + +example: CNAME + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.header_flags`*:: ++ +-- +Array of 2 letter DNS header flags. +Expected values are: AA, TC, RD, RA, AD, CD, DO. + +type: keyword + +example: ["RD", "RA"] + +{yes-icon} {ecs-ref}[ECS] field. 
+ +-- + +*`dns.id`*:: ++ +-- +The DNS packet identifier assigned by the program that generated the query. The identifier is copied to the response. + +type: keyword + +example: 62111 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.op_code`*:: ++ +-- +The DNS operation code that specifies the kind of query in the message. This value is set by the originator of a query and copied into the response. + +type: keyword + +example: QUERY + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.class`*:: ++ +-- +The class of records being queried. + +type: keyword + +example: IN + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.name`*:: ++ +-- +The name being queried. +If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\DDD). Backslashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \t, \r, and \n respectively. + +type: keyword + +example: www.example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.registered_domain`*:: ++ +-- +The highest registered domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.subdomain`*:: ++ +-- +The subdomain is all of the labels under the registered_domain. +If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. + +type: keyword + +example: www + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.question.type`*:: ++ +-- +The type of record being queried. + +type: keyword + +example: AAAA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.resolved_ip`*:: ++ +-- +Array containing all IPs seen in `answers.data`. +The `answers` array can be difficult to use, because of the variety of data formats it can contain. Extracting all IP addresses seen in there to `dns.resolved_ip` makes it possible to index them as IP addresses, and makes them easier to visualize and query for. + +type: ip + +example: ["10.10.10.10", "10.10.10.11"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.response_code`*:: ++ +-- +The DNS response code. + +type: keyword + +example: NOERROR + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`dns.type`*:: ++ +-- +The type of DNS event captured, query or answer. +If your source of DNS events only gives you DNS queries, you should only create dns events of type `dns.type:query`. +If your source of DNS events gives you answers as well, you should create one event per query (optionally as soon as the query is seen), and a second event containing all query details as well as an array of answers.
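+
+A hedged sketch of that convention (the event shapes are illustrative, reusing example values from this section, not a fixed schema):
+
+[source,python]
+----
+# Sketch: one event for the query, and a second event repeating the
+# query details together with the array of answers.
+query_event = {"dns.type": "query", "dns.question.name": "www.example.com"}
+answer_event = {
+    "dns.type": "answer",
+    "dns.question.name": "www.example.com",
+    "dns.answers": [{"data": "10.10.10.10", "type": "A", "ttl": 180}],
+    "dns.resolved_ip": ["10.10.10.10"],  # all IPs seen in answers.data
+}
+----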
+ +type: keyword + +example: answer + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== ecs + +Meta-information specific to ECS. + + +*`ecs.version`*:: ++ +-- +ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. +When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. + +type: keyword + +example: 1.0.0 + +required: True + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== elf + +These fields contain Linux Executable Linkable Format (ELF) metadata. + + +*`elf.architecture`*:: ++ +-- +Machine architecture of the ELF file. + +type: keyword + +example: x86-64 + +-- + +*`elf.byte_order`*:: ++ +-- +Byte sequence of ELF file. + +type: keyword + +example: Little Endian + +-- + +*`elf.cpu_type`*:: ++ +-- +CPU type of the ELF file. + +type: keyword + +example: Intel + +-- + +*`elf.creation_date`*:: ++ +-- +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. + +type: date + +-- + +*`elf.exports`*:: ++ +-- +List of exported element names and types. + +type: flattened + +-- + +*`elf.header.abi_version`*:: ++ +-- +Version of the ELF Application Binary Interface (ABI). + +type: keyword + +-- + +*`elf.header.class`*:: ++ +-- +Header class of the ELF file. + +type: keyword + +-- + +*`elf.header.data`*:: ++ +-- +Data table of the ELF header. + +type: keyword + +-- + +*`elf.header.entrypoint`*:: ++ +-- +Header entrypoint of the ELF file. + +type: long + +format: string + +-- + +*`elf.header.object_version`*:: ++ +-- +"0x1" for original ELF files. + +type: keyword + +-- + +*`elf.header.os_abi`*:: ++ +-- +Application Binary Interface (ABI) of the Linux OS. + +type: keyword + +-- + +*`elf.header.type`*:: ++ +-- +Header type of the ELF file. + +type: keyword + +-- + +*`elf.header.version`*:: ++ +-- +Version of the ELF header. + +type: keyword + +-- + +*`elf.imports`*:: ++ +-- +List of imported element names and types. + +type: flattened + +-- + +*`elf.sections`*:: ++ +-- +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. + +type: nested + +-- + +*`elf.sections.chi2`*:: ++ +-- +Chi-square probability distribution of the section. + +type: long + +format: number + +-- + +*`elf.sections.entropy`*:: ++ +-- +Shannon entropy calculation from the section. + +type: long + +format: number + +-- + +*`elf.sections.flags`*:: ++ +-- +ELF Section List flags. + +type: keyword + +-- + +*`elf.sections.name`*:: ++ +-- +ELF Section List name. + +type: keyword + +-- + +*`elf.sections.physical_offset`*:: ++ +-- +ELF Section List offset. + +type: keyword + +-- + +*`elf.sections.physical_size`*:: ++ +-- +ELF Section List physical size. + +type: long + +format: bytes + +-- + +*`elf.sections.type`*:: ++ +-- +ELF Section List type. + +type: keyword + +-- + +*`elf.sections.virtual_address`*:: ++ +-- +ELF Section List virtual address. + +type: long + +format: string + +-- + +*`elf.sections.virtual_size`*:: ++ +-- +ELF Section List virtual size. + +type: long + +format: string + +-- + +*`elf.segments`*:: ++ +-- +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. + +type: nested + +-- + +*`elf.segments.sections`*:: ++ +-- +ELF object segment sections. 
+ +type: keyword + +-- + +*`elf.segments.type`*:: ++ +-- +ELF object segment type. + +type: keyword + +-- + +*`elf.shared_libraries`*:: ++ +-- +List of shared libraries used by this ELF object. + +type: keyword + +-- + +*`elf.telfhash`*:: ++ +-- +telfhash symbol hash for ELF file. + +type: keyword + +-- + +[float] +=== error + +These fields can represent errors of any kind. +Use them for errors that happen while fetching events or in cases where the event itself contains an error. + + +*`error.code`*:: ++ +-- +Error code describing the error. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`error.id`*:: ++ +-- +Unique identifier for the error. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`error.message`*:: ++ +-- +Error message. + +type: text + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`error.stack_trace`*:: ++ +-- +The stack trace of this error in plain text. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. + +-- + +*`error.stack_trace.text`*:: ++ +-- +type: text + +-- + +*`error.type`*:: ++ +-- +The type of the error, for example the class name of the exception. + +type: keyword + +example: java.lang.NullPointerException + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== event + +The event fields are used for context information about the log or metric event itself. +A log is defined as an event containing details of something that happened. Log events must include the time at which the thing happened. Examples of log events include a process starting on a host, a network packet being sent from a source to a destination, or a network connection between a client and a server being initiated or closed. A metric is defined as an event containing one or more numerical measurements and the time at which the measurement was taken. Examples of metric events include memory pressure measured on a host and device temperature. See the `event.kind` definition in this section for additional details about metric and state events. + + +*`event.action`*:: ++ +-- +The action captured by the event. +This describes the information in the event. It is more specific than `event.category`. Examples are `group-add`, `process-started`, `file-created`. The value is normally defined by the implementer. + +type: keyword + +example: user-password-change + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.agent_id_status`*:: ++ +-- +Agents are normally responsible for populating the `agent.id` field value. If the system receiving events is capable of validating the value based on authentication information for the client then this field can be used to reflect the outcome of that validation. +For example if the agent's connection is authenticated with mTLS and the client cert contains the ID of the agent to which the cert was issued then the `agent.id` value in events can be checked against the certificate. If the values match then `event.agent_id_status: verified` is added to the event, otherwise one of the other allowed values should be used. +If no validation is performed then the field should be omitted. +The allowed values are: +`verified` - The `agent.id` field value matches expected value obtained from auth metadata. +`mismatch` - The `agent.id` field value does not match the expected value obtained from auth metadata. +`missing` - There was no `agent.id` field in the event to validate. +`auth_metadata_missing` - There was no auth metadata or it was missing information about the agent ID. 
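+
+A minimal sketch of that validation logic (function and variable names are hypothetical):
+
+[source,python]
+----
+# Sketch: compare the agent.id claimed in an event with the agent ID
+# obtained from auth metadata (e.g. the client certificate of an mTLS
+# connection), yielding one of the allowed values above.
+def agent_id_status(event_agent_id, auth_agent_id):
+    if auth_agent_id is None:
+        return "auth_metadata_missing"
+    if event_agent_id is None:
+        return "missing"
+    return "verified" if event_agent_id == auth_agent_id else "mismatch"
+----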
+ +type: keyword + +example: verified + +-- + +*`event.category`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. +`event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. +This field is an array. This will allow proper categorization of some events that fall in multiple categories. + +type: keyword + +example: authentication + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.code`*:: ++ +-- +Identification code for this event, if one exists. +Some event sources use event codes to identify messages unambiguously, regardless of message language or wording adjustments over time. An example of this is the Windows Event ID. + +type: keyword + +example: 4648 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.created`*:: ++ +-- +event.created contains the date/time when the event was first read by an agent, or by your pipeline. +This field is distinct from @timestamp in that @timestamp typically contains the time extracted from the original event. +In most situations, these two timestamps will be slightly different. The difference can be used to calculate the delay between your source generating an event, and the time when your agent first processed it. This can be used to monitor your agent's or pipeline's ability to keep up with your event source. +In case the two timestamps are identical, @timestamp should be used. + +type: date + +example: 2016-05-23T08:05:34.857Z + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.dataset`*:: ++ +-- +Name of the dataset. +If an event source publishes more than one type of log or events (e.g. access log, error log), the dataset is used to specify which one the event comes from. +It's recommended but not required to start the dataset name with the module name, followed by a dot, then the dataset name. + +type: keyword + +example: apache.access + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.duration`*:: ++ +-- +Duration of the event in nanoseconds. +If event.start and event.end are known, this value should be the difference between the end and start time. + +type: long + +format: duration + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.end`*:: ++ +-- +event.end contains the date when the event ended or when the activity was last observed. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.hash`*:: ++ +-- +Hash (perhaps logstash fingerprint) of raw field to be able to demonstrate log integrity. + +type: keyword + +example: 123456789012345678901234567890ABCD + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.id`*:: ++ +-- +Unique ID to describe the event. + +type: keyword + +example: 8a4f500d + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.ingested`*:: ++ +-- +Timestamp when an event arrived in the central data store. +This is different from `@timestamp`, which is when the event originally occurred. It's also different from `event.created`, which is meant to capture the first time an agent saw the event. +In normal conditions, assuming no tampering, the timestamps should chronologically look like this: `@timestamp` < `event.created` < `event.ingested`. + +type: date + +example: 2016-05-23T08:05:35.101Z + +{yes-icon} {ecs-ref}[ECS] field.
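+
+Building on that ordering, a small sketch (timestamps taken from the examples in this section) of the two delays these fields expose:
+
+[source,python]
+----
+from datetime import datetime, timezone
+
+# Under normal conditions: @timestamp < event.created < event.ingested.
+ts = datetime(2016, 5, 23, 8, 5, 34, 853000, tzinfo=timezone.utc)        # @timestamp
+created = datetime(2016, 5, 23, 8, 5, 34, 857000, tzinfo=timezone.utc)   # event.created
+ingested = datetime(2016, 5, 23, 8, 5, 35, 101000, tzinfo=timezone.utc)  # event.ingested
+
+agent_delay = created - ts           # source -> first read by agent/pipeline
+pipeline_delay = ingested - created  # agent -> central data store
+----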
+ +-- + +*`event.kind`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. +`event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events. +The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention or different access control, and it may also help to understand whether the data is coming in at a regular interval or not. + +type: keyword + +example: alert + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.module`*:: ++ +-- +Name of the module this data is coming from. +If your monitoring agent supports the concept of modules or plugins to process events of a given source (e.g. Apache logs), `event.module` should contain the name of this module. + +type: keyword + +example: apache + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.original`*:: ++ +-- +Raw text message of entire event. Used to demonstrate log integrity or where the full log message (before splitting it up in multiple parts) may be required, e.g. for reindex. +This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`. If users wish to override this and index this field, please see `Field data types` in the `Elasticsearch Reference`. + +type: keyword + +example: Sep 19 08:26:10 host CEF:0|Security| threatmanager|1.0|100| worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2spt=1232 + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. + +-- + +*`event.outcome`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. +`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. +Note that when a single transaction is described in multiple events, each event may populate different values of `event.outcome`, according to their perspective. +Also note that in the case of a compound event (a single event that contains multiple logical events), this field should be populated with the value that best captures the overall success or failure from the perspective of the event producer. +Further note that not all events will have an associated outcome. For example, this field is generally not populated for metric events, events with `event.type:info`, or any events for which an outcome does not make logical sense. + +type: keyword + +example: success + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.provider`*:: ++ +-- +Source of the event. +Event transports such as Syslog or the Windows Event Log typically mention the source of an event. It can be the name of the software that generated the event (e.g. Sysmon, httpd), or of a subsystem of the operating system (kernel, Microsoft-Windows-Security-Auditing). + +type: keyword + +example: kernel + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.reason`*:: ++ +-- +Reason why this event happened, according to the source. +This describes the why of a particular action or outcome captured in the event. Where `event.action` captures the action from the event, `event.reason` describes why that action was taken. For example, a web proxy with an `event.action` which denied the request may also populate `event.reason` with the reason why (e.g. `blocked site`).
+ +type: keyword + +example: Terminated an unexpected process + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.reference`*:: ++ +-- +Reference URL linking to additional information about this event. +This URL links to a static definition of this event. Alert events, indicated by `event.kind:alert`, are a common use case for this field. + +type: keyword + +example: https://system.example.com/event/#0001234 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.risk_score`*:: ++ +-- +Risk score or priority of the event (e.g. security solutions). Use your system's original value here. + +type: float + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.risk_score_norm`*:: ++ +-- +Normalized risk score or priority of the event, on a scale of 0 to 100. +This is mainly useful if you use more than one system that assigns risk scores, and you want to see a normalized value across all systems. + +type: float + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.sequence`*:: ++ +-- +Sequence number of the event. +The sequence number is a value published by some event sources, to make the exact ordering of events unambiguous, regardless of the timestamp precision. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.severity`*:: ++ +-- +The numeric severity of the event according to your event source. +What the different severity values mean can be different between sources and use cases. It's up to the implementer to make sure severities are consistent across events from the same source. +The Syslog severity belongs in `log.syslog.severity.code`. `event.severity` is meant to represent the severity according to the event source (e.g. firewall, IDS). If the event source does not publish its own severity, you may optionally copy the `log.syslog.severity.code` to `event.severity`. + +type: long + +example: 7 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.start`*:: ++ +-- +event.start contains the date when the event started or when the activity was first observed. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.timezone`*:: ++ +-- +This field should be populated when the event's timestamp does not include timezone information already (e.g. default Syslog timestamps). It's optional otherwise. +Acceptable timezone formats are: a canonical ID (e.g. "Europe/Amsterdam"), abbreviated (e.g. "EST") or an HH:mm differential (e.g. "-05:00"). + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.type`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the third level in the ECS category hierarchy. +`event.type` represents a categorization "sub-bucket" that, when used along with the `event.category` field values, enables filtering events down to a level appropriate for single visualization. +This field is an array. This will allow proper categorization of some events that fall in multiple event types. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`event.url`*:: ++ +-- +URL linking to an external system to continue investigation of this event. +This URL links to another system where in-depth investigation of the specific occurrence of this event can take place. Alert events, indicated by `event.kind:alert`, are a common use case for this field. + +type: keyword + +example: https://mysystem.example.com/alert/5271dedb-f5b0-4218-87f0-4ac4870a38fe + +{yes-icon} {ecs-ref}[ECS] field. 
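Before leaving the `event.*` fieldset: the four categorization fields (`event.kind`, `event.category`, `event.type`, `event.outcome`) are designed to be populated together. The following is a minimal, illustrative Go sketch of one categorized event document; the values are examples only, not output of this server:

[source,go]
----
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One event document using the four ECS categorization fields together.
	event := map[string]interface{}{
		"event.kind":      "event",                     // highest level
		"event.category":  []string{"authentication"},  // array field
		"event.type":      []string{"start"},           // array field, sub-bucket of category
		"event.outcome":   "success",                   // lowest level
		"event.reference": "https://system.example.com/event/#0001234",
	}
	out, _ := json.MarshalIndent(event, "", "  ")
	fmt.Println(string(out))
}
----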
+ +-- + +[float] +=== file + +A file is defined as a set of information that has been created on, or has existed on a filesystem. +File objects can be associated with host events, network events, and/or file events (e.g., those produced by File Integrity Monitoring [FIM] products or services). File fields provide details about the affected file associated with the event or metric. + + +*`file.accessed`*:: ++ +-- +Last time the file was accessed. +Note that not all filesystems keep track of access time. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.attributes`*:: ++ +-- +Array of file attributes. +Attributes names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write. + +type: keyword + +example: ["readonly", "system"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.code_signature.signing_id`*:: ++ +-- +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. + +type: keyword + +example: com.apple.xpc.proxy + +-- + +*`file.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.code_signature.team_id`*:: ++ +-- +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. + +type: keyword + +example: EQHXZ8M8AV + +-- + +*`file.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.created`*:: ++ +-- +File creation time. +Note that not all filesystems store the creation time. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.ctime`*:: ++ +-- +Last time the file attributes or metadata changed. +Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.device`*:: ++ +-- +Device that is the source of the file. + +type: keyword + +example: sda + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.directory`*:: ++ +-- +Directory where the file is located. It should include the drive letter, when appropriate. + +type: keyword + +example: /home/alice + +{yes-icon} {ecs-ref}[ECS] field. 
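As a rough illustration of how the path- and time-related `file.*` fields in this fieldset can be derived on a host, here is a hedged Go sketch built on `os.Stat` and `path/filepath`; it is not how any particular agent is implemented, and platform-specific fields such as `file.created` and `file.ctime` are omitted:

[source,go]
----
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	path := "/home/alice/example.png" // hypothetical file

	info, err := os.Stat(path)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	fields := map[string]interface{}{
		"file.path":      path,
		"file.directory": filepath.Dir(path),
		"file.name":      filepath.Base(path),
		// ECS wants the extension without the leading dot ("png", not ".png").
		"file.extension": strings.TrimPrefix(filepath.Ext(path), "."),
		"file.size":      info.Size(),
		"file.mtime":     info.ModTime(), // last content modification
	}
	fmt.Println(fields)
}
----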
+ +-- + +*`file.drive_letter`*:: ++ +-- +Drive letter where the file is located. This field is only relevant on Windows. +The value should be uppercase, and not include the colon. + +type: keyword + +example: C + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.elf.architecture`*:: ++ +-- +Machine architecture of the ELF file. + +type: keyword + +example: x86-64 + +-- + +*`file.elf.byte_order`*:: ++ +-- +Byte sequence of ELF file. + +type: keyword + +example: Little Endian + +-- + +*`file.elf.cpu_type`*:: ++ +-- +CPU type of the ELF file. + +type: keyword + +example: Intel + +-- + +*`file.elf.creation_date`*:: ++ +-- +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. + +type: date + +-- + +*`file.elf.exports`*:: ++ +-- +List of exported element names and types. + +type: flattened + +-- + +*`file.elf.header.abi_version`*:: ++ +-- +Version of the ELF Application Binary Interface (ABI). + +type: keyword + +-- + +*`file.elf.header.class`*:: ++ +-- +Header class of the ELF file. + +type: keyword + +-- + +*`file.elf.header.data`*:: ++ +-- +Data table of the ELF header. + +type: keyword + +-- + +*`file.elf.header.entrypoint`*:: ++ +-- +Header entrypoint of the ELF file. + +type: long + +format: string + +-- + +*`file.elf.header.object_version`*:: ++ +-- +"0x1" for original ELF files. + +type: keyword + +-- + +*`file.elf.header.os_abi`*:: ++ +-- +Application Binary Interface (ABI) of the Linux OS. + +type: keyword + +-- + +*`file.elf.header.type`*:: ++ +-- +Header type of the ELF file. + +type: keyword + +-- + +*`file.elf.header.version`*:: ++ +-- +Version of the ELF header. + +type: keyword + +-- + +*`file.elf.imports`*:: ++ +-- +List of imported element names and types. + +type: flattened + +-- + +*`file.elf.sections`*:: ++ +-- +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. + +type: nested + +-- + +*`file.elf.sections.chi2`*:: ++ +-- +Chi-square probability distribution of the section. + +type: long + +format: number + +-- + +*`file.elf.sections.entropy`*:: ++ +-- +Shannon entropy calculation from the section. + +type: long + +format: number + +-- + +*`file.elf.sections.flags`*:: ++ +-- +ELF Section List flags. + +type: keyword + +-- + +*`file.elf.sections.name`*:: ++ +-- +ELF Section List name. + +type: keyword + +-- + +*`file.elf.sections.physical_offset`*:: ++ +-- +ELF Section List offset. + +type: keyword + +-- + +*`file.elf.sections.physical_size`*:: ++ +-- +ELF Section List physical size. + +type: long + +format: bytes + +-- + +*`file.elf.sections.type`*:: ++ +-- +ELF Section List type. + +type: keyword + +-- + +*`file.elf.sections.virtual_address`*:: ++ +-- +ELF Section List virtual address. + +type: long + +format: string + +-- + +*`file.elf.sections.virtual_size`*:: ++ +-- +ELF Section List virtual size. + +type: long + +format: string + +-- + +*`file.elf.segments`*:: ++ +-- +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. + +type: nested + +-- + +*`file.elf.segments.sections`*:: ++ +-- +ELF object segment sections. + +type: keyword + +-- + +*`file.elf.segments.type`*:: ++ +-- +ELF object segment type. + +type: keyword + +-- + +*`file.elf.shared_libraries`*:: ++ +-- +List of shared libraries used by this ELF object. 
+ +type: keyword + +-- + +*`file.elf.telfhash`*:: ++ +-- +telfhash symbol hash for ELF file. + +type: keyword + +-- + +*`file.extension`*:: ++ +-- +File extension, excluding the leading dot. +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). + +type: keyword + +example: png + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.gid`*:: ++ +-- +Primary group ID (GID) of the file. + +type: keyword + +example: 1001 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.group`*:: ++ +-- +Primary group name of the file. + +type: keyword + +example: alice + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.hash.ssdeep`*:: ++ +-- +SSDEEP hash. + +type: keyword + +-- + +*`file.inode`*:: ++ +-- +Inode representing the file in the filesystem. + +type: keyword + +example: 256383 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.mime_type`*:: ++ +-- +MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.mode`*:: ++ +-- +Mode of the file in octal representation. + +type: keyword + +example: 0640 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.mtime`*:: ++ +-- +Last time the file content was modified. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.name`*:: ++ +-- +Name of the file including the extension, without the directory. + +type: keyword + +example: example.png + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.owner`*:: ++ +-- +File owner's username. + +type: keyword + +example: alice + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.path`*:: ++ +-- +Full path to the file, including the file name. It should include the drive letter, when appropriate. + +type: keyword + +example: /home/alice/example.png + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.path.text`*:: ++ +-- +type: text + +-- + +*`file.pe.architecture`*:: ++ +-- +CPU architecture target for the file. + +type: keyword + +example: x64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.imphash`*:: ++ +-- +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. 
+Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.size`*:: ++ +-- +File size in bytes. +Only relevant when `file.type` is "file". + +type: long + +example: 16384 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.target_path`*:: ++ +-- +Target path for symlinks. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.target_path.text`*:: ++ +-- +type: text + +-- + +*`file.type`*:: ++ +-- +File type (file, dir, or symlink). + +type: keyword + +example: file + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.uid`*:: ++ +-- +The user ID (UID) or security identifier (SID) of the file owner. + +type: keyword + +example: 1001 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.alternative_names`*:: ++ +-- +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. + +type: keyword + +example: *.elastic.co + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.common_name`*:: ++ +-- +List of common name (CN) of issuing certificate authority. + +type: keyword + +example: Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.country`*:: ++ +-- +List of country (C) codes + +type: keyword + +example: US + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.distinguished_name`*:: ++ +-- +Distinguished name (DN) of issuing certificate authority. + +type: keyword + +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.locality`*:: ++ +-- +List of locality names (L) + +type: keyword + +example: Mountain View + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.organization`*:: ++ +-- +List of organizations (O) of issuing certificate authority. + +type: keyword + +example: Example Inc + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.organizational_unit`*:: ++ +-- +List of organizational units (OU) of issuing certificate authority. + +type: keyword + +example: www.example.com + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.issuer.state_or_province`*:: ++ +-- +List of state or province names (ST, S, or P) + +type: keyword + +example: California + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.not_after`*:: ++ +-- +Time at which the certificate is no longer considered valid. + +type: date + +example: 2020-07-16 03:15:39+00:00 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.not_before`*:: ++ +-- +Time at which the certificate is first considered valid. + +type: date + +example: 2019-08-16 01:40:25+00:00 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.public_key_algorithm`*:: ++ +-- +Algorithm used to generate the public key. + +type: keyword + +example: RSA + +{yes-icon} {ecs-ref}[ECS] field. 
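Most of the `file.x509.*` values map directly onto Go's standard `crypto/x509` certificate structure. The following sketch assumes a PEM-encoded certificate file named `cert.pem` (an illustrative input, not an artifact of this repository):

[source,go]
----
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"strings"
)

func main() {
	pemBytes, err := os.ReadFile("cert.pem") // hypothetical input
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	fields := map[string]interface{}{
		"file.x509.issuer.distinguished_name": cert.Issuer.String(),
		"file.x509.subject.common_name":       cert.Subject.CommonName,
		"file.x509.not_before":                cert.NotBefore,
		"file.x509.not_after":                 cert.NotAfter,
		"file.x509.public_key_algorithm":      cert.PublicKeyAlgorithm.String(),
		"file.x509.signature_algorithm":       cert.SignatureAlgorithm.String(),
		// Serial number: uppercase hex without colons, per the guidance above.
		"file.x509.serial_number": strings.ToUpper(cert.SerialNumber.Text(16)),
	}
	fmt.Println(fields)
}
----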
+ +-- + +*`file.x509.public_key_curve`*:: ++ +-- +The curve used by the elliptic curve public key algorithm. This is algorithm specific. + +type: keyword + +example: nistp521 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.public_key_exponent`*:: ++ +-- +Exponent used to derive the public key. This is algorithm specific. + +type: long + +example: 65537 + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. + +-- + +*`file.x509.public_key_size`*:: ++ +-- +The size of the public key space in bits. + +type: long + +example: 2048 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.serial_number`*:: ++ +-- +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. + +type: keyword + +example: 55FBB9C7DEBF09809D12CCAA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.signature_algorithm`*:: ++ +-- +Identifier for certificate signature algorithm. We recommend using names found in Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. + +type: keyword + +example: SHA256-RSA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.common_name`*:: ++ +-- +List of common names (CN) of subject. + +type: keyword + +example: shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.country`*:: ++ +-- +List of country (C) code + +type: keyword + +example: US + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.distinguished_name`*:: ++ +-- +Distinguished name (DN) of the certificate subject entity. + +type: keyword + +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.locality`*:: ++ +-- +List of locality names (L) + +type: keyword + +example: San Francisco + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.organization`*:: ++ +-- +List of organizations (O) of subject. + +type: keyword + +example: Example, Inc. + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.organizational_unit`*:: ++ +-- +List of organizational units (OU) of subject. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.subject.state_or_province`*:: ++ +-- +List of state or province names (ST, S, or P) + +type: keyword + +example: California + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`file.x509.version_number`*:: ++ +-- +Version of x509 format. + +type: keyword + +example: 3 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== geo + +Geo fields can carry data about a specific location related to an event. +This geolocation information can be derived from techniques such as Geo IP, or be user-supplied. + + +*`geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +-- + +*`geo.continent_code`*:: ++ +-- +Two-letter code representing continent's name. + +type: keyword + +example: NA + +-- + +*`geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +-- + +*`geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +-- + +*`geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +-- + +*`geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +-- + +*`geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. 
+Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +-- + +*`geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +-- + +*`geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +-- + +*`geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +[float] +=== group + +The group fields are meant to represent groups that are relevant to the event. + + +*`group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`group.name`*:: ++ +-- +Name of the group. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== hash + +The hash fields represent different bitwise hash algorithms and their values. +Field names for common hashes (e.g. MD5, SHA1) are predefined. Add fields for other hashes by lowercasing the hash algorithm name and using underscore separators as appropriate (snake case, e.g. sha3_512). +Note that this fieldset is used for common hashes that may be computed over a range of generic bytes. Entity-specific hashes such as ja3 or imphash are placed in the fieldsets to which they relate (tls and pe, respectively). + + +*`hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +-- + +*`hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +-- + +*`hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +-- + +*`hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +-- + +*`hash.ssdeep`*:: ++ +-- +SSDEEP hash. + +type: keyword + +-- + +[float] +=== host + +A host is defined as a general computing instance. +ECS host.* fields should be populated with details about the host on which the event happened, or from which the measurement was taken. Host types include hardware, virtual machines, Docker containers, and Kubernetes nodes. + + +*`host.architecture`*:: ++ +-- +Operating system architecture. + +type: keyword + +example: x86_64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.cpu.usage`*:: ++ +-- +Percent CPU used which is normalized by the number of CPU cores and it ranges from 0 to 1. +Scaling factor: 1000. +For example: For a two core host, this value should be the average of the two cores, between 0 and 1. + +type: scaled_float + +-- + +*`host.disk.read.bytes`*:: ++ +-- +The total number of bytes (gauge) read successfully (aggregated from all disks) since the last metric collection. + +type: long + +-- + +*`host.disk.write.bytes`*:: ++ +-- +The total number of bytes (gauge) written successfully (aggregated from all disks) since the last metric collection. + +type: long + +-- + +*`host.domain`*:: ++ +-- +Name of the domain of which the host is a member. +For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. 
+ +type: keyword + +example: CONTOSO + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.continent_code`*:: ++ +-- +Two-letter code representing continent's name. + +type: keyword + +example: NA + +-- + +*`host.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`host.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`host.hostname`*:: ++ +-- +Hostname of the host. +It normally contains what the `hostname` command returns on the host machine. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.id`*:: ++ +-- +Unique host id. +As hostname is not always unique, use values that are meaningful in your environment. +Example: The current usage of `beat.name`. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.ip`*:: ++ +-- +Host ip addresses. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.mac`*:: ++ +-- +Host MAC addresses. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. + +type: keyword + +example: ["00-00-5E-00-53-23", "00-00-5E-00-53-24"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.name`*:: ++ +-- +Name of the host. +It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.network.egress.bytes`*:: ++ +-- +The number of bytes (gauge) sent out on all network interfaces by the host since the last metric collection. + +type: long + +-- + +*`host.network.egress.packets`*:: ++ +-- +The number of packets (gauge) sent out on all network interfaces by the host since the last metric collection. 
+ +type: long + +-- + +*`host.network.ingress.bytes`*:: ++ +-- +The number of bytes received (gauge) on all network interfaces by the host since the last metric collection. + +type: long + +-- + +*`host.network.ingress.packets`*:: ++ +-- +The number of packets (gauge) received on all network interfaces by the host since the last metric collection. + +type: long + +-- + +*`host.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.os.full`*:: ++ +-- +Operating system name, including the version or code name. + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.os.full.text`*:: ++ +-- +type: text + +-- + +*`host.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.os.name`*:: ++ +-- +Operating system name, without the version. + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.os.name.text`*:: ++ +-- +type: text + +-- + +*`host.os.platform`*:: ++ +-- +Operating system platform (such as centos, ubuntu, windows). + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.os.type`*:: ++ +-- +Use the `os.type` field to categorize the operating system into one of the broad commercial families. +One of the following values should be used (lowercase): linux, macos, unix, windows. +If the OS you're dealing with is not in the list, the field should not be populated. Please let us know by opening an issue with ECS, to propose its addition. + +type: keyword + +example: macos + +-- + +*`host.os.version`*:: ++ +-- +Operating system version as a raw string. + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.type`*:: ++ +-- +Type of host. +For Cloud providers, this can be the machine type, like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.uptime`*:: ++ +-- +Seconds the host has been up. + +type: long + +example: 1325 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.domain`*:: ++ +-- +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.email`*:: ++ +-- +User email address. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.full_name`*:: ++ +-- +User's full name, if available. + +type: keyword + +example: Albert Einstein + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.full_name.text`*:: ++ +-- +type: text + +-- + +*`host.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.group.name`*:: ++ +-- +Name of the group. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.hash`*:: ++ +-- +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field.
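One plausible way to produce such an anonymized value is a keyed HMAC over the username: stable enough for correlation, but not reversible without the key. A sketch under that assumption (the key handling shown is illustrative only):

[source,go]
----
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// userHash returns a stable, anonymized identifier suitable for a field
// like host.user.hash: HMAC-SHA256 of the username under a secret key.
func userHash(key []byte, username string) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(username))
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	key := []byte("replace-with-a-real-secret") // illustrative key handling
	fmt.Println(userHash(key, "albert"))
}
----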
+ +-- + +*`host.user.id`*:: ++ +-- +Unique identifier of the user. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`host.user.name.text`*:: ++ +-- +type: text + +-- + +*`host.user.roles`*:: ++ +-- +Array of user roles at the time of the event. + +type: keyword + +example: ["kibana_admin", "reporting_user"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== http + +Fields related to HTTP activity. Use the `url` field set to store the url of the request. + + +*`http.request.body.bytes`*:: ++ +-- +Size in bytes of the request body. + +type: long + +example: 887 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.request.body.content`*:: ++ +-- +The full HTTP request body. + +type: keyword + +example: Hello world + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.request.body.content.text`*:: ++ +-- +type: text + +-- + +*`http.request.bytes`*:: ++ +-- +Total size in bytes of the request (body and headers). + +type: long + +example: 1437 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.request.id`*:: ++ +-- +A unique identifier for each HTTP request to correlate logs between clients and servers in transactions. +The id may be contained in a non-standard HTTP header, such as `X-Request-ID` or `X-Correlation-ID`. + +type: keyword + +example: 123e4567-e89b-12d3-a456-426614174000 + +-- + +*`http.request.method`*:: ++ +-- +HTTP request method. +Prior to ECS 1.6.0 the following guidance was provided: +"The field value must be normalized to lowercase for querying." +As of ECS 1.6.0, the guidance is deprecated because the original case of the method may be useful in anomaly detection. Original case will be mandated in ECS 2.0.0 + +type: keyword + +example: GET, POST, PUT, PoST + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.request.mime_type`*:: ++ +-- +Mime type of the body of the request. +This value must only be populated based on the content of the request body, not on the `Content-Type` header. Comparing the mime type of a request with the request's Content-Type header can be helpful in detecting threats or misconfigured clients. + +type: keyword + +example: image/gif + +-- + +*`http.request.referrer`*:: ++ +-- +Referrer for this HTTP request. + +type: keyword + +example: https://blog.example.com/ + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.response.body.bytes`*:: ++ +-- +Size in bytes of the response body. + +type: long + +example: 887 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.response.body.content`*:: ++ +-- +The full HTTP response body. + +type: keyword + +example: Hello world + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.response.body.content.text`*:: ++ +-- +type: text + +-- + +*`http.response.bytes`*:: ++ +-- +Total size in bytes of the response (body and headers). + +type: long + +example: 1437 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.response.mime_type`*:: ++ +-- +Mime type of the body of the response. +This value must only be populated based on the content of the response body, not on the `Content-Type` header. Comparing the mime type of a response with the response's Content-Type header can be helpful in detecting misconfigured servers. + +type: keyword + +example: image/gif + +-- + +*`http.response.status_code`*:: ++ +-- +HTTP response status code. 
+ +type: long + +example: 404 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`http.version`*:: ++ +-- +HTTP version. + +type: keyword + +example: 1.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== interface + +The interface fields are used to record ingress and egress interface information when reported by an observer (e.g. firewall, router, load balancer) in the context of the observer handling a network connection. In the case of a single observer interface (e.g. network sensor on a span port) only the observer.ingress information should be populated. + + +*`interface.alias`*:: ++ +-- +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. + +type: keyword + +example: outside + +-- + +*`interface.id`*:: ++ +-- +Interface ID as reported by an observer (typically SNMP interface ID). + +type: keyword + +example: 10 + +-- + +*`interface.name`*:: ++ +-- +Interface name as reported by the system. + +type: keyword + +example: eth0 + +-- + +[float] +=== log + +Details about the event's logging mechanism or logging transport. +The log.* fields are typically populated with details about the logging mechanism used to create and/or transport the event. For example, syslog details belong under `log.syslog.*`. +The details specific to your event source are typically not logged under `log.*`, but rather in `event.*` or in other ECS fields. + + +*`log.file.path`*:: ++ +-- +Full path to the log file this event came from, including the file name. It should include the drive letter, when appropriate. +If the event wasn't read from a log file, do not populate this field. + +type: keyword + +example: /var/log/fun-times.log + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.level`*:: ++ +-- +Original log level of the log event. +If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). +Some examples are `warn`, `err`, `i`, `informational`. + +type: keyword + +example: error + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.logger`*:: ++ +-- +The name of the logger inside an application. This is usually the name of the class which initialized the logger, or can be a custom name. + +type: keyword + +example: org.elasticsearch.bootstrap.Bootstrap + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.origin.file.line`*:: ++ +-- +The line number of the file containing the source code which originated the log event. + +type: integer + +example: 42 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.origin.file.name`*:: ++ +-- +The name of the file containing the source code which originated the log event. +Note that this field is not meant to capture the log file. The correct field to capture the log file is `log.file.path`. + +type: keyword + +example: Bootstrap.java + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.origin.function`*:: ++ +-- +The name of the function or method which originated the log event. + +type: keyword + +example: init + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.original`*:: ++ +-- +Deprecated for removal in next major version release. This field is superseded by `event.original`. +This is the original log message and contains the full log message before splitting it up in multiple parts. 
+In contrast to the `message` field, which can contain an extracted part of the log message, this field contains the original, full log message. It may already have some modifications applied, such as encoding or newline removal, to clean up the log message. +This field is not indexed and doc_values are disabled, so it can't be queried, but the value can be retrieved from `_source`. + +type: keyword + +example: Sep 19 08:26:10 localhost My log + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. + +-- + +*`log.syslog`*:: ++ +-- +The Syslog metadata of the event, if the event was transmitted via Syslog. Please see RFCs 5424 or 3164. + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.syslog.facility.code`*:: ++ +-- +The Syslog numeric facility of the log event, if available. +According to RFCs 5424 and 3164, this value should be an integer between 0 and 23. + +type: long + +example: 23 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.syslog.facility.name`*:: ++ +-- +The Syslog text-based facility of the log event, if available. + +type: keyword + +example: local7 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.syslog.priority`*:: ++ +-- +Syslog numeric priority of the event, if available. +According to RFCs 5424 and 3164, the priority is 8 * facility + severity. This number is therefore expected to contain a value between 0 and 191. + +type: long + +example: 135 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.syslog.severity.code`*:: ++ +-- +The Syslog numeric severity of the log event, if available. +If the event source publishing via Syslog provides a different numeric severity value (e.g. firewall, IDS), your source's numeric severity should go to `event.severity`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `event.severity`. + +type: long + +example: 3 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`log.syslog.severity.name`*:: ++ +-- +The Syslog text-based severity of the log event, if available. +If the event source publishing via Syslog provides a different severity value (e.g. firewall, IDS), your source's text severity should go to `log.level`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `log.level`. + +type: keyword + +example: Error + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== network + +The network is defined as the communication path over which a host or network event happens. +The network.* fields should be populated with details about the network activity associated with an event. + + +*`network.application`*:: ++ +-- +A name given to an application level protocol. This can be arbitrarily assigned for things like microservices, but can also apply to things like skype, icq, facebook, twitter. This would be used in situations where the vendor or service can be decoded such as from the source/dest IP owners, ports, or wire format. +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". + +type: keyword + +example: aim + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.bytes`*:: ++ +-- +Total bytes transferred in both directions. +If `source.bytes` and `destination.bytes` are known, `network.bytes` is their sum. + +type: long + +example: 368 + +format: bytes + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.community_id`*:: ++ +-- +A hash of source and destination IPs and ports, as well as the protocol used in a communication.
This is a tool-agnostic standard to identify flows. +Learn more at https://github.com/corelight/community-id-spec. + +type: keyword + +example: 1:hO+sN4H+MG5MY/8hIrXPqc4ZQz0= + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.direction`*:: ++ +-- +Direction of the network traffic. +Recommended values are: + * ingress + * egress + * inbound + * outbound + * internal + * external + * unknown + +When mapping events from a host-based monitoring context, populate this field from the host's point of view, using the values "ingress" or "egress". +When mapping events from a network or perimeter-based monitoring context, populate this field from the point of view of the network perimeter, using the values "inbound", "outbound", "internal" or "external". +Note that "internal" is not crossing perimeter boundaries, and is meant to describe communication between two hosts within the perimeter. Note also that "external" is meant to describe traffic between two hosts that are external to the perimeter. This could for example be useful for ISPs or VPN service providers. + +type: keyword + +example: inbound + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.forwarded_ip`*:: ++ +-- +Host IP address when the source IP address is the proxy. + +type: ip + +example: 192.1.1.2 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.iana_number`*:: ++ +-- +IANA Protocol Number (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). Standardized list of protocols. This aligns well with NetFlow and sFlow related logs which use the IANA Protocol Number. + +type: keyword + +example: 6 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.inner`*:: ++ +-- +Network.inner fields are added in addition to network.vlan fields to describe the innermost VLAN when q-in-q VLAN tagging is present. Allowed fields include vlan.id and vlan.name. Inner vlan fields are typically used when sending traffic with multiple 802.1q encapsulations to a network sensor (e.g. Zeek, Wireshark.) + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.inner.vlan.id`*:: ++ +-- +VLAN ID as reported by the observer. + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.inner.vlan.name`*:: ++ +-- +Optional VLAN name as reported by the observer. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.name`*:: ++ +-- +Name given by operators to sections of their network. + +type: keyword + +example: Guest Wifi + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.packets`*:: ++ +-- +Total packets transferred in both directions. +If `source.packets` and `destination.packets` are known, `network.packets` is their sum. + +type: long + +example: 24 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.protocol`*:: ++ +-- +L7 Network protocol name. ex. http, lumberjack, transport protocol. +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". + +type: keyword + +example: http + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.transport`*:: ++ +-- +Same as network.iana_number, but instead using the Keyword name of the transport layer (udp, tcp, ipv6-icmp, etc.) +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". + +type: keyword + +example: tcp + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.type`*:: ++ +-- +In the OSI Model this would be the Network Layer. 
ipv4, ipv6, ipsec, pim, etc +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". + +type: keyword + +example: ipv4 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.vlan.id`*:: ++ +-- +VLAN ID as reported by the observer. + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`network.vlan.name`*:: ++ +-- +Optional VLAN name as reported by the observer. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== observer + +An observer is defined as a special network, security, or application device used to detect, observe, or create network, security, or application-related events and metrics. +This could be a custom hardware appliance or a server that has been configured to run special network, security, or application software. Examples include firewalls, web proxies, intrusion detection/prevention systems, network monitoring sensors, web application firewalls, data loss prevention systems, and APM servers. The observer.* fields shall be populated with details of the system, if any, that detects, observes and/or creates a network, security, or application event or metric. Message queues and ETL components used in processing events or metrics are not considered observers in ECS. + + +*`observer.egress`*:: ++ +-- +Observer.egress holds information like interface number and name, vlan, and zone information to classify egress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.interface.alias`*:: ++ +-- +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.interface.id`*:: ++ +-- +Interface ID as reported by an observer (typically SNMP interface ID). + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.interface.name`*:: ++ +-- +Interface name as reported by the system. + +type: keyword + +example: eth0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.vlan.id`*:: ++ +-- +VLAN ID as reported by the observer. + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.vlan.name`*:: ++ +-- +Optional VLAN name as reported by the observer. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.egress.zone`*:: ++ +-- +Network zone of outbound traffic as reported by the observer to categorize the destination area of egress traffic, e.g. Internal, External, DMZ, HR, Legal, etc. + +type: keyword + +example: Public_Internet + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.continent_code`*:: ++ +-- +Two-letter code representing continent's name. + +type: keyword + +example: NA + +-- + +*`observer.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.country_name`*:: ++ +-- +Country name. 
+ +type: keyword + +example: Canada + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`observer.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`observer.hostname`*:: ++ +-- +Hostname of the observer. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress`*:: ++ +-- +Observer.ingress holds information like interface number and name, vlan, and zone information to classify ingress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. + +type: object + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.interface.alias`*:: ++ +-- +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.interface.id`*:: ++ +-- +Interface ID as reported by an observer (typically SNMP interface ID). + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.interface.name`*:: ++ +-- +Interface name as reported by the system. + +type: keyword + +example: eth0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.vlan.id`*:: ++ +-- +VLAN ID as reported by the observer. + +type: keyword + +example: 10 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.vlan.name`*:: ++ +-- +Optional VLAN name as reported by the observer. + +type: keyword + +example: outside + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ingress.zone`*:: ++ +-- +Network zone of incoming traffic as reported by the observer to categorize the source area of ingress traffic. e.g. internal, External, DMZ, HR, Legal, etc. + +type: keyword + +example: DMZ + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.ip`*:: ++ +-- +IP addresses of the observer. + +type: ip + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.mac`*:: ++ +-- +MAC addresses of the observer. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. + +type: keyword + +example: ["00-00-5E-00-53-23", "00-00-5E-00-53-24"] + +{yes-icon} {ecs-ref}[ECS] field. 
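Go's `net.HardwareAddr` formats MAC addresses with lowercase, colon-separated octets, so a small normalization step yields the hyphenated uppercase form suggested above. A minimal sketch:

[source,go]
----
package main

import (
	"fmt"
	"net"
	"strings"
)

// ecsMAC normalizes a MAC address to the RFC 7042 style used in the ECS
// examples: uppercase hex octets separated by hyphens.
func ecsMAC(s string) (string, error) {
	hw, err := net.ParseMAC(s)
	if err != nil {
		return "", err
	}
	return strings.ToUpper(strings.ReplaceAll(hw.String(), ":", "-")), nil
}

func main() {
	m, _ := ecsMAC("00:00:5e:00:53:23")
	fmt.Println(m) // 00-00-5E-00-53-23
}
----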
+ +-- + +*`observer.name`*:: ++ +-- +Custom name of the observer. +This is a name that can be given to an observer. This can be helpful, for example, if multiple firewalls of the same model are used in an organization. +If no custom name is needed, the field can be left empty. + +type: keyword + +example: 1_proxySG + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + +type: keyword + +example: debian + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.full`*:: ++ +-- +Operating system name, including the version or code name. + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.full.text`*:: ++ +-- +type: text + +-- + +*`observer.os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + +type: keyword + +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.name`*:: ++ +-- +Operating system name, without the version. + +type: keyword + +example: Mac OS X + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.name.text`*:: ++ +-- +type: text + +-- + +*`observer.os.platform`*:: ++ +-- +Operating system platform (such as centos, ubuntu, windows). + +type: keyword + +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.os.type`*:: ++ +-- +Use the `os.type` field to categorize the operating system into one of the broad commercial families. +One of the following values should be used (lowercase): linux, macos, unix, windows. +If the OS you're dealing with is not in the list, the field should not be populated. Please let us know by opening an issue with ECS, to propose its addition. + +type: keyword + +example: macos + +-- + +*`observer.os.version`*:: ++ +-- +Operating system version as a raw string. + +type: keyword + +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.product`*:: ++ +-- +The product name of the observer. + +type: keyword + +example: s200 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.serial_number`*:: ++ +-- +Observer serial number. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.type`*:: ++ +-- +The type of the observer the data is coming from. +There is no predefined list of observer types. Some examples are `forwarder`, `firewall`, `ids`, `ips`, `proxy`, `poller`, `sensor`, `APM server`. + +type: keyword + +example: firewall + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.vendor`*:: ++ +-- +Vendor name of the observer. + +type: keyword + +example: Symantec + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`observer.version`*:: ++ +-- +Observer version. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== orchestrator + +Fields that describe the resources which container orchestrators manage or act upon. + + +*`orchestrator.api_version`*:: ++ +-- +API version being used to carry out the action. + +type: keyword + +example: v1beta1 + +-- + +*`orchestrator.cluster.name`*:: ++ +-- +Name of the cluster. + +type: keyword + +-- + +*`orchestrator.cluster.url`*:: ++ +-- +URL of the API used to manage the cluster. + +type: keyword + +-- + +*`orchestrator.cluster.version`*:: ++ +-- +The version of the cluster. + +type: keyword + +-- + +*`orchestrator.namespace`*:: ++ +-- +Namespace in which the action is taking place. + +type: keyword + +example: kube-system + +-- + +*`orchestrator.organization`*:: ++ +-- +Organization affected by the event (for multi-tenant orchestrator setups).
+ +type: keyword + +example: elastic + +-- + +*`orchestrator.resource.name`*:: ++ +-- +Name of the resource being acted upon. + +type: keyword + +example: test-pod-cdcws + +-- + +*`orchestrator.resource.type`*:: ++ +-- +Type of resource being acted upon. + +type: keyword + +example: service + +-- + +*`orchestrator.type`*:: ++ +-- +Orchestrator cluster type (e.g. kubernetes, nomad or cloudfoundry). + +type: keyword + +example: kubernetes + +-- + +[float] +=== organization + +The organization fields enrich data with information about the company or entity the data is associated with. +These fields help you arrange or filter data stored in an index by one or multiple organizations. + + +*`organization.id`*:: ++ +-- +Unique identifier for the organization. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`organization.name`*:: ++ +-- +Organization name. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`organization.name.text`*:: ++ +-- +type: text + +-- + +[float] +=== os + +The OS fields contain information about the operating system. + + +*`os.family`*:: ++ +-- +OS family (such as redhat, debian, freebsd, windows). + +type: keyword + +example: debian + +-- + +*`os.full`*:: ++ +-- +Operating system name, including the version or code name. + +type: keyword + +example: Mac OS Mojave + +-- + +*`os.full.text`*:: ++ +-- +type: text + +-- + +*`os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. + +type: keyword + +example: 4.4.0-112-generic + +-- + +*`os.name`*:: ++ +-- +Operating system name, without the version. + +type: keyword + +example: Mac OS X + +-- + +*`os.name.text`*:: ++ +-- +type: text + +-- + +*`os.platform`*:: ++ +-- +Operating system platform (such as centos, ubuntu, windows). + +type: keyword + +example: darwin + +-- + +*`os.type`*:: ++ +-- +Use the `os.type` field to categorize the operating system into one of the broad commercial families. +One of the following values should be used (lowercase): linux, macos, unix, windows. +If the OS you're dealing with is not in the list, the field should not be populated. Please let us know by opening an issue with ECS, to propose its addition. + +type: keyword + +example: macos + +-- + +*`os.version`*:: ++ +-- +Operating system version as a raw string. + +type: keyword + +example: 10.14.1 + +-- + +[float] +=== package + +These fields contain information about an installed software package. They include general information about the package, such as name, version or size, and installation details, such as time or location. + + +*`package.architecture`*:: ++ +-- +Package architecture. + +type: keyword + +example: x86_64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.build_version`*:: ++ +-- +Additional information about the build version of the installed package. +For example, use the commit SHA of a non-released package. + +type: keyword + +example: 36f4f7e89dd61b0988b12ee000b98966867710cd + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.checksum`*:: ++ +-- +Checksum of the installed package for verification. + +type: keyword + +example: 68b329da9893e34099c7d8ad5cb9c940 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.description`*:: ++ +-- +Description of the package. + +type: keyword + +example: Open source programming language to build simple/reliable/efficient software. + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.install_scope`*:: ++ +-- +Indicating how the package was installed, e.g. user-local, global.
+ +type: keyword + +example: global + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.installed`*:: ++ +-- +Time when package was installed. + +type: date + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.license`*:: ++ +-- +License under which the package was released. +Use a short name, e.g. the license identifier from SPDX License List where possible (https://spdx.org/licenses/). + +type: keyword + +example: Apache License 2.0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.name`*:: ++ +-- +Package name + +type: keyword + +example: go + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.path`*:: ++ +-- +Path where the package is installed. + +type: keyword + +example: /usr/local/Cellar/go/1.12.9/ + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.reference`*:: ++ +-- +Home page or reference URL of the software in this package, if available. + +type: keyword + +example: https://golang.org + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.size`*:: ++ +-- +Package size in bytes. + +type: long + +example: 62231 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.type`*:: ++ +-- +Type of package. +This should contain the package file type, rather than the package manager name. Examples: rpm, dpkg, brew, npm, gem, nupkg, jar. + +type: keyword + +example: rpm + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`package.version`*:: ++ +-- +Package version + +type: keyword + +example: 1.12.9 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== pe + +These fields contain Windows Portable Executable (PE) metadata. + + +*`pe.architecture`*:: ++ +-- +CPU architecture target for the file. + +type: keyword + +example: x64 + +-- + +*`pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +-- + +*`pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +-- + +*`pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +-- + +*`pe.imphash`*:: ++ +-- +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf + +-- + +*`pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +-- + +*`pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +-- + +[float] +=== process + +These fields contain information about a process. +These fields can help you correlate metrics information with a process id/name from a log message. The `process.pid` often stays in the metric itself and is copied to the global field for correlation. + + +*`process.args`*:: ++ +-- +Array of process arguments, starting with the absolute path to the executable. +May be filtered to protect sensitive information. + +type: keyword + +example: ["/usr/bin/ssh", "-l", "user", "10.0.0.16"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.args_count`*:: ++ +-- +Length of the process.args array. 
+This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. + +type: long + +example: 4 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.code_signature.signing_id`*:: ++ +-- +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. + +type: keyword + +example: com.apple.xpc.proxy + +-- + +*`process.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.code_signature.team_id`*:: ++ +-- +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. + +type: keyword + +example: EQHXZ8M8AV + +-- + +*`process.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.command_line`*:: ++ +-- +Full command line that started the process, including the absolute path to the executable, and all arguments. +Some arguments may be filtered to protect sensitive information. + +type: keyword + +example: /usr/bin/ssh -l user 10.0.0.16 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.command_line.text`*:: ++ +-- +type: text + +-- + +*`process.elf.architecture`*:: ++ +-- +Machine architecture of the ELF file. + +type: keyword + +example: x86-64 + +-- + +*`process.elf.byte_order`*:: ++ +-- +Byte sequence of ELF file. + +type: keyword + +example: Little Endian + +-- + +*`process.elf.cpu_type`*:: ++ +-- +CPU type of the ELF file. + +type: keyword + +example: Intel + +-- + +*`process.elf.creation_date`*:: ++ +-- +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. + +type: date + +-- + +*`process.elf.exports`*:: ++ +-- +List of exported element names and types. + +type: flattened + +-- + +*`process.elf.header.abi_version`*:: ++ +-- +Version of the ELF Application Binary Interface (ABI). + +type: keyword + +-- + +*`process.elf.header.class`*:: ++ +-- +Header class of the ELF file. + +type: keyword + +-- + +*`process.elf.header.data`*:: ++ +-- +Data table of the ELF header. + +type: keyword + +-- + +*`process.elf.header.entrypoint`*:: ++ +-- +Header entrypoint of the ELF file. 
+ +type: long + +format: string + +-- + +*`process.elf.header.object_version`*:: ++ +-- +"0x1" for original ELF files. + +type: keyword + +-- + +*`process.elf.header.os_abi`*:: ++ +-- +Application Binary Interface (ABI) of the Linux OS. + +type: keyword + +-- + +*`process.elf.header.type`*:: ++ +-- +Header type of the ELF file. + +type: keyword + +-- + +*`process.elf.header.version`*:: ++ +-- +Version of the ELF header. + +type: keyword + +-- + +*`process.elf.imports`*:: ++ +-- +List of imported element names and types. + +type: flattened + +-- + +*`process.elf.sections`*:: ++ +-- +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. + +type: nested + +-- + +*`process.elf.sections.chi2`*:: ++ +-- +Chi-square probability distribution of the section. + +type: long + +format: number + +-- + +*`process.elf.sections.entropy`*:: ++ +-- +Shannon entropy calculation from the section. + +type: long + +format: number + +-- + +*`process.elf.sections.flags`*:: ++ +-- +ELF Section List flags. + +type: keyword + +-- + +*`process.elf.sections.name`*:: ++ +-- +ELF Section List name. + +type: keyword + +-- + +*`process.elf.sections.physical_offset`*:: ++ +-- +ELF Section List offset. + +type: keyword + +-- + +*`process.elf.sections.physical_size`*:: ++ +-- +ELF Section List physical size. + +type: long + +format: bytes + +-- + +*`process.elf.sections.type`*:: ++ +-- +ELF Section List type. + +type: keyword + +-- + +*`process.elf.sections.virtual_address`*:: ++ +-- +ELF Section List virtual address. + +type: long + +format: string + +-- + +*`process.elf.sections.virtual_size`*:: ++ +-- +ELF Section List virtual size. + +type: long + +format: string + +-- + +*`process.elf.segments`*:: ++ +-- +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. + +type: nested + +-- + +*`process.elf.segments.sections`*:: ++ +-- +ELF object segment sections. + +type: keyword + +-- + +*`process.elf.segments.type`*:: ++ +-- +ELF object segment type. + +type: keyword + +-- + +*`process.elf.shared_libraries`*:: ++ +-- +List of shared libraries used by this ELF object. + +type: keyword + +-- + +*`process.elf.telfhash`*:: ++ +-- +telfhash symbol hash for ELF file. + +type: keyword + +-- + +*`process.entity_id`*:: ++ +-- +Unique identifier for the process. +The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. +Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. + +type: keyword + +example: c2c455d9f99375d + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.executable`*:: ++ +-- +Absolute path to the process executable. + +type: keyword + +example: /usr/bin/ssh + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.executable.text`*:: ++ +-- +type: text + +-- + +*`process.exit_code`*:: ++ +-- +The exit code of the process, if this is a termination event. +The field should be absent if there is no exit code for the event (e.g. process start). + +type: long + +example: 137 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. 
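+
+As a minimal sketch (Python standard library only, assuming read access to the executable on disk) of how a collector might derive the `process.hash.*` values; the `ssdeep` digest requires third-party bindings and is omitted:
+
+[source,python]
+----
+import hashlib
+
+def process_hashes(executable_path: str) -> dict:
+    """Compute the md5/sha1/sha256/sha512 values for process.hash.*."""
+    digests = {name: hashlib.new(name) for name in ("md5", "sha1", "sha256", "sha512")}
+    with open(executable_path, "rb") as fh:
+        for chunk in iter(lambda: fh.read(65536), b""):
+            for digest in digests.values():
+                digest.update(chunk)
+    return {name: d.hexdigest() for name, d in digests.items()}
+
+# e.g. process_hashes("/usr/bin/ssh") -> {"md5": "...", "sha1": "...", ...}
+----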
+ +-- + +*`process.hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.hash.ssdeep`*:: ++ +-- +SSDEEP hash. + +type: keyword + +-- + +*`process.name`*:: ++ +-- +Process name. +Sometimes called program name or similar. + +type: keyword + +example: ssh + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.name.text`*:: ++ +-- +type: text + +-- + +*`process.parent.args`*:: ++ +-- +Array of process arguments, starting with the absolute path to the executable. +May be filtered to protect sensitive information. + +type: keyword + +example: ["/usr/bin/ssh", "-l", "user", "10.0.0.16"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.args_count`*:: ++ +-- +Length of the process.args array. +This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. + +type: long + +example: 4 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.code_signature.signing_id`*:: ++ +-- +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. + +type: keyword + +example: com.apple.xpc.proxy + +-- + +*`process.parent.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.code_signature.team_id`*:: ++ +-- +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. + +type: keyword + +example: EQHXZ8M8AV + +-- + +*`process.parent.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.command_line`*:: ++ +-- +Full command line that started the process, including the absolute path to the executable, and all arguments. +Some arguments may be filtered to protect sensitive information. + +type: keyword + +example: /usr/bin/ssh -l user 10.0.0.16 + +{yes-icon} {ecs-ref}[ECS] field. 
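+
+The relationship between the `args`, `args_count`, and `command_line` fields (on the process and its parent alike) can be made concrete with a short sketch; note that `shlex.join` applies POSIX-style quoting, which is only an approximation of what a given OS reports:
+
+[source,python]
+----
+import shlex
+
+args = ["/usr/bin/ssh", "-l", "user", "10.0.0.16"]
+doc = {
+    "process.parent.args": args,
+    "process.parent.args_count": len(args),           # length of the args array
+    "process.parent.command_line": shlex.join(args),  # "/usr/bin/ssh -l user 10.0.0.16"
+}
+----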
+ +-- + +*`process.parent.command_line.text`*:: ++ +-- +type: text + +-- + +*`process.parent.elf.architecture`*:: ++ +-- +Machine architecture of the ELF file. + +type: keyword + +example: x86-64 + +-- + +*`process.parent.elf.byte_order`*:: ++ +-- +Byte sequence of ELF file. + +type: keyword + +example: Little Endian + +-- + +*`process.parent.elf.cpu_type`*:: ++ +-- +CPU type of the ELF file. + +type: keyword + +example: Intel + +-- + +*`process.parent.elf.creation_date`*:: ++ +-- +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. + +type: date + +-- + +*`process.parent.elf.exports`*:: ++ +-- +List of exported element names and types. + +type: flattened + +-- + +*`process.parent.elf.header.abi_version`*:: ++ +-- +Version of the ELF Application Binary Interface (ABI). + +type: keyword + +-- + +*`process.parent.elf.header.class`*:: ++ +-- +Header class of the ELF file. + +type: keyword + +-- + +*`process.parent.elf.header.data`*:: ++ +-- +Data table of the ELF header. + +type: keyword + +-- + +*`process.parent.elf.header.entrypoint`*:: ++ +-- +Header entrypoint of the ELF file. + +type: long + +format: string + +-- + +*`process.parent.elf.header.object_version`*:: ++ +-- +"0x1" for original ELF files. + +type: keyword + +-- + +*`process.parent.elf.header.os_abi`*:: ++ +-- +Application Binary Interface (ABI) of the Linux OS. + +type: keyword + +-- + +*`process.parent.elf.header.type`*:: ++ +-- +Header type of the ELF file. + +type: keyword + +-- + +*`process.parent.elf.header.version`*:: ++ +-- +Version of the ELF header. + +type: keyword + +-- + +*`process.parent.elf.imports`*:: ++ +-- +List of imported element names and types. + +type: flattened + +-- + +*`process.parent.elf.sections`*:: ++ +-- +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. + +type: nested + +-- + +*`process.parent.elf.sections.chi2`*:: ++ +-- +Chi-square probability distribution of the section. + +type: long + +format: number + +-- + +*`process.parent.elf.sections.entropy`*:: ++ +-- +Shannon entropy calculation from the section. + +type: long + +format: number + +-- + +*`process.parent.elf.sections.flags`*:: ++ +-- +ELF Section List flags. + +type: keyword + +-- + +*`process.parent.elf.sections.name`*:: ++ +-- +ELF Section List name. + +type: keyword + +-- + +*`process.parent.elf.sections.physical_offset`*:: ++ +-- +ELF Section List offset. + +type: keyword + +-- + +*`process.parent.elf.sections.physical_size`*:: ++ +-- +ELF Section List physical size. + +type: long + +format: bytes + +-- + +*`process.parent.elf.sections.type`*:: ++ +-- +ELF Section List type. + +type: keyword + +-- + +*`process.parent.elf.sections.virtual_address`*:: ++ +-- +ELF Section List virtual address. + +type: long + +format: string + +-- + +*`process.parent.elf.sections.virtual_size`*:: ++ +-- +ELF Section List virtual size. + +type: long + +format: string + +-- + +*`process.parent.elf.segments`*:: ++ +-- +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. + +type: nested + +-- + +*`process.parent.elf.segments.sections`*:: ++ +-- +ELF object segment sections. + +type: keyword + +-- + +*`process.parent.elf.segments.type`*:: ++ +-- +ELF object segment type. 
+ +type: keyword + +-- + +*`process.parent.elf.shared_libraries`*:: ++ +-- +List of shared libraries used by this ELF object. + +type: keyword + +-- + +*`process.parent.elf.telfhash`*:: ++ +-- +telfhash symbol hash for ELF file. + +type: keyword + +-- + +*`process.parent.entity_id`*:: ++ +-- +Unique identifier for the process. +The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. +Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. + +type: keyword + +example: c2c455d9f99375d + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.executable`*:: ++ +-- +Absolute path to the process executable. + +type: keyword + +example: /usr/bin/ssh + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.executable.text`*:: ++ +-- +type: text + +-- + +*`process.parent.exit_code`*:: ++ +-- +The exit code of the process, if this is a termination event. +The field should be absent if there is no exit code for the event (e.g. process start). + +type: long + +example: 137 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.hash.ssdeep`*:: ++ +-- +SSDEEP hash. + +type: keyword + +-- + +*`process.parent.name`*:: ++ +-- +Process name. +Sometimes called program name or similar. + +type: keyword + +example: ssh + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.name.text`*:: ++ +-- +type: text + +-- + +*`process.parent.pe.architecture`*:: ++ +-- +CPU architecture target for the file. + +type: keyword + +example: x64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pe.imphash`*:: ++ +-- +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +{yes-icon} {ecs-ref}[ECS] field. 
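+
+A sketch of how the compile-time `*.pe.*` strings and the imphash described above might be extracted, assuming the third-party `pefile` package (not part of APM Server; illustration only):
+
+[source,python]
+----
+import pefile  # third-party: pip install pefile
+
+def pe_metadata(path: str) -> dict:
+    """Collect version-info strings and the imphash of a PE file."""
+    pe = pefile.PE(path)
+    strings = {}
+    for file_info in getattr(pe, "FileInfo", []) or []:
+        for entry in file_info:
+            if entry.Key == b"StringFileInfo":
+                for table in entry.StringTable:
+                    for key, value in table.entries.items():
+                        strings[key.decode()] = value.decode()
+    return {
+        "pe.original_file_name": strings.get("OriginalFilename"),
+        "pe.company": strings.get("CompanyName"),
+        "pe.product": strings.get("ProductName"),
+        "pe.file_version": strings.get("FileVersion"),
+        "pe.description": strings.get("FileDescription"),
+        "pe.imphash": pe.get_imphash(),
+    }
+----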
+ +-- + +*`process.parent.pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pgid`*:: ++ +-- +Identifier of the group of processes the process belongs to. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.pid`*:: ++ +-- +Process id. + +type: long + +example: 4242 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.ppid`*:: ++ +-- +Parent process' pid. + +type: long + +example: 4241 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.start`*:: ++ +-- +The time the process started. + +type: date + +example: 2016-05-23T08:05:34.853Z + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.thread.id`*:: ++ +-- +Thread ID. + +type: long + +example: 4242 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.thread.name`*:: ++ +-- +Thread name. + +type: keyword + +example: thread-0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.title`*:: ++ +-- +Process title. +The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.title.text`*:: ++ +-- +type: text + +-- + +*`process.parent.uptime`*:: ++ +-- +Seconds the process has been up. + +type: long + +example: 1325 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.working_directory`*:: ++ +-- +The working directory of the process. + +type: keyword + +example: /home/alice + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.parent.working_directory.text`*:: ++ +-- +type: text + +-- + +*`process.pe.architecture`*:: ++ +-- +CPU architecture target for the file. + +type: keyword + +example: x64 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pe.imphash`*:: ++ +-- +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf + +{yes-icon} {ecs-ref}[ECS] field. + +-- +*`process.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. +type: keyword -*`docker.container.id`*:: +example: MSPAINT.EXE + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pe.product`*:: + -- -type: alias +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pgid`*:: ++ +-- +Identifier of the group of processes the process belongs to. 
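+
+A sketch (POSIX semantics, assuming the third-party `psutil` package; illustration only) of how this and the neighbouring numeric identity fields might be populated:
+
+[source,python]
+----
+import os
+import time
+
+import psutil  # third-party: pip install psutil
+
+def process_identity(pid: int) -> dict:
+    p = psutil.Process(pid)
+    return {
+        "process.pid": pid,
+        "process.ppid": p.ppid(),
+        "process.pgid": os.getpgid(pid),  # POSIX only
+        "process.uptime": int(time.time() - p.create_time()),
+    }
+----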
+ +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.pid`*:: ++ +-- +Process id. + +type: long + +example: 4242 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.ppid`*:: ++ +-- +Parent process' pid. + +type: long + +example: 4241 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.start`*:: ++ +-- +The time the process started. + +type: date + +example: 2016-05-23T08:05:34.853Z + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.thread.id`*:: ++ +-- +Thread ID. + +type: long + +example: 4242 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.thread.name`*:: ++ +-- +Thread name. + +type: keyword + +example: thread-0 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.title`*:: ++ +-- +Process title. +The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. + +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.title.text`*:: ++ +-- +type: text + +-- + +*`process.uptime`*:: ++ +-- +Seconds the process has been up. + +type: long + +example: 1325 + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.working_directory`*:: ++ +-- +The working directory of the process. + +type: keyword + +example: /home/alice + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`process.working_directory.text`*:: ++ +-- +type: text + +-- + +[float] +=== registry + +Fields related to Windows Registry operations. + + +*`registry.data.bytes`*:: ++ +-- +Original bytes written with base64 encoding. +For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. + +type: keyword + +example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.data.strings`*:: ++ +-- +Content when writing string types. +Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). + +type: keyword + +example: ["C:\rta\red_ttp\bin\myapp.exe"] + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.data.type`*:: ++ +-- +Standard registry type for encoding contents + +type: keyword + +example: REG_SZ + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.hive`*:: ++ +-- +Abbreviated name for the hive. + +type: keyword + +example: HKLM + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.key`*:: ++ +-- +Hive-relative path of keys. + +type: keyword + +example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.path`*:: ++ +-- +Full path, including hive, key and value + +type: keyword + +example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +*`registry.value`*:: ++ +-- +Name of the value written. + +type: keyword + +example: Debugger + +{yes-icon} {ecs-ref}[ECS] field. + +-- + +[float] +=== related + +This field set is meant to facilitate pivoting around a piece of data. +Some pieces of information can be seen in many places in an ECS event. 
To facilitate searching for them, store an array of all seen values to their corresponding field in `related.`. +A concrete example is IP addresses, which can be under host, observer, source, destination, client, server, and network.forwarded_ip. If you append all IPs to `related.ip`, you can then search for a given IP trivially, no matter where it appeared, by querying `related.ip:192.0.2.15`. + + +*`related.hash`*:: ++ +-- +All the hashes seen on your event. Populating this field, then using it to search for hashes can help in situations where you're unsure what the hash algorithm is (and therefore which key name to search). + +type: keyword -alias to: container.id +{yes-icon} {ecs-ref}[ECS] field. -- -*`docker.container.image`*:: +*`related.hosts`*:: + -- -type: alias +All hostnames or other host identifiers seen on your event. Example identifiers include FQDNs, domain names, workstation names, or aliases. -alias to: container.image.name +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -*`docker.container.name`*:: +*`related.ip`*:: + -- -type: alias +All of the IPs seen on your event. -alias to: container.name +type: ip + +{yes-icon} {ecs-ref}[ECS] field. -- -*`docker.container.labels`*:: +*`related.user`*:: + -- -Image labels. +All the user names or other user identifiers seen on the event. +type: keyword -type: object +{yes-icon} {ecs-ref}[ECS] field. -- -[[exported-fields-ecs]] -== ECS fields +[float] +=== rule -ECS Fields. +Rule fields are used to capture the specifics of any observer or agent rules that generate alerts or other notable events. +Examples of data sources that would populate the rule fields include: network admission control platforms, network or host IDS/IPS, network firewalls, web application firewalls, url filters, endpoint detection and response (EDR) systems, etc. -*`@timestamp`*:: +*`rule.author`*:: + -- -Date/time when the event originated. -This is the date/time extracted from the event, typically representing when the event was generated by the source. -If the event source has no original timestamp, this value is typically populated by the first time the event was received by the pipeline. -Required field for all events. +Name, organization, or pseudonym of the author or authors who created the rule used to generate this event. -type: date +type: keyword -example: 2016-05-23T08:05:34.853Z +example: ["Star-Lord"] -required: True +{yes-icon} {ecs-ref}[ECS] field. -- -*`labels`*:: +*`rule.category`*:: + -- -Custom key/value pairs. -Can be used to add meta information to events. Should not contain nested objects. All values are stored as keyword. -Example: `docker` and `k8s` labels. - -type: object - -example: {"application": "foo-bar", "env": "production"} - --- +A categorization value keyword used by the entity using the rule for detection of this event. -*`message`*:: -+ --- -For log events the message field contains the log message, optimized for viewing in a log viewer. -For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. -If multiple messages exist, they can be combined into one message. +type: keyword -type: text +example: Attempted Information Leak -example: Hello World +{yes-icon} {ecs-ref}[ECS] field. -- -*`tags`*:: +*`rule.description`*:: + -- -List of keywords used to tag each event. +The description of the rule generating the event. 
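+
+The append-all-values pattern described for `related.*` above can be sketched as follows (pure Python; the dotted names assume a flattened event and are illustrative only):
+
+[source,python]
+----
+def build_related_ip(event: dict) -> list:
+    """Collect every IP seen on the event into related.ip, deduplicated."""
+    candidates = ("source.ip", "destination.ip", "client.ip",
+                  "server.ip", "host.ip", "network.forwarded_ip")
+    related = []
+    for field in candidates:
+        value = event.get(field)
+        for ip in (value if isinstance(value, list) else [value]):
+            if ip and ip not in related:
+                related.append(ip)
+    return related
+----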
type: keyword -example: ["production", "env2"] - --- - -[float] -=== agent +example: Block requests to public DNS over HTTPS / TLS protocols -The agent fields contain the data about the software entity, if any, that collects, detects, or observes events on a host, or takes measurements on a host. -Examples include Beats. Agents may also run on observers. ECS agent.* fields shall be populated with details of the agent running on the host or observer where the event happened or the measurement was taken. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`agent.ephemeral_id`*:: +*`rule.id`*:: + -- -Ephemeral identifier of this agent (if one exists). -This id normally changes across restarts, but `agent.id` does not. +A rule ID that is unique within the scope of an agent, observer, or other entity using the rule for detection of this event. type: keyword -example: 8a4f500f +example: 101 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`agent.id`*:: +*`rule.license`*:: + -- -Unique identifier of this agent (if one exists). -Example: For Beats this would be beat.id. +Name of the license under which the rule used to generate this event is made available. type: keyword -example: 8a4f500d +example: Apache 2.0 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`agent.name`*:: +*`rule.name`*:: + -- -Custom name of the agent. -This is a name that can be given to an agent. This can be helpful if for example two Filebeat instances are running on the same host but a human readable separation is needed on which Filebeat instance data is coming from. -If no name is given, the name is often left empty. +The name of the rule or signature generating the event. type: keyword -example: foo +example: BLOCK_DNS_over_TLS + +{yes-icon} {ecs-ref}[ECS] field. -- -*`agent.type`*:: +*`rule.reference`*:: + -- -Type of the agent. -The agent type stays always the same and should be given by the agent used. In case of Filebeat the agent would always be Filebeat also if two Filebeat instances are run on the same machine. +Reference URL to additional information about the rule used to generate this event. +The URL can point to the vendor's documentation about the rule. If that's not available, it can also be a link to a more general page describing this type of alert. type: keyword -example: filebeat +example: https://en.wikipedia.org/wiki/DNS_over_TLS + +{yes-icon} {ecs-ref}[ECS] field. -- -*`agent.version`*:: +*`rule.ruleset`*:: + -- -Version of the agent. +Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. type: keyword -example: 6.0.0-rc2 - --- - -[float] -=== as +example: Standard_Protocol_Filters -An autonomous system (AS) is a collection of connected Internet Protocol (IP) routing prefixes under the control of one or more network operators on behalf of a single administrative entity or domain that presents a common, clearly defined routing policy to the internet. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`as.number`*:: +*`rule.uuid`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. -type: long +type: keyword -example: 15169 +example: 1100110011 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`as.organization.name`*:: +*`rule.version`*:: + -- -Organization name. +The version / revision of the rule being used for analysis. 
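+
+Taken together, the per-field examples in this section describe one coherent alert; an illustrative fragment (values copied from those examples, not from any specific product):
+
+[source,python]
+----
+rule_fields = {
+    "rule.id": "101",
+    "rule.uuid": "1100110011",
+    "rule.name": "BLOCK_DNS_over_TLS",
+    "rule.version": "1.1",
+    "rule.category": "Attempted Information Leak",
+    "rule.ruleset": "Standard_Protocol_Filters",
+    "rule.reference": "https://en.wikipedia.org/wiki/DNS_over_TLS",
+    "rule.description": "Block requests to public DNS over HTTPS / TLS protocols",
+    "rule.author": ["Star-Lord"],
+    "rule.license": "Apache 2.0",
+}
+----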
type: keyword -example: Google LLC - --- +example: 1.1 -*`as.organization.name.text`*:: -+ --- -type: text +{yes-icon} {ecs-ref}[ECS] field. -- [float] -=== client +=== server -A client is defined as the initiator of a network connection for events regarding sessions, connections, or bidirectional flow records. -For TCP events, the client is the initiator of the TCP connection that sends the SYN packet(s). For other protocols, the client is generally the initiator or requestor in the network transaction. Some systems use the term "originator" to refer the client in TCP connections. The client fields describe details about the system acting as the client in the network event. Client fields are usually populated in conjunction with server fields. Client fields are generally not populated for packet-level events. +A Server is defined as the responder in a network connection for events regarding sessions, connections, or bidirectional flow records. +For TCP events, the server is the receiver of the initial SYN packet(s) of the TCP connection. For other protocols, the server is generally the responder in the network transaction. Some systems actually use the term "responder" to refer the server in TCP connections. The server fields describe details about the system acting as the server in the network event. Server fields are usually populated in conjunction with client fields. Server fields are generally not populated for packet-level events. Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. -*`client.address`*:: +*`server.address`*:: + -- -Some event client addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Some event server addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.as.number`*:: +*`server.as.number`*:: + -- Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. @@ -2225,9 +15218,11 @@ type: long example: 15169 +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.as.organization.name`*:: +*`server.as.organization.name`*:: + -- Organization name. @@ -2236,19 +15231,21 @@ type: keyword example: Google LLC +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.as.organization.name.text`*:: +*`server.as.organization.name.text`*:: + -- type: text -- -*`client.bytes`*:: +*`server.bytes`*:: + -- -Bytes sent from the client to the server. +Bytes sent from the server to the client. type: long @@ -2256,18 +15253,22 @@ example: 184 format: bytes +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.domain`*:: +*`server.domain`*:: + -- -Client domain. +Server domain. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.city_name`*:: +*`server.geo.city_name`*:: + -- City name. @@ -2276,9 +15277,22 @@ type: keyword example: Montreal +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.continent_name`*:: +*`server.geo.continent_code`*:: ++ +-- +Two-letter code representing continent's name. 
+ +type: keyword + +example: NA + +-- + +*`server.geo.continent_name`*:: + -- Name of the continent. @@ -2287,9 +15301,11 @@ type: keyword example: North America +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.country_iso_code`*:: +*`server.geo.country_iso_code`*:: + -- Country ISO code. @@ -2298,9 +15314,11 @@ type: keyword example: CA +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.country_name`*:: +*`server.geo.country_name`*:: + -- Country name. @@ -2309,9 +15327,11 @@ type: keyword example: Canada +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.location`*:: +*`server.geo.location`*:: + -- Longitude and latitude. @@ -2320,9 +15340,11 @@ type: geo_point example: { "lon": -73.614830, "lat": 45.505918 } +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.name`*:: +*`server.geo.name`*:: + -- User-defined description of a location, at the level of granularity they care about. @@ -2333,9 +15355,23 @@ type: keyword example: boston-dc +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.region_iso_code`*:: +*`server.geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`server.geo.region_iso_code`*:: + -- Region ISO code. @@ -2344,9 +15380,11 @@ type: keyword example: CA-QC +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.geo.region_name`*:: +*`server.geo.region_name`*:: + -- Region name. @@ -2355,97 +15393,140 @@ type: keyword example: Quebec +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.ip`*:: +*`server.geo.timezone`*:: + -- -IP address of the client. -Can be one or multiple IPv4 or IPv6 addresses. +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`server.ip`*:: ++ +-- +IP address of the server (IPv4 or IPv6). type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.mac`*:: +*`server.mac`*:: + -- -MAC address of the client. +MAC address of the server. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. type: keyword +example: 00-00-5E-00-53-23 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.nat.ip`*:: +*`server.nat.ip`*:: + -- -Translated IP of source based NAT sessions (e.g. internal client to internet). -Typically connections traversing load balancers, firewalls, or routers. +Translated ip of destination based NAT sessions (e.g. internet to private DMZ) +Typically used with load balancers, firewalls, or routers. type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.nat.port`*:: +*`server.nat.port`*:: + -- -Translated port of source based NAT sessions (e.g. internal client to internet). -Typically connections traversing load balancers, firewalls, or routers. +Translated port of destination based NAT sessions (e.g. internet to private DMZ) +Typically used with load balancers, firewalls, or routers. type: long format: string +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.packets`*:: +*`server.packets`*:: + -- -Packets sent from the client to the server. +Packets sent from the server to the client. type: long example: 12 +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.port`*:: +*`server.port`*:: ++ +-- +Port of the server. + +type: long + +format: string + +{yes-icon} {ecs-ref}[ECS] field. 
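+
+The RFC 7042 notation suggested for `server.mac` above (and the other `*.mac` fields) is straightforward to normalize to; a minimal sketch:
+
+[source,python]
+----
+def ecs_mac(raw: str) -> str:
+    """Uppercase hex octets separated by hyphens, per RFC 7042."""
+    digits = "".join(c for c in raw.upper() if c in "0123456789ABCDEF")
+    return "-".join(digits[i:i + 2] for i in range(0, len(digits), 2))
+
+assert ecs_mac("00:00:5e:00:53:23") == "00-00-5E-00-53-23"
+----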
+ +-- + +*`server.registered_domain`*:: + -- -Port of the client. +The highest registered server domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". -type: long +type: keyword -format: string +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`client.registered_domain`*:: +*`server.subdomain`*:: + -- -The highest registered client domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. type: keyword -example: google.com +example: east -- -*`client.top_level_domain`*:: +*`server.top_level_domain`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword example: co.uk +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.domain`*:: +*`server.user.domain`*:: + -- Name of the directory the user is a member of. @@ -2453,18 +15534,22 @@ For example, an LDAP or Active Directory domain name. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.email`*:: +*`server.user.email`*:: + -- User email address. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.full_name`*:: +*`server.user.full_name`*:: + -- User's full name, if available. @@ -2473,16 +15558,18 @@ type: keyword example: Albert Einstein +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.full_name.text`*:: +*`server.user.full_name.text`*:: + -- type: text -- -*`client.user.group.domain`*:: +*`server.user.group.domain`*:: + -- Name of the directory the group is a member of. @@ -2490,27 +15577,33 @@ For example, an LDAP or Active Directory domain name. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.group.id`*:: +*`server.user.group.id`*:: + -- Unique identifier for the group on the system/platform. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.group.name`*:: +*`server.user.group.name`*:: + -- Name of the group. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.hash`*:: +*`server.user.hash`*:: + -- Unique user hash to correlate information for a user in anonymized form. 
@@ -2518,18 +15611,22 @@ Useful if `user.id` or `user.name` contain confidential information and cannot b type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.id`*:: +*`server.user.id`*:: + -- -Unique identifiers of the user. +Unique identifier of the user. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.name`*:: +*`server.user.name`*:: + -- Short name or login of the user. @@ -2538,242 +15635,156 @@ type: keyword example: albert +{yes-icon} {ecs-ref}[ECS] field. + -- -*`client.user.name.text`*:: +*`server.user.name.text`*:: + -- type: text -- -[float] -=== cloud - -Fields related to the cloud or infrastructure the events are coming from. - - -*`cloud.account.id`*:: +*`server.user.roles`*:: + -- -The cloud account or organization id used to identify different entities in a multi-tenant environment. -Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. +Array of user roles at the time of the event. type: keyword -example: 666777888999 +example: ["kibana_admin", "reporting_user"] --- +{yes-icon} {ecs-ref}[ECS] field. -*`cloud.availability_zone`*:: -+ -- -Availability zone in which this host is running. -type: keyword +[float] +=== service -example: us-east-1c +The service fields describe the service for or from which the data was collected. +These fields help you find and correlate logs for a specific service and version. --- -*`cloud.instance.id`*:: +*`service.ephemeral_id`*:: + -- -Instance ID of the host machine. +Ephemeral identifier of this service (if one exists). +This id normally changes across restarts, but `service.id` does not. type: keyword -example: i-1234567890abcdef0 - --- - -*`cloud.instance.name`*:: -+ --- -Instance name of the host machine. +example: 8a4f500f -type: keyword +{yes-icon} {ecs-ref}[ECS] field. -- -*`cloud.machine.type`*:: +*`service.id`*:: + -- -Machine type of the host machine. +Unique identifier of the running service. If the service is comprised of many nodes, the `service.id` should be the same for all nodes. +This id should uniquely identify the service. This makes it possible to correlate logs and metrics for one specific service, no matter which particular node emitted the event. +Note that if you need to see the events from one specific host of the service, you should filter on that `host.name` or `host.id` instead. type: keyword -example: t2.medium - --- - -*`cloud.provider`*:: -+ --- -Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. - -type: keyword +example: d37e5ebfe0ae6c4972dbe9f0174a1637bb8247f6 -example: aws +{yes-icon} {ecs-ref}[ECS] field. -- -*`cloud.region`*:: +*`service.name`*:: + -- -Region in which this host is running. +Name of the service data is collected from. +The name of the service is normally user given. This allows for distributed services that run on multiple hosts to correlate the related instances based on the name. +In the case of Elasticsearch the `service.name` could contain the cluster name. For Beats the `service.name` is by default a copy of the `service.type` field if no name is specified. type: keyword -example: us-east-1 - --- - -[float] -=== code_signature - -These fields contain information about binary code signatures. - - -*`code_signature.exists`*:: -+ --- -Boolean to capture if a signature is present. - -type: boolean - -example: true - --- - -*`code_signature.status`*:: -+ --- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. 
Leave unpopulated if the validity or trust of the certificate was unchecked. - -type: keyword +example: elasticsearch-metrics -example: ERROR_UNTRUSTED_ROOT +{yes-icon} {ecs-ref}[ECS] field. -- -*`code_signature.subject_name`*:: +*`service.node.name`*:: + -- -Subject name of the code signer +Name of a service node. +This allows for two nodes of the same service running on the same host to be differentiated. Therefore, `service.node.name` should typically be unique across nodes of a given service. +In the case of Elasticsearch, the `service.node.name` could contain the unique node name within the Elasticsearch cluster. In cases where the service doesn't have the concept of a node name, the host name or container name can be used to distinguish running instances that make up this service. If those do not provide uniqueness (e.g. multiple instances of the service running on the same host) - the node name can be manually set. type: keyword -example: Microsoft Corporation - --- - -*`code_signature.trusted`*:: -+ --- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. - -type: boolean - -example: true - --- - -*`code_signature.valid`*:: -+ --- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. - -type: boolean +example: instance-0000000016 -example: true +{yes-icon} {ecs-ref}[ECS] field. -- -[float] -=== container - -Container fields are used for meta information about the specific container that is the source of information. -These fields help correlate data based containers from any runtime. - - -*`container.id`*:: +*`service.state`*:: + -- -Unique container id. +Current state of the service. type: keyword --- - -*`container.image.name`*:: -+ --- -Name of the image the container was built on. - -type: keyword +{yes-icon} {ecs-ref}[ECS] field. -- -*`container.image.tag`*:: +*`service.type`*:: + -- -Container image tags. +The type of the service data is collected from. +The type can be used to group and correlate logs and metrics from one service type. +Example: If logs or metrics are collected from Elasticsearch, `service.type` would be `elasticsearch`. type: keyword --- - -*`container.labels`*:: -+ --- -Image labels. +example: elasticsearch -type: object +{yes-icon} {ecs-ref}[ECS] field. -- -*`container.name`*:: +*`service.version`*:: + -- -Container name. +Version of the service the data was collected from. +This allows to look at a data set only for a specific version of a service. type: keyword --- - -*`container.runtime`*:: -+ --- -Runtime managing this container. - -type: keyword +example: 3.2.4 -example: docker +{yes-icon} {ecs-ref}[ECS] field. -- [float] -=== destination +=== source -Destination fields describe details about the destination of a packet/event. -Destination fields are usually populated in conjunction with source fields. +Source fields capture details about the sender of a network exchange/packet. These fields are populated from a network event, packet, or other event containing details of a network transaction. +Source fields are usually populated in conjunction with destination fields. The source and destination fields are considered the baseline and should always be filled if an event contains source and destination details from a network transaction. 
If the event also contains identification of the client and server roles, then the client and server fields should also be populated. -*`destination.address`*:: +*`source.address`*:: + -- -Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.as.number`*:: +*`source.as.number`*:: + -- Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. @@ -2782,9 +15793,11 @@ type: long example: 15169 +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.as.organization.name`*:: +*`source.as.organization.name`*:: + -- Organization name. @@ -2793,19 +15806,21 @@ type: keyword example: Google LLC +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.as.organization.name.text`*:: +*`source.as.organization.name.text`*:: + -- type: text -- -*`destination.bytes`*:: +*`source.bytes`*:: + -- -Bytes sent from the destination to the source. +Bytes sent from the source to the destination. type: long @@ -2813,18 +15828,22 @@ example: 184 format: bytes +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.domain`*:: +*`source.domain`*:: + -- -Destination domain. +Source domain. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.city_name`*:: +*`source.geo.city_name`*:: + -- City name. @@ -2833,9 +15852,22 @@ type: keyword example: Montreal +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.continent_name`*:: +*`source.geo.continent_code`*:: ++ +-- +Two-letter code representing continent's name. + +type: keyword + +example: NA + +-- + +*`source.geo.continent_name`*:: + -- Name of the continent. @@ -2844,9 +15876,11 @@ type: keyword example: North America +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.country_iso_code`*:: +*`source.geo.country_iso_code`*:: + -- Country ISO code. @@ -2855,9 +15889,11 @@ type: keyword example: CA +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.country_name`*:: +*`source.geo.country_name`*:: + -- Country name. @@ -2866,9 +15902,11 @@ type: keyword example: Canada +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.location`*:: +*`source.geo.location`*:: + -- Longitude and latitude. @@ -2877,9 +15915,11 @@ type: geo_point example: { "lon": -73.614830, "lat": 45.505918 } +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.name`*:: +*`source.geo.name`*:: + -- User-defined description of a location, at the level of granularity they care about. @@ -2890,9 +15930,23 @@ type: keyword example: boston-dc +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.region_iso_code`*:: +*`source.geo.postal_code`*:: ++ +-- +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. + +type: keyword + +example: 94040 + +-- + +*`source.geo.region_iso_code`*:: + -- Region ISO code. @@ -2901,9 +15955,11 @@ type: keyword example: CA-QC +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.geo.region_name`*:: +*`source.geo.region_name`*:: + -- Region name. 
@@ -2912,97 +15968,140 @@ type: keyword example: Quebec +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.ip`*:: +*`source.geo.timezone`*:: ++ +-- +The time zone of the location, such as IANA time zone name. + +type: keyword + +example: America/Argentina/Buenos_Aires + +-- + +*`source.ip`*:: + -- -IP address of the destination. -Can be one or multiple IPv4 or IPv6 addresses. +IP address of the source (IPv4 or IPv6). type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.mac`*:: +*`source.mac`*:: + -- -MAC address of the destination. +MAC address of the source. +The notation format from RFC 7042 is suggested: Each octet (that is, 8-bit byte) is represented by two [uppercase] hexadecimal digits giving the value of the octet as an unsigned integer. Successive octets are separated by a hyphen. type: keyword +example: 00-00-5E-00-53-23 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.nat.ip`*:: +*`source.nat.ip`*:: + -- -Translated ip of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. +Translated ip of source based NAT sessions (e.g. internal client to internet) +Typically connections traversing load balancers, firewalls, or routers. type: ip +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.nat.port`*:: +*`source.nat.port`*:: + -- -Port the source session is translated to by NAT Device. +Translated port of source based NAT sessions. (e.g. internal client to internet) Typically used with load balancers, firewalls, or routers. type: long format: string +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.packets`*:: +*`source.packets`*:: + -- -Packets sent from the destination to the source. +Packets sent from the source to the destination. type: long example: 12 +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.port`*:: +*`source.port`*:: + -- -Port of the destination. +Port of the source. type: long format: string +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.registered_domain`*:: +*`source.registered_domain`*:: + -- -The highest registered destination domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". +The highest registered source domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". type: keyword -example: google.com +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`destination.top_level_domain`*:: +*`source.subdomain`*:: ++ +-- +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. + +type: keyword + +example: east + +-- + +*`source.top_level_domain`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". 
+The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword example: co.uk +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.domain`*:: +*`source.user.domain`*:: + -- Name of the directory the user is a member of. @@ -3010,18 +16109,22 @@ For example, an LDAP or Active Directory domain name. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.email`*:: +*`source.user.email`*:: + -- User email address. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.full_name`*:: +*`source.user.full_name`*:: + -- User's full name, if available. @@ -3030,16 +16133,18 @@ type: keyword example: Albert Einstein +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.full_name.text`*:: +*`source.user.full_name.text`*:: + -- type: text -- -*`destination.user.group.domain`*:: +*`source.user.group.domain`*:: + -- Name of the directory the group is a member of. @@ -3047,27 +16152,33 @@ For example, an LDAP or Active Directory domain name. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.group.id`*:: +*`source.user.group.id`*:: + -- Unique identifier for the group on the system/platform. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.group.name`*:: +*`source.user.group.name`*:: + -- Name of the group. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.hash`*:: +*`source.user.hash`*:: + -- Unique user hash to correlate information for a user in anonymized form. @@ -3075,18 +16186,22 @@ Useful if `user.id` or `user.name` contain confidential information and cannot b type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.id`*:: +*`source.user.id`*:: + -- -Unique identifiers of the user. +Unique identifier of the user. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.name`*:: +*`source.user.name`*:: + -- Short name or login of the user. @@ -3095,961 +16210,861 @@ type: keyword example: albert +{yes-icon} {ecs-ref}[ECS] field. + -- -*`destination.user.name.text`*:: +*`source.user.name.text`*:: + -- type: text -- -[float] -=== dll - -These fields contain information about code libraries dynamically loaded into processes. - -Many operating systems refer to "shared code libraries" with different names, but this field set refers to all of the following: -* Dynamic-link library (`.dll`) commonly used on Windows -* Shared Object (`.so`) commonly used on Unix-like operating systems -* Dynamic library (`.dylib`) commonly used on macOS - - -*`dll.code_signature.exists`*:: +*`source.user.roles`*:: + -- -Boolean to capture if a signature is present. +Array of user roles at the time of the event. -type: boolean +type: keyword -example: true +example: ["kibana_admin", "reporting_user"] --- +{yes-icon} {ecs-ref}[ECS] field. -*`dll.code_signature.status`*:: -+ -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. 
-type: keyword +[float] +=== threat -example: ERROR_UNTRUSTED_ROOT +Fields to classify events and alerts according to a threat taxonomy such as the MITRE ATT&CK® framework. +These fields are for users to classify alerts from all of their sources (e.g. IDS, NGFW, etc.) within a common taxonomy. The threat.tactic.* fields are meant to capture the high level category of the threat (e.g. "impact"). The threat.technique.* fields are meant to capture which kind of approach is used by this detected threat, to accomplish the goal (e.g. "endpoint denial of service"). -- -*`dll.code_signature.subject_name`*:: +*`threat.enrichments`*:: + -- -Subject name of the code signer - -type: keyword +A list of associated indicator objects enriching the event, and the context of that association/enrichment. -example: Microsoft Corporation +type: nested -- -*`dll.code_signature.trusted`*:: +*`threat.enrichments.indicator`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Object containing associated indicators enriching the event. -type: boolean - -example: true +type: object -- -*`dll.code_signature.valid`*:: +*`threat.enrichments.indicator.as.number`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. -type: boolean +type: long -example: true +example: 15169 -- -*`dll.hash.md5`*:: +*`threat.enrichments.indicator.as.organization.name`*:: + -- -MD5 hash. +Organization name. type: keyword +example: Google LLC + -- -*`dll.hash.sha1`*:: +*`threat.enrichments.indicator.as.organization.name.text`*:: + -- -SHA1 hash. - -type: keyword +type: text -- -*`dll.hash.sha256`*:: +*`threat.enrichments.indicator.confidence`*:: + -- -SHA256 hash. +Identifies the confidence rating assigned by the provider using STIX confidence scales. Expected values: + * Not Specified, None, Low, Medium, High + * 0-10 + * Admiralty Scale (1-6) + * DNI Scale (5-95) + * WEP Scale (Impossible - Certain) type: keyword +example: High + -- -*`dll.hash.sha512`*:: +*`threat.enrichments.indicator.description`*:: + -- -SHA512 hash. +Describes the type of action conducted by the threat. type: keyword +example: IP x.x.x.x was observed delivering the Angler EK. + -- -*`dll.name`*:: +*`threat.enrichments.indicator.email.address`*:: + -- -Name of the library. -This generally maps to the name of the file on disk. +Identifies a threat indicator as an email address (irrespective of direction). type: keyword -example: kernel32.dll +example: phish@example.com -- -*`dll.path`*:: +*`threat.enrichments.indicator.file.accessed`*:: + -- -Full file path of the library. - -type: keyword +Last time the file was accessed. +Note that not all filesystems keep track of access time. -example: C:\Windows\System32\kernel32.dll +type: date -- -*`dll.pe.company`*:: +*`threat.enrichments.indicator.file.attributes`*:: + -- -Internal company name of the file, provided at compile-time. +Array of file attributes. +Attribute names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write.
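Because `threat.enrichments` is mapped as `nested`, each enrichment travels with its own indicator context. A minimal, invented example of the shape (values are placeholders drawn from the examples in this section; the `matched.*` fields are defined further below):

[source,python]
----
# Invented sample values; field names follow the
# threat.enrichments.* definitions in this section.
enrichment = {
    "indicator": {
        "type": "ipv4-addr",
        "ip": "1.2.3.4",
        "confidence": "High",
        "first_seen": "2020-11-05T17:25:47.000Z",
        "provider": "lrz_urlhaus",
    },
    # matched.* records why this indicator was attached (see below).
    "matched": {
        "atomic": "1.2.3.4",
        "field": "source.ip",
        "type": "indicator_match_rule",
    },
}
event = {"threat": {"enrichments": [enrichment]}}
----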
type: keyword -example: Microsoft Corporation +example: ["readonly", "system"] -- -*`dll.pe.description`*:: +*`threat.enrichments.indicator.file.code_signature.exists`*:: + -- -Internal description of the file, provided at compile-time. +Boolean to capture if a signature is present. -type: keyword +type: boolean -example: Paint +example: true -- -*`dll.pe.file_version`*:: +*`threat.enrichments.indicator.file.code_signature.signing_id`*:: + -- -Internal version of the file, provided at compile-time. +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. type: keyword -example: 6.3.9600.17415 +example: com.apple.xpc.proxy -- -*`dll.pe.original_file_name`*:: +*`threat.enrichments.indicator.file.code_signature.status`*:: + -- -Internal name of the file, provided at compile-time. +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. type: keyword -example: MSPAINT.EXE +example: ERROR_UNTRUSTED_ROOT -- -*`dll.pe.product`*:: +*`threat.enrichments.indicator.file.code_signature.subject_name`*:: + -- -Internal product name of the file, provided at compile-time. +Subject name of the code signer type: keyword -example: Microsoft® Windows® Operating System +example: Microsoft Corporation -- -[float] -=== dns - -Fields describing DNS queries and answers. -DNS events should either represent a single DNS query prior to getting answers (`dns.type:query`) or they should represent a full exchange and contain the query details as well as all of the answers that were provided for this query (`dns.type:answer`). - - -*`dns.answers`*:: +*`threat.enrichments.indicator.file.code_signature.team_id`*:: + -- -An array containing an object for each answer section returned by the server. -The main keys that should be present in these objects are defined by ECS. Records that have more information may contain more keys than what ECS defines. -Not all DNS data sources give all details about DNS answers. At minimum, answer objects must contain the `data` key. If more information is available, map as much of it to ECS as possible, and add any additional fields to the answer objects as custom fields. +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. -type: object +type: keyword + +example: EQHXZ8M8AV -- -*`dns.answers.class`*:: +*`threat.enrichments.indicator.file.code_signature.trusted`*:: + -- -The class of DNS data contained in this resource record. +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. -type: keyword +type: boolean -example: IN +example: true -- -*`dns.answers.data`*:: +*`threat.enrichments.indicator.file.code_signature.valid`*:: + -- -The data describing the resource. -The meaning of this data depends on the type and class of the resource record. +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. -type: keyword +type: boolean -example: 10.10.10.10 +example: true -- -*`dns.answers.name`*:: +*`threat.enrichments.indicator.file.created`*:: + -- -The domain name to which this resource record pertains. 
-If a chain of CNAME is being resolved, each answer's `name` should be the one that corresponds with the answer's `data`. It should not simply be the original `question.name` repeated. - -type: keyword +File creation time. +Note that not all filesystems store the creation time. -example: www.google.com +type: date -- -*`dns.answers.ttl`*:: +*`threat.enrichments.indicator.file.ctime`*:: + -- -The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. - -type: long +Last time the file attributes or metadata changed. +Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. -example: 180 +type: date -- -*`dns.answers.type`*:: +*`threat.enrichments.indicator.file.device`*:: + -- -The type of data contained in this resource record. +Device that is the source of the file. type: keyword -example: CNAME +example: sda -- -*`dns.header_flags`*:: +*`threat.enrichments.indicator.file.directory`*:: + -- -Array of 2 letter DNS header flags. -Expected values are: AA, TC, RD, RA, AD, CD, DO. +Directory where the file is located. It should include the drive letter, when appropriate. type: keyword -example: ['RD', 'RA'] +example: /home/alice -- -*`dns.id`*:: +*`threat.enrichments.indicator.file.drive_letter`*:: + -- -The DNS packet identifier assigned by the program that generated the query. The identifier is copied to the response. +Drive letter where the file is located. This field is only relevant on Windows. +The value should be uppercase, and not include the colon. type: keyword -example: 62111 +example: C -- -*`dns.op_code`*:: +*`threat.enrichments.indicator.file.elf.architecture`*:: + -- -The DNS operation code that specifies the kind of query in the message. This value is set by the originator of a query and copied into the response. +Machine architecture of the ELF file. type: keyword -example: QUERY +example: x86-64 -- -*`dns.question.class`*:: +*`threat.enrichments.indicator.file.elf.byte_order`*:: + -- -The class of records being queried. +Byte sequence of ELF file. type: keyword -example: IN +example: Little Endian -- -*`dns.question.name`*:: +*`threat.enrichments.indicator.file.elf.cpu_type`*:: + -- -The name being queried. -If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \t, \r, and \n respectively. +CPU type of the ELF file. type: keyword -example: www.google.com +example: Intel -- -*`dns.question.registered_domain`*:: +*`threat.enrichments.indicator.file.elf.creation_date`*:: + -- -The highest registered domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". - -type: keyword +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. -example: google.com +type: date -- -*`dns.question.subdomain`*:: +*`threat.enrichments.indicator.file.elf.exports`*:: + -- -The subdomain is all of the labels under the registered_domain. 
-If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. +List of exported element names and types. -type: keyword - -example: www +type: flattened -- -*`dns.question.top_level_domain`*:: +*`threat.enrichments.indicator.file.elf.header.abi_version`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Version of the ELF Application Binary Interface (ABI). type: keyword -example: co.uk - -- -*`dns.question.type`*:: +*`threat.enrichments.indicator.file.elf.header.class`*:: + -- -The type of record being queried. +Header class of the ELF file. type: keyword -example: AAAA - -- -*`dns.resolved_ip`*:: +*`threat.enrichments.indicator.file.elf.header.data`*:: + -- -Array containing all IPs seen in `answers.data`. -The `answers` array can be difficult to use, because of the variety of data formats it can contain. Extracting all IP addresses seen in there to `dns.resolved_ip` makes it possible to index them as IP addresses, and makes them easier to visualize and query for. - -type: ip +Data table of the ELF header. -example: ['10.10.10.10', '10.10.10.11'] +type: keyword -- -*`dns.response_code`*:: +*`threat.enrichments.indicator.file.elf.header.entrypoint`*:: + -- -The DNS response code. +Header entrypoint of the ELF file. -type: keyword +type: long -example: NOERROR +format: string -- -*`dns.type`*:: +*`threat.enrichments.indicator.file.elf.header.object_version`*:: + -- -The type of DNS event captured, query or answer. -If your source of DNS events only gives you DNS queries, you should only create dns events of type `dns.type:query`. -If your source of DNS events gives you answers as well, you should create one event per query (optionally as soon as the query is seen). And a second event containing all query details as well as an array of answers. +"0x1" for original ELF files. type: keyword -example: answer - -- -[float] -=== ecs - -Meta-information specific to ECS. - - -*`ecs.version`*:: +*`threat.enrichments.indicator.file.elf.header.os_abi`*:: + -- -ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. -When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. +Application Binary Interface (ABI) of the Linux OS. type: keyword -example: 1.0.0 - -required: True - -- -[float] -=== error - -These fields can represent errors of any kind. -Use them for errors that happen while fetching events or in cases where the event itself contains an error. - - -*`error.code`*:: +*`threat.enrichments.indicator.file.elf.header.type`*:: + -- -Error code describing the error. +Header type of the ELF file. type: keyword -- -*`error.id`*:: +*`threat.enrichments.indicator.file.elf.header.version`*:: + -- -Unique identifier for the error. +Version of the ELF header. type: keyword -- -*`error.message`*:: +*`threat.enrichments.indicator.file.elf.imports`*:: + -- -Error message. +List of imported element names and types. 
-type: text +type: flattened -- -*`error.stack_trace`*:: +*`threat.enrichments.indicator.file.elf.sections`*:: + -- -The stack trace of this error in plain text. +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. -type: keyword +type: nested -- -*`error.stack_trace.text`*:: +*`threat.enrichments.indicator.file.elf.sections.chi2`*:: + -- -type: text +Chi-square probability distribution of the section. + +type: long + +format: number -- -*`error.type`*:: +*`threat.enrichments.indicator.file.elf.sections.entropy`*:: + -- -The type of the error, for example the class name of the exception. +Shannon entropy calculation from the section. -type: keyword +type: long -example: java.lang.NullPointerException +format: number -- -[float] -=== event - -The event fields are used for context information about the log or metric event itself. -A log is defined as an event containing details of something that happened. Log events must include the time at which the thing happened. Examples of log events include a process starting on a host, a network packet being sent from a source to a destination, or a network connection between a client and a server being initiated or closed. A metric is defined as an event containing one or more numerical measurements and the time at which the measurement was taken. Examples of metric events include memory pressure measured on a host and device temperature. See the `event.kind` definition in this section for additional details about metric and state events. - - -*`event.action`*:: +*`threat.enrichments.indicator.file.elf.sections.flags`*:: + -- -The action captured by the event. -This describes the information in the event. It is more specific than `event.category`. Examples are `group-add`, `process-started`, `file-created`. The value is normally defined by the implementer. +ELF Section List flags. type: keyword -example: user-password-change - -- -*`event.category`*:: +*`threat.enrichments.indicator.file.elf.sections.name`*:: + -- -This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. -`event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. -This field is an array. This will allow proper categorization of some events that fall in multiple categories. +ELF Section List name. type: keyword -example: authentication - -- -*`event.code`*:: +*`threat.enrichments.indicator.file.elf.sections.physical_offset`*:: + -- -Identification code for this event, if one exists. -Some event sources use event codes to identify messages unambiguously, regardless of message language or wording adjustments over time. An example of this is the Windows Event ID. +ELF Section List offset. type: keyword -example: 4648 - -- -*`event.created`*:: +*`threat.enrichments.indicator.file.elf.sections.physical_size`*:: + -- -event.created contains the date/time when the event was first read by an agent, or by your pipeline. -This field is distinct from @timestamp in that @timestamp typically contain the time extracted from the original event. -In most situations, these two timestamps will be slightly different. The difference can be used to calculate the delay between your source generating an event, and the time when your agent first processed it. 
This can be used to monitor your agent's or pipeline's ability to keep up with your event source. -In case the two timestamps are identical, @timestamp should be used. +ELF Section List physical size. -type: date +type: long -example: 2016-05-23T08:05:34.857Z +format: bytes -- -*`event.dataset`*:: +*`threat.enrichments.indicator.file.elf.sections.type`*:: + -- -Name of the dataset. -If an event source publishes more than one type of log or events (e.g. access log, error log), the dataset is used to specify which one the event comes from. -It's recommended but not required to start the dataset name with the module name, followed by a dot, then the dataset name. +ELF Section List type. type: keyword -example: apache.access - -- -*`event.duration`*:: +*`threat.enrichments.indicator.file.elf.sections.virtual_address`*:: + -- -Duration of the event in nanoseconds. -If event.start and event.end are known this value should be the difference between the end and start time. +ELF Section List virtual address. type: long -format: duration +format: string -- -*`event.end`*:: +*`threat.enrichments.indicator.file.elf.sections.virtual_size`*:: + -- -event.end contains the date when the event ended or when the activity was last observed. +ELF Section List virtual size. -type: date +type: long + +format: string -- -*`event.hash`*:: +*`threat.enrichments.indicator.file.elf.segments`*:: + -- -Hash (perhaps logstash fingerprint) of raw field to be able to demonstrate log integrity. +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. -type: keyword - -example: 123456789012345678901234567890ABCD +type: nested -- -*`event.id`*:: +*`threat.enrichments.indicator.file.elf.segments.sections`*:: + -- -Unique ID to describe the event. +ELF object segment sections. type: keyword -example: 8a4f500d - -- -*`event.ingested`*:: +*`threat.enrichments.indicator.file.elf.segments.type`*:: + -- -Timestamp when an event arrived in the central data store. -This is different from `@timestamp`, which is when the event originally occurred. It's also different from `event.created`, which is meant to capture the first time an agent saw the event. -In normal conditions, assuming no tampering, the timestamps should chronologically look like this: `@timestamp` < `event.created` < `event.ingested`. - -type: date +ELF object segment type. -example: 2016-05-23T08:05:35.101Z +type: keyword -- -*`event.kind`*:: +*`threat.enrichments.indicator.file.elf.shared_libraries`*:: + -- -This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. -`event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events. -The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention, different access control, it may also help understand whether the data coming in at a regular interval or not. +List of shared libraries used by this ELF object. type: keyword -example: alert - -- -*`event.module`*:: +*`threat.enrichments.indicator.file.elf.telfhash`*:: + -- -Name of the module this data is coming from. -If your monitoring agent supports the concept of modules or plugins to process events of a given source (e.g. 
Apache logs), `event.module` should contain the name of this module. +telfhash symbol hash for ELF file. type: keyword -example: apache - -- -*`event.original`*:: +*`threat.enrichments.indicator.file.extension`*:: + -- -Raw text message of entire event. Used to demonstrate log integrity. -This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`. +File extension, excluding the leading dot. +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). type: keyword -example: Sep 19 08:26:10 host CEF:0|Security| threatmanager|1.0|100| worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2spt=1232 +example: png -- -*`event.outcome`*:: +*`threat.enrichments.indicator.file.gid`*:: + -- -This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. -`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. -Note that when a single transaction is described in multiple events, each event may populate different values of `event.outcome`, according to their perspective. -Also note that in the case of a compound event (a single event that contains multiple logical events), this field should be populated with the value that best captures the overall success or failure from the perspective of the event producer. -Further note that not all events will have an associated outcome. For example, this field is generally not populated for metric events, events with `event.type:info`, or any events for which an outcome does not make logical sense. +Primary group ID (GID) of the file. type: keyword -example: success +example: 1001 -- -*`event.provider`*:: +*`threat.enrichments.indicator.file.group`*:: + -- -Source of the event. -Event transports such as Syslog or the Windows Event Log typically mention the source of an event. It can be the name of the software that generated the event (e.g. Sysmon, httpd), or of a subsystem of the operating system (kernel, Microsoft-Windows-Security-Auditing). +Primary group name of the file. type: keyword -example: kernel +example: alice -- -*`event.reference`*:: +*`threat.enrichments.indicator.file.inode`*:: + -- -Reference URL linking to additional information about this event. -This URL links to a static definition of the this event. Alert events, indicated by `event.kind:alert`, are a common use case for this field. +Inode representing the file in the filesystem. type: keyword -example: https://system.vendor.com/event/#0001234 +example: 256383 -- -*`event.risk_score`*:: +*`threat.enrichments.indicator.file.mime_type`*:: + -- -Risk score or priority of the event (e.g. security solutions). Use your system's original value here. +MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. -type: float +type: keyword -- -*`event.risk_score_norm`*:: +*`threat.enrichments.indicator.file.mode`*:: + -- -Normalized risk score or priority of the event, on a scale of 0 to 100. -This is mainly useful if you use more than one system that assigns risk scores, and you want to see a normalized value across all systems. +Mode of the file in octal representation. 
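As an aside, the octal `file.mode` shape used here, together with the related size and timestamp fields, falls straight out of a stat call. A sketch assuming a POSIX filesystem and an invented example path:

[source,python]
----
import os
import stat
from datetime import datetime, timezone

st = os.stat("/etc/hosts")  # invented example path
file_fields = {
    # permission bits only, rendered in octal as the field expects, e.g. "0640"
    "mode": format(stat.S_IMODE(st.st_mode), "04o"),
    "size": st.st_size,
    "mtime": datetime.fromtimestamp(st.st_mtime, tz=timezone.utc).isoformat(),
}
----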
-type: float +type: keyword + +example: 0640 -- -*`event.sequence`*:: +*`threat.enrichments.indicator.file.mtime`*:: + -- -Sequence number of the event. -The sequence number is a value published by some event sources, to make the exact ordering of events unambiguous, regardless of the timestamp precision. - -type: long +Last time the file content was modified. -format: string +type: date -- -*`event.severity`*:: +*`threat.enrichments.indicator.file.name`*:: + -- -The numeric severity of the event according to your event source. -What the different severity values mean can be different between sources and use cases. It's up to the implementer to make sure severities are consistent across events from the same source. -The Syslog severity belongs in `log.syslog.severity.code`. `event.severity` is meant to represent the severity according to the event source (e.g. firewall, IDS). If the event source does not publish its own severity, you may optionally copy the `log.syslog.severity.code` to `event.severity`. - -type: long +Name of the file including the extension, without the directory. -example: 7 +type: keyword -format: string +example: example.png -- -*`event.start`*:: +*`threat.enrichments.indicator.file.owner`*:: + -- -event.start contains the date when the event started or when the activity was first observed. +File owner's username. -type: date +type: keyword + +example: alice -- -*`event.timezone`*:: +*`threat.enrichments.indicator.file.path`*:: + -- -This field should be populated when the event's timestamp does not include timezone information already (e.g. default Syslog timestamps). It's optional otherwise. -Acceptable timezone formats are: a canonical ID (e.g. "Europe/Amsterdam"), abbreviated (e.g. "EST") or an HH:mm differential (e.g. "-05:00"). +Full path to the file, including the file name. It should include the drive letter, when appropriate. type: keyword --- +example: /home/alice/example.png -*`event.type`*:: -+ -- -This is one of four ECS Categorization Fields, and indicates the third level in the ECS category hierarchy. -`event.type` represents a categorization "sub-bucket" that, when used along with the `event.category` field values, enables filtering events down to a level appropriate for single visualization. -This field is an array. This will allow proper categorization of some events that fall in multiple event types. -type: keyword +*`threat.enrichments.indicator.file.path.text`*:: ++ +-- +type: text -- -*`event.url`*:: +*`threat.enrichments.indicator.file.size`*:: + -- -URL linking to an external system to continue investigation of this event. -This URL links to another system where in-depth investigation of the specific occurence of this event can take place. Alert events, indicated by `event.kind:alert`, are a common use case for this field. +File size in bytes. +Only relevant when `file.type` is "file". -type: keyword +type: long -example: https://mysystem.mydomain.com/alert/5271dedb-f5b0-4218-87f0-4ac4870a38fe +example: 16384 -- -[float] -=== file +*`threat.enrichments.indicator.file.target_path`*:: ++ +-- +Target path for symlinks. -A file is defined as a set of information that has been created on, or has existed on a filesystem. -File objects can be associated with host events, network events, and/or file events (e.g., those produced by File Integrity Monitoring [FIM] products or services). File fields provide details about the affected file associated with the event or metric. 
+type: keyword +-- -*`file.accessed`*:: +*`threat.enrichments.indicator.file.target_path.text`*:: + -- -Last time the file was accessed. -Note that not all filesystems keep track of access time. - -type: date +type: text -- -*`file.attributes`*:: +*`threat.enrichments.indicator.file.type`*:: + -- -Array of file attributes. -Attributes names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write. +File type (file, dir, or symlink). type: keyword -example: ["readonly", "system"] +example: file -- -*`file.code_signature.exists`*:: +*`threat.enrichments.indicator.file.uid`*:: + -- -Boolean to capture if a signature is present. +The user ID (UID) or security identifier (SID) of the file owner. -type: boolean +type: keyword -example: true +example: 1001 -- -*`file.code_signature.status`*:: +*`threat.enrichments.indicator.first_seen`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. +The date and time when intelligence source first reported sighting this indicator. -type: keyword +type: date -example: ERROR_UNTRUSTED_ROOT +example: 2020-11-05T17:25:47.000Z -- -*`file.code_signature.subject_name`*:: +*`threat.enrichments.indicator.geo.city_name`*:: + -- -Subject name of the code signer +City name. type: keyword -example: Microsoft Corporation +example: Montreal -- -*`file.code_signature.trusted`*:: +*`threat.enrichments.indicator.geo.continent_code`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Two-letter code representing continent's name. -type: boolean +type: keyword -example: true +example: NA -- -*`file.code_signature.valid`*:: +*`threat.enrichments.indicator.geo.continent_name`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +Name of the continent. -type: boolean +type: keyword -example: true +example: North America -- -*`file.created`*:: +*`threat.enrichments.indicator.geo.country_iso_code`*:: + -- -File creation time. -Note that not all filesystems store the creation time. +Country ISO code. -type: date +type: keyword + +example: CA -- -*`file.ctime`*:: +*`threat.enrichments.indicator.geo.country_name`*:: + -- -Last time the file attributes or metadata changed. -Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. +Country name. -type: date +type: keyword + +example: Canada -- -*`file.device`*:: +*`threat.enrichments.indicator.geo.location`*:: + -- -Device that is the source of the file. +Longitude and latitude. -type: keyword +type: geo_point -example: sda +example: { "lon": -73.614830, "lat": 45.505918 } -- -*`file.directory`*:: +*`threat.enrichments.indicator.geo.name`*:: + -- -Directory where the file is located. It should include the drive letter, when appropriate. +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. 
type: keyword -example: /home/alice +example: boston-dc -- -*`file.drive_letter`*:: +*`threat.enrichments.indicator.geo.postal_code`*:: + -- -Drive letter where the file is located. This field is only relevant on Windows. -The value should be uppercase, and not include the colon. +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. type: keyword -example: C +example: 94040 -- -*`file.extension`*:: +*`threat.enrichments.indicator.geo.region_iso_code`*:: + -- -File extension. +Region ISO code. type: keyword -example: png +example: CA-QC -- -*`file.gid`*:: +*`threat.enrichments.indicator.geo.region_name`*:: + -- -Primary group ID (GID) of the file. +Region name. type: keyword -example: 1001 +example: Quebec -- -*`file.group`*:: +*`threat.enrichments.indicator.geo.timezone`*:: + -- -Primary group name of the file. +The time zone of the location, such as IANA time zone name. type: keyword -example: alice +example: America/Argentina/Buenos_Aires -- -*`file.hash.md5`*:: +*`threat.enrichments.indicator.hash.md5`*:: + -- MD5 hash. @@ -4058,7 +17073,7 @@ type: keyword -- -*`file.hash.sha1`*:: +*`threat.enrichments.indicator.hash.sha1`*:: + -- SHA1 hash. @@ -4067,7 +17082,7 @@ type: keyword -- -*`file.hash.sha256`*:: +*`threat.enrichments.indicator.hash.sha256`*:: + -- SHA256 hash. @@ -4076,7 +17091,7 @@ type: keyword -- -*`file.hash.sha512`*:: +*`threat.enrichments.indicator.hash.sha512`*:: + -- SHA512 hash. @@ -4085,3638 +17100,3885 @@ type: keyword -- -*`file.inode`*:: +*`threat.enrichments.indicator.hash.ssdeep`*:: + -- -Inode representing the file in the filesystem. +SSDEEP hash. type: keyword -example: 256383 +-- +*`threat.enrichments.indicator.ip`*:: ++ -- +Identifies a threat indicator as an IP address (irrespective of direction). -*`file.mime_type`*:: +type: ip + +example: 1.2.3.4 + +-- + +*`threat.enrichments.indicator.last_seen`*:: + -- -MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. +The date and time when intelligence source last reported sighting this indicator. -type: keyword +type: date + +example: 2020-11-05T17:25:47.000Z -- -*`file.mode`*:: +*`threat.enrichments.indicator.marking.tlp`*:: + -- -Mode of the file in octal representation. +Traffic Light Protocol sharing markings. Recommended values are: + * WHITE + * GREEN + * AMBER + * RED type: keyword -example: 0640 +example: White -- -*`file.mtime`*:: +*`threat.enrichments.indicator.modified_at`*:: + -- -Last time the file content was modified. +The date and time when intelligence source last modified information for this indicator. type: date +example: 2020-11-05T17:25:47.000Z + -- -*`file.name`*:: +*`threat.enrichments.indicator.pe.architecture`*:: + -- -Name of the file including the extension, without the directory. +CPU architecture target for the file. type: keyword -example: example.png +example: x64 -- -*`file.owner`*:: +*`threat.enrichments.indicator.pe.company`*:: + -- -File owner's username. +Internal company name of the file, provided at compile-time. type: keyword -example: alice +example: Microsoft Corporation -- -*`file.path`*:: +*`threat.enrichments.indicator.pe.description`*:: + -- -Full path to the file, including the file name. It should include the drive letter, when appropriate. 
+Internal description of the file, provided at compile-time. type: keyword -example: /home/alice/example.png +example: Paint -- -*`file.path.text`*:: +*`threat.enrichments.indicator.pe.file_version`*:: + -- -type: text +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 -- -*`file.pe.company`*:: +*`threat.enrichments.indicator.pe.imphash`*:: + -- -Internal company name of the file, provided at compile-time. +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. type: keyword -example: Microsoft Corporation +example: 0c6803c4e922103c4dca5963aad36ddf -- -*`file.pe.description`*:: +*`threat.enrichments.indicator.pe.original_file_name`*:: + -- -Internal description of the file, provided at compile-time. +Internal name of the file, provided at compile-time. type: keyword -example: Paint +example: MSPAINT.EXE -- -*`file.pe.file_version`*:: +*`threat.enrichments.indicator.pe.product`*:: + -- -Internal version of the file, provided at compile-time. +Internal product name of the file, provided at compile-time. type: keyword -example: 6.3.9600.17415 +example: Microsoft® Windows® Operating System -- -*`file.pe.original_file_name`*:: +*`threat.enrichments.indicator.port`*:: + -- -Internal name of the file, provided at compile-time. +Identifies a threat indicator as a port number (irrespective of direction). -type: keyword +type: long -example: MSPAINT.EXE +example: 443 -- -*`file.pe.product`*:: +*`threat.enrichments.indicator.provider`*:: + -- -Internal product name of the file, provided at compile-time. +The name of the indicator's provider. type: keyword -example: Microsoft® Windows® Operating System +example: lrz_urlhaus -- -*`file.size`*:: +*`threat.enrichments.indicator.reference`*:: + -- -File size in bytes. -Only relevant when `file.type` is "file". +Reference URL linking to additional information about this indicator. -type: long +type: keyword -example: 16384 +example: https://system.example.com/indicator/0001234 -- -*`file.target_path`*:: +*`threat.enrichments.indicator.registry.data.bytes`*:: + -- -Target path for symlinks. +Original bytes written with base64 encoding. +For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. type: keyword +example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= + -- -*`file.target_path.text`*:: +*`threat.enrichments.indicator.registry.data.strings`*:: + -- -type: text +Content when writing string types. +Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). + +type: keyword + +example: ["C:\rta\red_ttp\bin\myapp.exe"] -- -*`file.type`*:: +*`threat.enrichments.indicator.registry.data.type`*:: + -- -File type (file, dir, or symlink). 
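The `registry.data.strings` convention described above (always an array, with numeric registry types rendered as decimal strings) is easy to get wrong. One possible normalization, sketched with invented inputs:

[source,python]
----
def registry_strings(reg_type: str, value) -> list:
    """Normalize a raw registry value into the registry.data.strings shape."""
    if reg_type == "REG_MULTI_SZ":
        return list(value)            # already a sequence of strings
    if reg_type in ("REG_DWORD", "REG_QWORD"):
        return [str(int(value))]      # decimal representation, e.g. "1"
    return [str(value)]               # REG_SZ / REG_EXPAND_SZ: one string

registry_strings("REG_DWORD", 1)      # ["1"]
registry_strings("REG_SZ", r"C:\rta\red_ttp\bin\myapp.exe")
----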
+Standard registry type for encoding contents type: keyword -example: file +example: REG_SZ -- -*`file.uid`*:: +*`threat.enrichments.indicator.registry.hive`*:: + -- -The user ID (UID) or security identifier (SID) of the file owner. +Abbreviated name for the hive. type: keyword -example: 1001 +example: HKLM -- -[float] -=== geo +*`threat.enrichments.indicator.registry.key`*:: ++ +-- +Hive-relative path of keys. -Geo fields can carry data about a specific location related to an event. -This geolocation information can be derived from techniques such as Geo IP, or be user-supplied. +type: keyword +example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe -*`geo.city_name`*:: +-- + +*`threat.enrichments.indicator.registry.path`*:: + -- -City name. +Full path, including hive, key and value type: keyword -example: Montreal +example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger -- -*`geo.continent_name`*:: +*`threat.enrichments.indicator.registry.value`*:: + -- -Name of the continent. +Name of the value written. type: keyword -example: North America +example: Debugger -- -*`geo.country_iso_code`*:: +*`threat.enrichments.indicator.scanner_stats`*:: + -- -Country ISO code. +Count of AV/EDR vendors that successfully detected malicious file or URL. -type: keyword +type: long -example: CA +example: 4 -- -*`geo.country_name`*:: +*`threat.enrichments.indicator.sightings`*:: + -- -Country name. +Number of times this indicator was observed conducting threat activity. + +type: long + +example: 20 + +-- + +*`threat.enrichments.indicator.type`*:: ++ +-- +Type of indicator as represented by Cyber Observable in STIX 2.0. Recommended values: + * autonomous-system + * artifact + * directory + * domain-name + * email-addr + * file + * ipv4-addr + * ipv6-addr + * mac-addr + * mutex + * port + * process + * software + * url + * user-account + * windows-registry-key + * x509-certificate type: keyword -example: Canada +example: ipv4-addr -- -*`geo.location`*:: +*`threat.enrichments.indicator.url.domain`*:: + -- -Longitude and latitude. +Domain of the url, such as "www.elastic.co". +In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. +If the URL contains a literal IPv6 address enclosed by `[` and `]` (IETF RFC 2732), the `[` and `]` characters should also be captured in the `domain` field. -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: www.elastic.co -- -*`geo.name`*:: +*`threat.enrichments.indicator.url.extension`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +The field contains the file extension from the original request url, excluding the leading dot. +The file extension is only set if it exists, as not every url has a file extension. +The leading period must not be included. For example, the value must be "png", not ".png". +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). type: keyword -example: boston-dc +example: png -- -*`geo.region_iso_code`*:: +*`threat.enrichments.indicator.url.fragment`*:: + -- -Region ISO code. +Portion of the url after the `#`, such as "top". 
+The `#` is not part of the fragment. type: keyword -example: CA-QC - -- -*`geo.region_name`*:: +*`threat.enrichments.indicator.url.full`*:: + -- -Region name. +If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. type: keyword -example: Quebec +example: https://www.elastic.co:443/search?q=elasticsearch#top -- -[float] -=== group - -The group fields are meant to represent groups that are relevant to the event. +*`threat.enrichments.indicator.url.full.text`*:: ++ +-- +type: text +-- -*`group.domain`*:: +*`threat.enrichments.indicator.url.original`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Unmodified original url as seen in the event source. +Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. +This field is meant to represent the URL as it was observed, complete or not. type: keyword +example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch + -- -*`group.id`*:: +*`threat.enrichments.indicator.url.original.text`*:: + -- -Unique identifier for the group on the system/platform. - -type: keyword +type: text -- -*`group.name`*:: +*`threat.enrichments.indicator.url.password`*:: + -- -Name of the group. +Password of the request. type: keyword -- -[float] -=== hash - -The hash fields represent different hash algorithms and their values. -Field names for common hashes (e.g. MD5, SHA1) are predefined. Add fields for other hashes by lowercasing the hash algorithm name and using underscore separators as appropriate (snake case, e.g. sha3_512). - - -*`hash.md5`*:: +*`threat.enrichments.indicator.url.path`*:: + -- -MD5 hash. +Path of the request, such as "/search". type: keyword -- -*`hash.sha1`*:: +*`threat.enrichments.indicator.url.port`*:: + -- -SHA1 hash. +Port of the request, such as 443. -type: keyword +type: long + +example: 443 + +format: string -- -*`hash.sha256`*:: +*`threat.enrichments.indicator.url.query`*:: + -- -SHA256 hash. +The query field describes the query string of the request, such as "q=elasticsearch". +The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. type: keyword -- -*`hash.sha512`*:: +*`threat.enrichments.indicator.url.registered_domain`*:: + -- -SHA512 hash. +The highest registered url domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". type: keyword --- - -[float] -=== host - -A host is defined as a general computing instance. -ECS host.* fields should be populated with details about the host on which the event happened, or from which the measurement was taken. Host types include hardware, virtual machines, Docker containers, and Kubernetes nodes. +example: example.com +-- -*`host.architecture`*:: +*`threat.enrichments.indicator.url.scheme`*:: + -- -Operating system architecture. +Scheme of the request, such as "https". +Note: The `:` is not part of the scheme. 
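The delimiter rules spelled out for these url fields (scheme without the `:`, query without the `?`, fragment without the `#`) map directly onto a standard URL parser. A sketch using Python's `urllib.parse` with the example URL from this section:

[source,python]
----
from urllib.parse import urlsplit

parts = urlsplit("https://www.elastic.co:443/search?q=elasticsearch#top")
url_fields = {
    "scheme": parts.scheme,      # "https" (no trailing ":")
    "domain": parts.hostname,    # "www.elastic.co"
    "port": parts.port,          # 443
    "path": parts.path,          # "/search"
    "query": parts.query,        # "q=elasticsearch" (no leading "?")
    "fragment": parts.fragment,  # "top" (no leading "#")
    "full": parts.geturl(),
}
----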
type: keyword -example: x86_64 +example: https -- -*`host.domain`*:: +*`threat.enrichments.indicator.url.subdomain`*:: + -- -Name of the domain of which the host is a member. -For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example, the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. type: keyword -example: CONTOSO +example: east -- -*`host.geo.city_name`*:: +*`threat.enrichments.indicator.url.top_level_domain`*:: + -- -City name. +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword -example: Montreal +example: co.uk -- -*`host.geo.continent_name`*:: +*`threat.enrichments.indicator.url.username`*:: + -- -Name of the continent. +Username of the request. type: keyword -example: North America - -- -*`host.geo.country_iso_code`*:: +*`threat.enrichments.indicator.x509.alternative_names`*:: + -- -Country ISO code. +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. type: keyword -example: CA +example: *.elastic.co -- -*`host.geo.country_name`*:: +*`threat.enrichments.indicator.x509.issuer.common_name`*:: + -- -Country name. +List of common name (CN) of issuing certificate authority. type: keyword -example: Canada +example: Example SHA2 High Assurance Server CA -- -*`host.geo.location`*:: +*`threat.enrichments.indicator.x509.issuer.country`*:: + -- -Longitude and latitude. +List of country (C) codes -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: US -- -*`host.geo.name`*:: +*`threat.enrichments.indicator.x509.issuer.distinguished_name`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Distinguished name (DN) of issuing certificate authority. type: keyword -example: boston-dc +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA -- -*`host.geo.region_iso_code`*:: +*`threat.enrichments.indicator.x509.issuer.locality`*:: + -- -Region ISO code. +List of locality names (L) type: keyword -example: CA-QC +example: Mountain View -- -*`host.geo.region_name`*:: +*`threat.enrichments.indicator.x509.issuer.organization`*:: + -- -Region name. +List of organizations (O) of issuing certificate authority. type: keyword -example: Quebec +example: Example Inc -- -*`host.hostname`*:: +*`threat.enrichments.indicator.x509.issuer.organizational_unit`*:: + -- -Hostname of the host.
-It normally contains what the `hostname` command returns on the host machine. +List of organizational units (OU) of issuing certificate authority. type: keyword +example: www.example.com + -- -*`host.id`*:: +*`threat.enrichments.indicator.x509.issuer.state_or_province`*:: + -- -Unique host id. -As hostname is not always unique, use values that are meaningful in your environment. -Example: The current usage of `beat.name`. +List of state or province names (ST, S, or P) type: keyword +example: California + -- -*`host.ip`*:: +*`threat.enrichments.indicator.x509.not_after`*:: + -- -Host ip addresses. +Time at which the certificate is no longer considered valid. -type: ip +type: date + +example: 2020-07-16 03:15:39+00:00 -- -*`host.mac`*:: +*`threat.enrichments.indicator.x509.not_before`*:: + -- -Host mac addresses. +Time at which the certificate is first considered valid. -type: keyword +type: date + +example: 2019-08-16 01:40:25+00:00 -- -*`host.name`*:: +*`threat.enrichments.indicator.x509.public_key_algorithm`*:: + -- -Name of the host. -It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. +Algorithm used to generate the public key. type: keyword +example: RSA + -- -*`host.os.family`*:: +*`threat.enrichments.indicator.x509.public_key_curve`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +The curve used by the elliptic curve public key algorithm. This is algorithm specific. type: keyword -example: debian +example: nistp521 -- -*`host.os.full`*:: +*`threat.enrichments.indicator.x509.public_key_exponent`*:: + -- -Operating system name, including the version or code name. +Exponent used to derive the public key. This is algorithm specific. -type: keyword +type: long -example: Mac OS Mojave +example: 65537 + +Field is not indexed. -- -*`host.os.full.text`*:: +*`threat.enrichments.indicator.x509.public_key_size`*:: + -- -type: text +The size of the public key space in bits. + +type: long + +example: 2048 -- -*`host.os.kernel`*:: +*`threat.enrichments.indicator.x509.serial_number`*:: + -- -Operating system kernel version as a raw string. +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. type: keyword -example: 4.4.0-112-generic +example: 55FBB9C7DEBF09809D12CCAA -- -*`host.os.name`*:: +*`threat.enrichments.indicator.x509.signature_algorithm`*:: + -- -Operating system name, without the version. +Identifier for certificate signature algorithm. We recommend using names found in the Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. type: keyword -example: Mac OS X +example: SHA256-RSA -- -*`host.os.name.text`*:: +*`threat.enrichments.indicator.x509.subject.common_name`*:: + -- -type: text +List of common names (CN) of subject. + +type: keyword + +example: shared.global.example.net -- -*`host.os.platform`*:: +*`threat.enrichments.indicator.x509.subject.country`*:: + -- -Operating system platform (such centos, ubuntu, windows). +List of country (C) codes type: keyword -example: darwin +example: US -- -*`host.os.version`*:: +*`threat.enrichments.indicator.x509.subject.distinguished_name`*:: + -- -Operating system version as a raw string. +Distinguished name (DN) of the certificate subject entity.
type: keyword -example: 10.14.1 +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net -- -*`host.type`*:: +*`threat.enrichments.indicator.x509.subject.locality`*:: + -- -Type of host. -For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. +List of locality names (L) type: keyword +example: San Francisco + -- -*`host.uptime`*:: +*`threat.enrichments.indicator.x509.subject.organization`*:: + -- -Seconds the host has been up. +List of organizations (O) of subject. -type: long +type: keyword -example: 1325 +example: Example, Inc. -- -*`host.user.domain`*:: +*`threat.enrichments.indicator.x509.subject.organizational_unit`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +List of organizational units (OU) of subject. type: keyword -- -*`host.user.email`*:: +*`threat.enrichments.indicator.x509.subject.state_or_province`*:: + -- -User email address. +List of state or province names (ST, S, or P) type: keyword +example: California + -- -*`host.user.full_name`*:: +*`threat.enrichments.indicator.x509.version_number`*:: + -- -User's full name, if available. +Version of x509 format. type: keyword -example: Albert Einstein +example: 3 -- -*`host.user.full_name.text`*:: +*`threat.enrichments.matched.atomic`*:: + -- -type: text +Identifies the atomic indicator value that matched a local environment endpoint or network event. + +type: keyword + +example: bad-domain.com -- -*`host.user.group.domain`*:: +*`threat.enrichments.matched.field`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Identifies the field of the atomic indicator that matched a local environment endpoint or network event. type: keyword +example: file.hash.sha256 + -- -*`host.user.group.id`*:: +*`threat.enrichments.matched.id`*:: + -- -Unique identifier for the group on the system/platform. +Identifies the _id of the indicator document enriching the event. type: keyword +example: ff93aee5-86a1-4a61-b0e6-0cdc313d01b5 + -- -*`host.user.group.name`*:: +*`threat.enrichments.matched.index`*:: + -- -Name of the group. +Identifies the _index of the indicator document enriching the event. type: keyword +example: filebeat-8.0.0-2021.05.23-000011 + -- -*`host.user.hash`*:: +*`threat.enrichments.matched.type`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. +Identifies the type of match that caused the event to be enriched with the given indicator type: keyword +example: indicator_match_rule + -- -*`host.user.id`*:: +*`threat.framework`*:: + -- -Unique identifiers of the user. +Name of the threat framework used to further categorize and classify the tactic and technique of the reported threat. Framework classification can be provided by detecting systems, evaluated at ingest time, or retrospectively tagged to events. type: keyword +example: MITRE ATT&CK + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`host.user.name`*:: +*`threat.group.alias`*:: + -- -Short name or login of the user. +The alias(es) of the group for a set of related intrusion activity that are tracked by a common name in the security community. While not required, you can use a MITRE ATT&CK® group alias(es). 
type: keyword

-example: albert
+example: [ "Magecart Group 6" ]

--

-*`host.user.name.text`*::
+*`threat.group.id`*::
+
--
-type: text
-
---
+The id of the group for a set of related intrusion activity that are tracked by a common name in the security community. While not required, you can use a MITRE ATT&CK® group id.

-[float]
-=== http
+type: keyword

-Fields related to HTTP activity. Use the `url` field set to store the url of the request.
+example: G0037

+--

-*`http.request.body.bytes`*::
+*`threat.group.name`*::
+
--
-Size in bytes of the request body.
-
-type: long
+The name of the group for a set of related intrusion activity that are tracked by a common name in the security community. While not required, you can use a MITRE ATT&CK® group name.

-example: 887
+type: keyword

-format: bytes
+example: FIN6

--

-*`http.request.body.content`*::
+*`threat.group.reference`*::
+
--
-The full HTTP request body.
+The reference URL of the group for a set of related intrusion activity that are tracked by a common name in the security community. While not required, you can use a MITRE ATT&CK® group reference URL.

type: keyword

-example: Hello world
+example: https://attack.mitre.org/groups/G0037/

--

-*`http.request.body.content.text`*::
+*`threat.indicator.as.number`*::
+
--
-type: text
+Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
+
+type: long
+
+example: 15169

--

-*`http.request.bytes`*::
+*`threat.indicator.as.organization.name`*::
+
--
-Total size in bytes of the request (body and headers).
-
-type: long
+Organization name.

-example: 1437
+type: keyword

-format: bytes
+example: Google LLC

--

-*`http.request.method`*::
+*`threat.indicator.as.organization.name.text`*::
+
--
-HTTP request method.
-The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS".
-
-type: keyword
-
-example: get, post, put
+type: text

--

-*`http.request.referrer`*::
+*`threat.indicator.confidence`*::
+
--
-Referrer for this HTTP request.
+Identifies the confidence rating assigned by the provider using STIX confidence scales.
+Recommended values:
+ * Not Specified, None, Low, Medium, High
+ * 0-10
+ * Admiralty Scale (1-6)
+ * DNI Scale (5-95)
+ * WEP Scale (Impossible - Certain)

type: keyword

-example: https://blog.example.com/
+example: High

--

-*`http.response.body.bytes`*::
+*`threat.indicator.description`*::
+
--
-Size in bytes of the response body.
-
-type: long
+Describes the type of action conducted by the threat.

-example: 887
+type: keyword

-format: bytes
+example: IP x.x.x.x was observed delivering the Angler EK.

--

-*`http.response.body.content`*::
+*`threat.indicator.email.address`*::
+
--
-The full HTTP response body.
+Identifies a threat indicator as an email address (irrespective of direction).

type: keyword

-example: Hello world
+example: phish@example.com

--

-*`http.response.body.content.text`*::
+*`threat.indicator.file.accessed`*::
+
--
-type: text
+Last time the file was accessed.
+Note that not all filesystems keep track of access time.
+
+type: date

--

-*`http.response.bytes`*::
+*`threat.indicator.file.attributes`*::
+
--
-Total size in bytes of the response (body and headers).
-
-type: long
+Array of file attributes.
+Attribute names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write.
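Because `threat.indicator.confidence` (above) accepts several scales, ingest pipelines typically normalize a vendor score onto the named values. A minimal sketch assuming a 0-10 vendor scale; the bucket boundaries are our assumption for illustration, not part of the field definition:

[source,python]
----
from typing import Optional

def confidence_label(score: Optional[float]) -> str:
    # Cut-offs below are illustrative assumptions, not mandated by ECS.
    if score is None:
        return "Not Specified"
    if score == 0:
        return "None"
    if score <= 3:
        return "Low"
    if score <= 6:
        return "Medium"
    return "High"
----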
-example: 1437 +type: keyword -format: bytes +example: ["readonly", "system"] -- -*`http.response.status_code`*:: +*`threat.indicator.file.code_signature.exists`*:: + -- -HTTP response status code. - -type: long +Boolean to capture if a signature is present. -example: 404 +type: boolean -format: string +example: true -- -*`http.version`*:: +*`threat.indicator.file.code_signature.signing_id`*:: + -- -HTTP version. +The identifier used to sign the process. +This is used to identify the application manufactured by a software vendor. The field is relevant to Apple *OS only. type: keyword -example: 1.1 +example: com.apple.xpc.proxy -- -[float] -=== interface - -The interface fields are used to record ingress and egress interface information when reported by an observer (e.g. firewall, router, load balancer) in the context of the observer handling a network connection. In the case of a single observer interface (e.g. network sensor on a span port) only the observer.ingress information should be populated. - - -*`interface.alias`*:: +*`threat.indicator.file.code_signature.status`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. type: keyword -example: outside +example: ERROR_UNTRUSTED_ROOT -- -*`interface.id`*:: +*`threat.indicator.file.code_signature.subject_name`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). +Subject name of the code signer type: keyword -example: 10 +example: Microsoft Corporation -- -*`interface.name`*:: +*`threat.indicator.file.code_signature.team_id`*:: + -- -Interface name as reported by the system. +The team identifier used to sign the process. +This is used to identify the team or vendor of a software product. The field is relevant to Apple *OS only. type: keyword -example: eth0 +example: EQHXZ8M8AV -- -[float] -=== log +*`threat.indicator.file.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. -Details about the event's logging mechanism or logging transport. -The log.* fields are typically populated with details about the logging mechanism used to create and/or transport the event. For example, syslog details belong under `log.syslog.*`. -The details specific to your event source are typically not logged under `log.*`, but rather in `event.*` or in other ECS fields. +type: boolean + +example: true +-- -*`log.level`*:: +*`threat.indicator.file.code_signature.valid`*:: + -- -Original log level of the log event. -If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). -Some examples are `warn`, `err`, `i`, `informational`. +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. -type: keyword +type: boolean -example: error +example: true -- -*`log.logger`*:: +*`threat.indicator.file.created`*:: + -- -The name of the logger inside an application. 
This is usually the name of the class which initialized the logger, or can be a custom name. - -type: keyword +File creation time. +Note that not all filesystems store the creation time. -example: org.elasticsearch.bootstrap.Bootstrap +type: date -- -*`log.origin.file.line`*:: +*`threat.indicator.file.ctime`*:: + -- -The line number of the file containing the source code which originated the log event. - -type: integer +Last time the file attributes or metadata changed. +Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. -example: 42 +type: date -- -*`log.origin.file.name`*:: +*`threat.indicator.file.device`*:: + -- -The name of the file containing the source code which originated the log event. Note that this is not the name of the log file. +Device that is the source of the file. type: keyword -example: Bootstrap.java +example: sda -- -*`log.origin.function`*:: +*`threat.indicator.file.directory`*:: + -- -The name of the function or method which originated the log event. +Directory where the file is located. It should include the drive letter, when appropriate. type: keyword -example: init +example: /home/alice -- -*`log.original`*:: +*`threat.indicator.file.drive_letter`*:: + -- -This is the original log message and contains the full log message before splitting it up in multiple parts. -In contrast to the `message` field which can contain an extracted part of the log message, this field contains the original, full log message. It can have already some modifications applied like encoding or new lines removed to clean up the log message. -This field is not indexed and doc_values are disabled so it can't be queried but the value can be retrieved from `_source`. +Drive letter where the file is located. This field is only relevant on Windows. +The value should be uppercase, and not include the colon. type: keyword -example: Sep 19 08:26:10 localhost My log +example: C -- -*`log.syslog`*:: +*`threat.indicator.file.elf.architecture`*:: + -- -The Syslog metadata of the event, if the event was transmitted via Syslog. Please see RFCs 5424 or 3164. +Machine architecture of the ELF file. -type: object +type: keyword + +example: x86-64 -- -*`log.syslog.facility.code`*:: +*`threat.indicator.file.elf.byte_order`*:: + -- -The Syslog numeric facility of the log event, if available. -According to RFCs 5424 and 3164, this value should be an integer between 0 and 23. - -type: long +Byte sequence of ELF file. -example: 23 +type: keyword -format: string +example: Little Endian -- -*`log.syslog.facility.name`*:: +*`threat.indicator.file.elf.cpu_type`*:: + -- -The Syslog text-based facility of the log event, if available. +CPU type of the ELF file. type: keyword -example: local7 +example: Intel -- -*`log.syslog.priority`*:: +*`threat.indicator.file.elf.creation_date`*:: + -- -Syslog numeric priority of the event, if available. -According to RFCs 5424 and 3164, the priority is 8 * facility + severity. This number is therefore expected to contain a value between 0 and 191. +Extracted when possible from the file's metadata. Indicates when it was built or compiled. It can also be faked by malware creators. -type: long - -example: 135 - -format: string +type: date -- -*`log.syslog.severity.code`*:: +*`threat.indicator.file.elf.exports`*:: + -- -The Syslog numeric severity of the log event, if available. -If the event source publishing via Syslog provides a different numeric severity value (e.g. 
firewall, IDS), your source's numeric severity should go to `event.severity`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `event.severity`. - -type: long +List of exported element names and types. -example: 3 +type: flattened -- -*`log.syslog.severity.name`*:: +*`threat.indicator.file.elf.header.abi_version`*:: + -- -The Syslog numeric severity of the log event, if available. -If the event source publishing via Syslog provides a different severity value (e.g. firewall, IDS), your source's text severity should go to `log.level`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `log.level`. +Version of the ELF Application Binary Interface (ABI). type: keyword -example: Error - -- -[float] -=== network - -The network is defined as the communication path over which a host or network event happens. -The network.* fields should be populated with details about the network activity associated with an event. - - -*`network.application`*:: +*`threat.indicator.file.elf.header.class`*:: + -- -A name given to an application level protocol. This can be arbitrarily assigned for things like microservices, but also apply to things like skype, icq, facebook, twitter. This would be used in situations where the vendor or service can be decoded such as from the source/dest IP owners, ports, or wire format. -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +Header class of the ELF file. type: keyword -example: aim - -- -*`network.bytes`*:: +*`threat.indicator.file.elf.header.data`*:: + -- -Total bytes transferred in both directions. -If `source.bytes` and `destination.bytes` are known, `network.bytes` is their sum. +Data table of the ELF header. -type: long - -example: 368 - -format: bytes +type: keyword -- -*`network.community_id`*:: +*`threat.indicator.file.elf.header.entrypoint`*:: + -- -A hash of source and destination IPs and ports, as well as the protocol used in a communication. This is a tool-agnostic standard to identify flows. -Learn more at https://github.com/corelight/community-id-spec. +Header entrypoint of the ELF file. -type: keyword +type: long -example: 1:hO+sN4H+MG5MY/8hIrXPqc4ZQz0= +format: string -- -*`network.direction`*:: +*`threat.indicator.file.elf.header.object_version`*:: + -- -Direction of the network traffic. -Recommended values are: - * inbound - * outbound - * internal - * external - * unknown - -When mapping events from a host-based monitoring context, populate this field from the host's point of view. -When mapping events from a network or perimeter-based monitoring context, populate this field from the point of view of your network perimeter. +"0x1" for original ELF files. type: keyword -example: inbound - -- -*`network.forwarded_ip`*:: +*`threat.indicator.file.elf.header.os_abi`*:: + -- -Host IP address when the source IP address is the proxy. - -type: ip +Application Binary Interface (ABI) of the Linux OS. -example: 192.1.1.2 +type: keyword -- -*`network.iana_number`*:: +*`threat.indicator.file.elf.header.type`*:: + -- -IANA Protocol Number (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). Standardized list of protocols. This aligns well with NetFlow and sFlow related logs which use the IANA Protocol Number. +Header type of the ELF file. 
type: keyword -example: 6 - -- -*`network.inner`*:: +*`threat.indicator.file.elf.header.version`*:: + -- -Network.inner fields are added in addition to network.vlan fields to describe the innermost VLAN when q-in-q VLAN tagging is present. Allowed fields include vlan.id and vlan.name. Inner vlan fields are typically used when sending traffic with multiple 802.1q encapsulations to a network sensor (e.g. Zeek, Wireshark.) +Version of the ELF header. -type: object +type: keyword -- -*`network.inner.vlan.id`*:: +*`threat.indicator.file.elf.imports`*:: + -- -VLAN ID as reported by the observer. - -type: keyword +List of imported element names and types. -example: 10 +type: flattened -- -*`network.inner.vlan.name`*:: +*`threat.indicator.file.elf.sections`*:: + -- -Optional VLAN name as reported by the observer. +An array containing an object for each section of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.sections.*`. -type: keyword - -example: outside +type: nested -- -*`network.name`*:: +*`threat.indicator.file.elf.sections.chi2`*:: + -- -Name given by operators to sections of their network. +Chi-square probability distribution of the section. -type: keyword +type: long -example: Guest Wifi +format: number -- -*`network.packets`*:: +*`threat.indicator.file.elf.sections.entropy`*:: + -- -Total packets transferred in both directions. -If `source.packets` and `destination.packets` are known, `network.packets` is their sum. +Shannon entropy calculation from the section. type: long -example: 24 +format: number -- -*`network.protocol`*:: +*`threat.indicator.file.elf.sections.flags`*:: + -- -L7 Network protocol name. ex. http, lumberjack, transport protocol. -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +ELF Section List flags. type: keyword -example: http - -- -*`network.transport`*:: +*`threat.indicator.file.elf.sections.name`*:: + -- -Same as network.iana_number, but instead using the Keyword name of the transport layer (udp, tcp, ipv6-icmp, etc.) -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +ELF Section List name. type: keyword -example: tcp - -- -*`network.type`*:: +*`threat.indicator.file.elf.sections.physical_offset`*:: + -- -In the OSI Model this would be the Network Layer. ipv4, ipv6, ipsec, pim, etc -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +ELF Section List offset. type: keyword -example: ipv4 - -- -*`network.vlan.id`*:: +*`threat.indicator.file.elf.sections.physical_size`*:: + -- -VLAN ID as reported by the observer. +ELF Section List physical size. -type: keyword +type: long -example: 10 +format: bytes -- -*`network.vlan.name`*:: +*`threat.indicator.file.elf.sections.type`*:: + -- -Optional VLAN name as reported by the observer. +ELF Section List type. type: keyword -example: outside +-- +*`threat.indicator.file.elf.sections.virtual_address`*:: ++ -- +ELF Section List virtual address. -[float] -=== observer +type: long -An observer is defined as a special network, security, or application device used to detect, observe, or create network, security, or application-related events and metrics. -This could be a custom hardware appliance or a server that has been configured to run special network, security, or application software. 
Examples include firewalls, web proxies, intrusion detection/prevention systems, network monitoring sensors, web application firewalls, data loss prevention systems, and APM servers. The observer.* fields shall be populated with details of the system, if any, that detects, observes and/or creates a network, security, or application event or metric. Message queues and ETL components used in processing events or metrics are not considered observers in ECS. +format: string +-- -*`observer.egress`*:: +*`threat.indicator.file.elf.sections.virtual_size`*:: + -- -Observer.egress holds information like interface number and name, vlan, and zone information to classify egress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. +ELF Section List virtual size. -type: object +type: long + +format: string -- -*`observer.egress.interface.alias`*:: +*`threat.indicator.file.elf.segments`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. +An array containing an object for each segment of the ELF file. +The keys that should be present in these objects are defined by sub-fields underneath `elf.segments.*`. -type: keyword - -example: outside +type: nested -- -*`observer.egress.interface.id`*:: +*`threat.indicator.file.elf.segments.sections`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). +ELF object segment sections. type: keyword -example: 10 - -- -*`observer.egress.interface.name`*:: +*`threat.indicator.file.elf.segments.type`*:: + -- -Interface name as reported by the system. +ELF object segment type. type: keyword -example: eth0 - -- -*`observer.egress.vlan.id`*:: +*`threat.indicator.file.elf.shared_libraries`*:: + -- -VLAN ID as reported by the observer. +List of shared libraries used by this ELF object. type: keyword -example: 10 - -- -*`observer.egress.vlan.name`*:: +*`threat.indicator.file.elf.telfhash`*:: + -- -Optional VLAN name as reported by the observer. +telfhash symbol hash for ELF file. type: keyword -example: outside - -- -*`observer.egress.zone`*:: +*`threat.indicator.file.extension`*:: + -- -Network zone of outbound traffic as reported by the observer to categorize the destination area of egress traffic, e.g. Internal, External, DMZ, HR, Legal, etc. +File extension, excluding the leading dot. +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). type: keyword -example: Public_Internet +example: png -- -*`observer.geo.city_name`*:: +*`threat.indicator.file.gid`*:: + -- -City name. +Primary group ID (GID) of the file. type: keyword -example: Montreal +example: 1001 -- -*`observer.geo.continent_name`*:: +*`threat.indicator.file.group`*:: + -- -Name of the continent. +Primary group name of the file. type: keyword -example: North America +example: alice -- -*`observer.geo.country_iso_code`*:: +*`threat.indicator.file.inode`*:: + -- -Country ISO code. +Inode representing the file in the filesystem. type: keyword -example: CA +example: 256383 -- -*`observer.geo.country_name`*:: +*`threat.indicator.file.mime_type`*:: + -- -Country name. +MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. 
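For `threat.indicator.file.elf.sections.entropy` above, the value is conventionally the Shannon entropy of the section's raw bytes, in bits per byte (0.0-8.0); packed or encrypted sections tend toward 8.0. A sketch of the usual calculation:

[source,python]
----
import math
from collections import Counter

def shannon_entropy(data: bytes) -> float:
    """Shannon entropy in bits per byte (0.0-8.0) of a section's contents."""
    if not data:
        return 0.0
    total = len(data)
    return -sum((n / total) * math.log2(n / total)
                for n in Counter(data).values())
----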
type: keyword -example: Canada - -- -*`observer.geo.location`*:: +*`threat.indicator.file.mode`*:: + -- -Longitude and latitude. +Mode of the file in octal representation. -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: 0640 -- -*`observer.geo.name`*:: +*`threat.indicator.file.mtime`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. - -type: keyword +Last time the file content was modified. -example: boston-dc +type: date -- -*`observer.geo.region_iso_code`*:: +*`threat.indicator.file.name`*:: + -- -Region ISO code. +Name of the file including the extension, without the directory. type: keyword -example: CA-QC +example: example.png -- -*`observer.geo.region_name`*:: +*`threat.indicator.file.owner`*:: + -- -Region name. +File owner's username. type: keyword -example: Quebec +example: alice -- -*`observer.hostname`*:: +*`threat.indicator.file.path`*:: + -- -Hostname of the observer. +Full path to the file, including the file name. It should include the drive letter, when appropriate. type: keyword +example: /home/alice/example.png + -- -*`observer.ingress`*:: +*`threat.indicator.file.path.text`*:: + -- -Observer.ingress holds information like interface number and name, vlan, and zone information to classify ingress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. - -type: object +type: text -- -*`observer.ingress.interface.alias`*:: +*`threat.indicator.file.size`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. +File size in bytes. +Only relevant when `file.type` is "file". -type: keyword +type: long -example: outside +example: 16384 -- -*`observer.ingress.interface.id`*:: +*`threat.indicator.file.target_path`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). +Target path for symlinks. type: keyword -example: 10 - -- -*`observer.ingress.interface.name`*:: +*`threat.indicator.file.target_path.text`*:: + -- -Interface name as reported by the system. - -type: keyword - -example: eth0 +type: text -- -*`observer.ingress.vlan.id`*:: +*`threat.indicator.file.type`*:: + -- -VLAN ID as reported by the observer. +File type (file, dir, or symlink). type: keyword -example: 10 +example: file -- -*`observer.ingress.vlan.name`*:: +*`threat.indicator.file.uid`*:: + -- -Optional VLAN name as reported by the observer. +The user ID (UID) or security identifier (SID) of the file owner. type: keyword -example: outside +example: 1001 -- -*`observer.ingress.zone`*:: +*`threat.indicator.first_seen`*:: + -- -Network zone of incoming traffic as reported by the observer to categorize the source area of ingress traffic. e.g. internal, External, DMZ, HR, Legal, etc. +The date and time when intelligence source first reported sighting this indicator. -type: keyword +type: date -example: DMZ +example: 2020-11-05T17:25:47.000Z -- -*`observer.ip`*:: +*`threat.indicator.geo.city_name`*:: + -- -IP addresses of the observer. +City name. -type: ip +type: keyword + +example: Montreal -- -*`observer.mac`*:: +*`threat.indicator.geo.continent_code`*:: + -- -MAC addresses of the observer +Two-letter code representing continent's name. 
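Many of the `threat.indicator.file.*` fields above map directly onto filesystem metadata. A sketch of collecting them with the Python standard library (POSIX-only where noted; the helper name is ours):

[source,python]
----
import os
import stat
import pwd   # POSIX-only
import grp   # POSIX-only

def file_indicator_fields(path: str) -> dict:
    st = os.stat(path)
    return {
        "path": path,
        "name": os.path.basename(path),
        "directory": os.path.dirname(path),
        # Only the last extension, without the leading dot ("gz", not "tar.gz").
        "extension": os.path.splitext(path)[1].lstrip("."),
        "size": st.st_size,
        "inode": str(st.st_ino),
        "uid": str(st.st_uid),
        "gid": str(st.st_gid),
        "owner": pwd.getpwuid(st.st_uid).pw_name,
        "group": grp.getgrgid(st.st_gid).gr_name,
        "mode": oct(stat.S_IMODE(st.st_mode))[2:].zfill(4),  # e.g. "0640"
    }
----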
type: keyword +example: NA + -- -*`observer.name`*:: +*`threat.indicator.geo.continent_name`*:: + -- -Custom name of the observer. -This is a name that can be given to an observer. This can be helpful for example if multiple firewalls of the same model are used in an organization. -If no custom name is needed, the field can be left empty. +Name of the continent. type: keyword -example: 1_proxySG +example: North America -- -*`observer.os.family`*:: +*`threat.indicator.geo.country_iso_code`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +Country ISO code. type: keyword -example: debian +example: CA -- -*`observer.os.full`*:: +*`threat.indicator.geo.country_name`*:: + -- -Operating system name, including the version or code name. +Country name. type: keyword -example: Mac OS Mojave +example: Canada -- -*`observer.os.full.text`*:: +*`threat.indicator.geo.location`*:: + -- -type: text +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } -- -*`observer.os.kernel`*:: +*`threat.indicator.geo.name`*:: + -- -Operating system kernel version as a raw string. +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. type: keyword -example: 4.4.0-112-generic +example: boston-dc -- -*`observer.os.name`*:: +*`threat.indicator.geo.postal_code`*:: + -- -Operating system name, without the version. +Postal code associated with the location. +Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. type: keyword -example: Mac OS X +example: 94040 -- -*`observer.os.name.text`*:: +*`threat.indicator.geo.region_iso_code`*:: + -- -type: text +Region ISO code. + +type: keyword + +example: CA-QC -- -*`observer.os.platform`*:: +*`threat.indicator.geo.region_name`*:: + -- -Operating system platform (such centos, ubuntu, windows). +Region name. type: keyword -example: darwin +example: Quebec -- -*`observer.os.version`*:: +*`threat.indicator.geo.timezone`*:: + -- -Operating system version as a raw string. +The time zone of the location, such as IANA time zone name. type: keyword -example: 10.14.1 +example: America/Argentina/Buenos_Aires -- -*`observer.product`*:: +*`threat.indicator.hash.md5`*:: + -- -The product name of the observer. +MD5 hash. type: keyword -example: s200 - -- -*`observer.serial_number`*:: +*`threat.indicator.hash.sha1`*:: + -- -Observer serial number. +SHA1 hash. type: keyword -- -*`observer.type`*:: +*`threat.indicator.hash.sha256`*:: + -- -The type of the observer the data is coming from. -There is no predefined list of observer types. Some examples are `forwarder`, `firewall`, `ids`, `ips`, `proxy`, `poller`, `sensor`, `APM server`. +SHA256 hash. type: keyword -example: firewall - -- -*`observer.vendor`*:: +*`threat.indicator.hash.sha512`*:: + -- -Vendor name of the observer. +SHA512 hash. type: keyword -example: Symantec - -- -*`observer.version`*:: +*`threat.indicator.hash.ssdeep`*:: + -- -Observer version. +SSDEEP hash. type: keyword -- -[float] -=== organization - -The organization fields enrich data with information about the company or entity the data is associated with. -These fields help you arrange or filter data stored in an index by one or multiple organizations. - - -*`organization.id`*:: +*`threat.indicator.ip`*:: + -- -Unique identifier for the organization. 
+Identifies a threat indicator as an IP address (irrespective of direction). -type: keyword +type: ip + +example: 1.2.3.4 -- -*`organization.name`*:: +*`threat.indicator.last_seen`*:: + -- -Organization name. +The date and time when intelligence source last reported sighting this indicator. -type: keyword +type: date --- +example: 2020-11-05T17:25:47.000Z -*`organization.name.text`*:: -+ -- -type: text +*`threat.indicator.marking.tlp`*:: ++ -- +Traffic Light Protocol sharing markings. +Recommended values are: + * WHITE + * GREEN + * AMBER + * RED -[float] -=== os +type: keyword -The OS fields contain information about the operating system. +example: WHITE +-- -*`os.family`*:: +*`threat.indicator.modified_at`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +The date and time when intelligence source last modified information for this indicator. -type: keyword +type: date -example: debian +example: 2020-11-05T17:25:47.000Z -- -*`os.full`*:: +*`threat.indicator.pe.architecture`*:: + -- -Operating system name, including the version or code name. +CPU architecture target for the file. type: keyword -example: Mac OS Mojave +example: x64 -- -*`os.full.text`*:: +*`threat.indicator.pe.company`*:: + -- -type: text +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation -- -*`os.kernel`*:: +*`threat.indicator.pe.description`*:: + -- -Operating system kernel version as a raw string. +Internal description of the file, provided at compile-time. type: keyword -example: 4.4.0-112-generic +example: Paint -- -*`os.name`*:: +*`threat.indicator.pe.file_version`*:: + -- -Operating system name, without the version. +Internal version of the file, provided at compile-time. type: keyword -example: Mac OS X +example: 6.3.9600.17415 -- -*`os.name.text`*:: +*`threat.indicator.pe.imphash`*:: + -- -type: text +A hash of the imports in a PE file. An imphash -- or import hash -- can be used to fingerprint binaries even after recompilation or other code-level transformations have occurred, which would change more traditional hash values. +Learn more at https://www.fireeye.com/blog/threat-research/2014/01/tracking-malware-import-hashing.html. + +type: keyword + +example: 0c6803c4e922103c4dca5963aad36ddf -- -*`os.platform`*:: +*`threat.indicator.pe.original_file_name`*:: + -- -Operating system platform (such centos, ubuntu, windows). +Internal name of the file, provided at compile-time. type: keyword -example: darwin +example: MSPAINT.EXE -- -*`os.version`*:: +*`threat.indicator.pe.product`*:: + -- -Operating system version as a raw string. +Internal product name of the file, provided at compile-time. type: keyword -example: 10.14.1 +example: Microsoft® Windows® Operating System -- -[float] -=== package +*`threat.indicator.port`*:: ++ +-- +Identifies a threat indicator as a port number (irrespective of direction). -These fields contain information about an installed software package. It contains general information about a package, such as name, version or size. It also contains installation details, such as time or location. +type: long + +example: 443 +-- -*`package.architecture`*:: +*`threat.indicator.provider`*:: + -- -Package architecture. +The name of the indicator's provider. type: keyword -example: x86_64 +example: lrz_urlhaus -- -*`package.build_version`*:: +*`threat.indicator.reference`*:: + -- -Additional information about the build version of the installed package. -For example use the commit SHA of a non-released package. 
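The import hash described under `threat.indicator.pe.imphash` above can be computed with the third-party `pefile` package (an assumed dependency, not something this repository ships):

[source,python]
----
import pefile  # third-party; assumed available

def pe_imphash(path: str) -> str:
    """Import hash of a PE file, suitable for threat.indicator.pe.imphash."""
    return pefile.PE(path).get_imphash()
----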
+Reference URL linking to additional information about this indicator. type: keyword -example: 36f4f7e89dd61b0988b12ee000b98966867710cd +example: https://system.example.com/indicator/0001234 -- -*`package.checksum`*:: +*`threat.indicator.registry.data.bytes`*:: + -- -Checksum of the installed package for verification. +Original bytes written with base64 encoding. +For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. type: keyword -example: 68b329da9893e34099c7d8ad5cb9c940 +example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= -- -*`package.description`*:: +*`threat.indicator.registry.data.strings`*:: + -- -Description of the package. +Content when writing string types. +Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). type: keyword -example: Open source programming language to build simple/reliable/efficient software. +example: ["C:\rta\red_ttp\bin\myapp.exe"] -- -*`package.install_scope`*:: +*`threat.indicator.registry.data.type`*:: + -- -Indicating how the package was installed, e.g. user-local, global. +Standard registry type for encoding contents type: keyword -example: global +example: REG_SZ -- -*`package.installed`*:: +*`threat.indicator.registry.hive`*:: + -- -Time when package was installed. +Abbreviated name for the hive. -type: date +type: keyword + +example: HKLM -- -*`package.license`*:: +*`threat.indicator.registry.key`*:: + -- -License under which the package was released. -Use a short name, e.g. the license identifier from SPDX License List where possible (https://spdx.org/licenses/). +Hive-relative path of keys. type: keyword -example: Apache License 2.0 +example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe -- -*`package.name`*:: +*`threat.indicator.registry.path`*:: + -- -Package name +Full path, including hive, key and value type: keyword -example: go +example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger -- -*`package.path`*:: +*`threat.indicator.registry.value`*:: + -- -Path where the package is installed. +Name of the value written. type: keyword -example: /usr/local/Cellar/go/1.12.9/ +example: Debugger -- -*`package.reference`*:: +*`threat.indicator.scanner_stats`*:: + -- -Home page or reference URL of the software in this package, if available. +Count of AV/EDR vendors that successfully detected malicious file or URL. -type: keyword +type: long -example: https://golang.org +example: 4 -- -*`package.size`*:: +*`threat.indicator.sightings`*:: + -- -Package size in bytes. +Number of times this indicator was observed conducting threat activity. type: long -example: 62231 - -format: string +example: 20 -- -*`package.type`*:: +*`threat.indicator.type`*:: + -- -Type of package. -This should contain the package file type, rather than the package manager name. Examples: rpm, dpkg, brew, npm, gem, nupkg, jar. +Type of indicator as represented by Cyber Observable in STIX 2.0. 
+Recommended values: + * autonomous-system + * artifact + * directory + * domain-name + * email-addr + * file + * ipv4-addr + * ipv6-addr + * mac-addr + * mutex + * port + * process + * software + * url + * user-account + * windows-registry-key + * x509-certificate type: keyword -example: rpm +example: ipv4-addr -- -*`package.version`*:: +*`threat.indicator.url.domain`*:: + -- -Package version +Domain of the url, such as "www.elastic.co". +In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. +If the URL contains a literal IPv6 address enclosed by `[` and `]` (IETF RFC 2732), the `[` and `]` characters should also be captured in the `domain` field. type: keyword -example: 1.12.9 +example: www.elastic.co -- -[float] -=== pe - -These fields contain Windows Portable Executable (PE) metadata. - - -*`pe.company`*:: +*`threat.indicator.url.extension`*:: + -- -Internal company name of the file, provided at compile-time. +The field contains the file extension from the original request url, excluding the leading dot. +The file extension is only set if it exists, as not every url has a file extension. +The leading period must not be included. For example, the value must be "png", not ".png". +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). type: keyword -example: Microsoft Corporation +example: png -- -*`pe.description`*:: +*`threat.indicator.url.fragment`*:: + -- -Internal description of the file, provided at compile-time. +Portion of the url after the `#`, such as "top". +The `#` is not part of the fragment. type: keyword -example: Paint - -- -*`pe.file_version`*:: +*`threat.indicator.url.full`*:: + -- -Internal version of the file, provided at compile-time. +If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. type: keyword -example: 6.3.9600.17415 +example: https://www.elastic.co:443/search?q=elasticsearch#top -- -*`pe.original_file_name`*:: +*`threat.indicator.url.full.text`*:: + -- -Internal name of the file, provided at compile-time. - -type: keyword - -example: MSPAINT.EXE +type: text -- -*`pe.product`*:: +*`threat.indicator.url.original`*:: + -- -Internal product name of the file, provided at compile-time. +Unmodified original url as seen in the event source. +Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. +This field is meant to represent the URL as it was observed, complete or not. type: keyword -example: Microsoft® Windows® Operating System +example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch -- -[float] -=== process - -These fields contain information about a process. -These fields can help you correlate metrics information with a process id/name from a log message. The `process.pid` often stays in the metric itself and is copied to the global field for correlation. +*`threat.indicator.url.original.text`*:: ++ +-- +type: text +-- -*`process.args`*:: +*`threat.indicator.url.password`*:: + -- -Array of process arguments, starting with the absolute path to the executable. -May be filtered to protect sensitive information. +Password of the request. 
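The `threat.indicator.url.*` fields in this group decompose a URL into its parts. The standard library covers most of them, while `registered_domain`, `subdomain`, and `top_level_domain` (defined just below) require a public suffix list, sketched here with the third-party `tldextract` package (an assumption):

[source,python]
----
from urllib.parse import urlsplit
import tldextract  # third-party; assumed available for public-suffix lookups

original = "https://www.elastic.co:443/search?q=elasticsearch#top"
parts = urlsplit(original)
ext = tldextract.extract(original)

url_fields = {
    "original": original,
    "scheme": parts.scheme,        # "https", no trailing ":"
    "domain": parts.hostname,      # "www.elastic.co"
    "port": parts.port,            # 443
    "path": parts.path,            # "/search"
    "query": parts.query,          # "q=elasticsearch", "?" excluded
    "fragment": parts.fragment,    # "top", "#" excluded
    "registered_domain": ext.registered_domain,  # "elastic.co"
    "subdomain": ext.subdomain,                  # "www"
    "top_level_domain": ext.suffix,              # "co"
}
----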
type: keyword

-example: ['/usr/bin/ssh', '-l', 'user', '10.0.0.16']
-
--

-*`process.args_count`*::
+*`threat.indicator.url.path`*::
+
--
-Length of the process.args array.
-This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity.
-
-type: long
+Path of the request, such as "/search".

-example: 4
+type: keyword

--

-*`process.code_signature.exists`*::
+*`threat.indicator.url.port`*::
+
--
-Boolean to capture if a signature is present.
+Port of the request, such as 443.

-type: boolean
+type: long

-example: true
+example: 443
+
+format: string

--

-*`process.code_signature.status`*::
+*`threat.indicator.url.query`*::
+
--
-Additional information about the certificate status.
-This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked.
+The query field describes the query string of the request, such as "q=elasticsearch".
+The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases.

type: keyword

-example: ERROR_UNTRUSTED_ROOT
-
--

-*`process.code_signature.subject_name`*::
+*`threat.indicator.url.registered_domain`*::
+
--
-Subject name of the code signer
+The highest registered url domain, stripped of the subdomain.
+For example, the registered domain for "foo.example.com" is "example.com".
+This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk".

type: keyword

-example: Microsoft Corporation
+example: example.com

--

-*`process.code_signature.trusted`*::
+*`threat.indicator.url.scheme`*::
+
--
-Stores the trust status of the certificate chain.
-Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status.
+Scheme of the request, such as "https".
+Note: The `:` is not part of the scheme.

-type: boolean
+type: keyword

-example: true
+example: https

--

-*`process.code_signature.valid`*::
+*`threat.indicator.url.subdomain`*::
+
--
-Boolean to capture if the digital signature is verified against the binary content.
-Leave unpopulated if a certificate was unchecked.
+The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain.
+For example, the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period.

-type: boolean
+type: keyword

-example: true
+example: east

--

-*`process.command_line`*::
+*`threat.indicator.url.top_level_domain`*::
+
--
-Full command line that started the process, including the absolute path to the executable, and all arguments.
-Some arguments may be filtered to protect sensitive information.
+The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name.
For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword -example: /usr/bin/ssh -l user 10.0.0.16 +example: co.uk -- -*`process.command_line.text`*:: +*`threat.indicator.url.username`*:: + -- -type: text +Username of the request. + +type: keyword -- -*`process.entity_id`*:: +*`threat.indicator.x509.alternative_names`*:: + -- -Unique identifier for the process. -The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. -Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. type: keyword -example: c2c455d9f99375d +example: *.elastic.co -- -*`process.executable`*:: +*`threat.indicator.x509.issuer.common_name`*:: + -- -Absolute path to the process executable. +List of common name (CN) of issuing certificate authority. type: keyword -example: /usr/bin/ssh - --- - -*`process.executable.text`*:: -+ --- -type: text +example: Example SHA2 High Assurance Server CA -- -*`process.exit_code`*:: +*`threat.indicator.x509.issuer.country`*:: + -- -The exit code of the process, if this is a termination event. -The field should be absent if there is no exit code for the event (e.g. process start). +List of country (C) codes -type: long +type: keyword -example: 137 +example: US -- -*`process.hash.md5`*:: +*`threat.indicator.x509.issuer.distinguished_name`*:: + -- -MD5 hash. +Distinguished name (DN) of issuing certificate authority. type: keyword +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA + -- -*`process.hash.sha1`*:: +*`threat.indicator.x509.issuer.locality`*:: + -- -SHA1 hash. +List of locality names (L) type: keyword +example: Mountain View + -- -*`process.hash.sha256`*:: +*`threat.indicator.x509.issuer.organization`*:: + -- -SHA256 hash. +List of organizations (O) of issuing certificate authority. type: keyword +example: Example Inc + -- -*`process.hash.sha512`*:: +*`threat.indicator.x509.issuer.organizational_unit`*:: + -- -SHA512 hash. +List of organizational units (OU) of issuing certificate authority. type: keyword +example: www.example.com + -- -*`process.name`*:: +*`threat.indicator.x509.issuer.state_or_province`*:: + -- -Process name. -Sometimes called program name or similar. +List of state or province names (ST, S, or P) type: keyword -example: ssh +example: California -- -*`process.name.text`*:: +*`threat.indicator.x509.not_after`*:: + -- -type: text +Time at which the certificate is no longer considered valid. + +type: date + +example: 2020-07-16 03:15:39+00:00 -- -*`process.parent.args`*:: +*`threat.indicator.x509.not_before`*:: + -- -Array of process arguments. -May be filtered to protect sensitive information. +Time at which the certificate is first considered valid. 
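A sketch of deriving several of the `threat.indicator.x509.*` values above from a PEM certificate, using the third-party `cryptography` package (an assumed dependency); note the serial number convention of uppercase hex without colons:

[source,python]
----
from cryptography import x509

def x509_indicator_fields(pem: bytes) -> dict:
    cert = x509.load_pem_x509_certificate(pem)
    return {
        # Uppercase hex, no colons, per the serial_number guidance above.
        "serial_number": format(cert.serial_number, "X"),
        "issuer": {"distinguished_name": cert.issuer.rfc4514_string()},
        "subject": {"distinguished_name": cert.subject.rfc4514_string()},
        "not_before": cert.not_valid_before,
        "not_after": cert.not_valid_after,
        "version_number": str(cert.version.value + 1),  # x509 v3 is stored as 2
    }
----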
-type: keyword +type: date -example: ['ssh', '-l', 'user', '10.0.0.16'] +example: 2019-08-16 01:40:25+00:00 -- -*`process.parent.args_count`*:: +*`threat.indicator.x509.public_key_algorithm`*:: + -- -Length of the process.args array. -This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. +Algorithm used to generate the public key. -type: long +type: keyword -example: 4 +example: RSA -- -*`process.parent.code_signature.exists`*:: +*`threat.indicator.x509.public_key_curve`*:: + -- -Boolean to capture if a signature is present. +The curve used by the elliptic curve public key algorithm. This is algorithm specific. -type: boolean +type: keyword -example: true +example: nistp521 -- -*`process.parent.code_signature.status`*:: +*`threat.indicator.x509.public_key_exponent`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. +Exponent used to derive the public key. This is algorithm specific. -type: keyword +type: long -example: ERROR_UNTRUSTED_ROOT +example: 65537 + +Field is not indexed. -- -*`process.parent.code_signature.subject_name`*:: +*`threat.indicator.x509.public_key_size`*:: + -- -Subject name of the code signer +The size of the public key space in bits. -type: keyword +type: long -example: Microsoft Corporation +example: 2048 -- -*`process.parent.code_signature.trusted`*:: +*`threat.indicator.x509.serial_number`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. -type: boolean +type: keyword -example: true +example: 55FBB9C7DEBF09809D12CCAA -- -*`process.parent.code_signature.valid`*:: +*`threat.indicator.x509.signature_algorithm`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +Identifier for certificate signature algorithm. We recommend using names found in Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. -type: boolean +type: keyword -example: true +example: SHA256-RSA -- -*`process.parent.command_line`*:: +*`threat.indicator.x509.subject.common_name`*:: + -- -Full command line that started the process, including the absolute path to the executable, and all arguments. -Some arguments may be filtered to protect sensitive information. +List of common names (CN) of subject. type: keyword -example: /usr/bin/ssh -l user 10.0.0.16 +example: shared.global.example.net -- -*`process.parent.command_line.text`*:: +*`threat.indicator.x509.subject.country`*:: + -- -type: text +List of country (C) code + +type: keyword + +example: US -- -*`process.parent.entity_id`*:: +*`threat.indicator.x509.subject.distinguished_name`*:: + -- -Unique identifier for the process. -The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. 
-Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. +Distinguished name (DN) of the certificate subject entity. type: keyword -example: c2c455d9f99375d +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net -- -*`process.parent.executable`*:: +*`threat.indicator.x509.subject.locality`*:: + -- -Absolute path to the process executable. +List of locality names (L) type: keyword -example: /usr/bin/ssh +example: San Francisco -- -*`process.parent.executable.text`*:: +*`threat.indicator.x509.subject.organization`*:: + -- -type: text +List of organizations (O) of subject. + +type: keyword + +example: Example, Inc. -- -*`process.parent.exit_code`*:: +*`threat.indicator.x509.subject.organizational_unit`*:: + -- -The exit code of the process, if this is a termination event. -The field should be absent if there is no exit code for the event (e.g. process start). - -type: long +List of organizational units (OU) of subject. -example: 137 +type: keyword -- -*`process.parent.hash.md5`*:: +*`threat.indicator.x509.subject.state_or_province`*:: + -- -MD5 hash. +List of state or province names (ST, S, or P) type: keyword +example: California + -- -*`process.parent.hash.sha1`*:: +*`threat.indicator.x509.version_number`*:: + -- -SHA1 hash. +Version of x509 format. type: keyword +example: 3 + -- -*`process.parent.hash.sha256`*:: +*`threat.software.id`*:: + -- -SHA256 hash. +The id of the software used by this threat to conduct behavior commonly modeled using MITRE ATT&CK®. While not required, you can use a MITRE ATT&CK® software id. type: keyword +example: S0552 + -- -*`process.parent.hash.sha512`*:: +*`threat.software.name`*:: + -- -SHA512 hash. +The name of the software used by this threat to conduct behavior commonly modeled using MITRE ATT&CK®. While not required, you can use a MITRE ATT&CK® software name. type: keyword +example: AdFind + -- -*`process.parent.name`*:: +*`threat.software.platforms`*:: + -- -Process name. -Sometimes called program name or similar. +The platforms of the software used by this threat to conduct behavior commonly modeled using MITRE ATT&CK®. While not required, you can use a MITRE ATT&CK® software platforms. +Recommended Values: + * AWS + * Azure + * Azure AD + * GCP + * Linux + * macOS + * Network + * Office 365 + * SaaS + * Windows type: keyword -example: ssh +example: [ "Windows" ] -- -*`process.parent.name.text`*:: +*`threat.software.reference`*:: + -- -type: text +The reference URL of the software used by this threat to conduct behavior commonly modeled using MITRE ATT&CK®. While not required, you can use a MITRE ATT&CK® software reference URL. + +type: keyword + +example: https://attack.mitre.org/software/S0552/ -- -*`process.parent.pgid`*:: +*`threat.software.type`*:: + -- -Identifier of the group of processes the process belongs to. +The type of software used by this threat to conduct behavior commonly modeled using MITRE ATT&CK®. While not required, you can use a MITRE ATT&CK® software type. +Recommended values + * Malware + * Tool -type: long +type: keyword -format: string +example: Tool -- -*`process.parent.pid`*:: +*`threat.tactic.id`*:: + -- -Process id. +The id of tactic used by this threat. You can use a MITRE ATT&CK® tactic, for example. (ex. https://attack.mitre.org/tactics/TA0002/ ) -type: long +type: keyword -example: 4242 +example: TA0002 -format: string +{yes-icon} {ecs-ref}[ECS] field. 
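The `threat.tactic.*` fields above and the `threat.technique.*` fields that follow nest naturally. Grouping the MITRE ATT&CK® example values used throughout this section:

[source,python]
----
# The values below are the example values from the field definitions
# in this section, arranged to show how the fields relate.
attack = {
    "tactic": {
        "id": "TA0002",
        "name": "Execution",
        "reference": "https://attack.mitre.org/tactics/TA0002/",
    },
    "technique": {
        "id": "T1059",
        "name": "Command and Scripting Interpreter",
        "reference": "https://attack.mitre.org/techniques/T1059/",
        "subtechnique": {
            "id": "T1059.001",
            "name": "PowerShell",
            "reference": "https://attack.mitre.org/techniques/T1059/001/",
        },
    },
}
----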
-- -*`process.parent.ppid`*:: +*`threat.tactic.name`*:: + -- -Parent process' pid. +Name of the type of tactic used by this threat. You can use a MITRE ATT&CK® tactic, for example. (ex. https://attack.mitre.org/tactics/TA0002/) -type: long +type: keyword -example: 4241 +example: Execution -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.parent.start`*:: +*`threat.tactic.reference`*:: + -- -The time the process started. +The reference url of tactic used by this threat. You can use a MITRE ATT&CK® tactic, for example. (ex. https://attack.mitre.org/tactics/TA0002/ ) -type: date +type: keyword -example: 2016-05-23T08:05:34.853Z +example: https://attack.mitre.org/tactics/TA0002/ + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.parent.thread.id`*:: +*`threat.technique.id`*:: + -- -Thread ID. +The id of technique used by this threat. You can use a MITRE ATT&CK® technique, for example. (ex. https://attack.mitre.org/techniques/T1059/) -type: long +type: keyword -example: 4242 +example: T1059 -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.parent.thread.name`*:: +*`threat.technique.name`*:: + -- -Thread name. +The name of technique used by this threat. You can use a MITRE ATT&CK® technique, for example. (ex. https://attack.mitre.org/techniques/T1059/) type: keyword -example: thread-0 +example: Command and Scripting Interpreter + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.parent.title`*:: +*`threat.technique.name.text`*:: + -- -Process title. -The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. - -type: keyword +type: text -- -*`process.parent.title.text`*:: +*`threat.technique.reference`*:: + -- -type: text +The reference url of technique used by this threat. You can use a MITRE ATT&CK® technique, for example. (ex. https://attack.mitre.org/techniques/T1059/) + +type: keyword + +example: https://attack.mitre.org/techniques/T1059/ + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.parent.uptime`*:: +*`threat.technique.subtechnique.id`*:: + -- -Seconds the process has been up. +The full id of subtechnique used by this threat. You can use a MITRE ATT&CK® subtechnique, for example. (ex. https://attack.mitre.org/techniques/T1059/001/) -type: long +type: keyword -example: 1325 +example: T1059.001 -- -*`process.parent.working_directory`*:: +*`threat.technique.subtechnique.name`*:: + -- -The working directory of the process. +The name of subtechnique used by this threat. You can use a MITRE ATT&CK® subtechnique, for example. (ex. https://attack.mitre.org/techniques/T1059/001/) type: keyword -example: /home/alice +example: PowerShell -- -*`process.parent.working_directory.text`*:: +*`threat.technique.subtechnique.name.text`*:: + -- type: text -- -*`process.pe.company`*:: +*`threat.technique.subtechnique.reference`*:: + -- -Internal company name of the file, provided at compile-time. +The reference url of subtechnique used by this threat. You can use a MITRE ATT&CK® subtechnique, for example. (ex. https://attack.mitre.org/techniques/T1059/001/) type: keyword -example: Microsoft Corporation +example: https://attack.mitre.org/techniques/T1059/001/ -- -*`process.pe.description`*:: +[float] +=== tls + +Fields related to a TLS connection. These fields focus on the TLS protocol itself and intentionally avoids in-depth analysis of the related x.509 certificate files. + + +*`tls.cipher`*:: + -- -Internal description of the file, provided at compile-time. 
+String indicating the cipher used during the current connection. type: keyword -example: Paint +example: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.pe.file_version`*:: +*`tls.client.certificate`*:: + -- -Internal version of the file, provided at compile-time. +PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. type: keyword -example: 6.3.9600.17415 +example: MII... + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.pe.original_file_name`*:: +*`tls.client.certificate_chain`*:: + -- -Internal name of the file, provided at compile-time. +Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. type: keyword -example: MSPAINT.EXE +example: ["MII...", "MII..."] + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.pe.product`*:: +*`tls.client.hash.md5`*:: + -- -Internal product name of the file, provided at compile-time. +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: Microsoft® Windows® Operating System +example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.pgid`*:: +*`tls.client.hash.sha1`*:: + -- -Identifier of the group of processes the process belongs to. +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. -type: long +type: keyword -format: string +example: 9E393D93138888D288266C2D915214D1D1CCEB2A + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.pid`*:: +*`tls.client.hash.sha256`*:: + -- -Process id. +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. -type: long +type: keyword -example: 4242 +example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.ppid`*:: +*`tls.client.issuer`*:: + -- -Parent process' pid. +Distinguished name of subject of the issuer of the x.509 certificate presented by the client. -type: long +type: keyword -example: 4241 +example: CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.start`*:: +*`tls.client.ja3`*:: + -- -The time the process started. +A hash that identifies clients based on how they perform an SSL/TLS handshake. -type: date +type: keyword -example: 2016-05-23T08:05:34.853Z +example: d4e5b18d6b55c71272893221c96ba240 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.thread.id`*:: +*`tls.client.not_after`*:: + -- -Thread ID. +Date/Time indicating when client certificate is no longer considered valid. -type: long +type: date -example: 4242 +example: 2021-01-01T00:00:00.000Z -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.thread.name`*:: +*`tls.client.not_before`*:: + -- -Thread name. +Date/Time indicating when client certificate is first considered valid. -type: keyword +type: date -example: thread-0 +example: 1970-01-01T00:00:00.000Z + +{yes-icon} {ecs-ref}[ECS] field. 
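The `tls.client.hash.*` fingerprints above are digests of the DER-encoded certificate, uppercased for consistency with other hash values. A minimal sketch:

[source,python]
----
import hashlib

def cert_fingerprints(der: bytes) -> dict:
    """Uppercase hex digests of a DER-encoded certificate (tls.client.hash.*)."""
    return {
        "md5": hashlib.md5(der).hexdigest().upper(),
        "sha1": hashlib.sha1(der).hexdigest().upper(),
        "sha256": hashlib.sha256(der).hexdigest().upper(),
    }
----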
-- -*`process.title`*:: +*`tls.client.server_name`*:: + -- -Process title. -The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. +Also called an SNI, this tells the server the hostname to which the client is attempting to connect. When this value is available, it should get copied to `destination.domain`. type: keyword --- +example: www.elastic.co -*`process.title.text`*:: -+ --- -type: text +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.uptime`*:: +*`tls.client.subject`*:: + -- -Seconds the process has been up. +Distinguished name of subject of the x.509 certificate presented by the client. -type: long +type: keyword -example: 1325 +example: CN=myclient, OU=Documentation Team, DC=example, DC=com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.working_directory`*:: +*`tls.client.supported_ciphers`*:: + -- -The working directory of the process. +Array of ciphers offered by the client during the client hello. type: keyword -example: /home/alice +example: ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."] + +{yes-icon} {ecs-ref}[ECS] field. -- -*`process.working_directory.text`*:: +*`tls.client.x509.alternative_names`*:: + -- -type: text +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. --- +type: keyword -[float] -=== registry +example: *.elastic.co -Fields related to Windows Registry operations. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`registry.data.bytes`*:: +*`tls.client.x509.issuer.common_name`*:: + -- -Original bytes written with base64 encoding. -For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. +List of common name (CN) of issuing certificate authority. type: keyword -example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= +example: Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.data.strings`*:: +*`tls.client.x509.issuer.country`*:: + -- -Content when writing string types. -Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). +List of country (C) codes type: keyword -example: ["C:\rta\red_ttp\bin\myapp.exe"] +example: US + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.data.type`*:: +*`tls.client.x509.issuer.distinguished_name`*:: + -- -Standard registry type for encoding contents +Distinguished name (DN) of issuing certificate authority. type: keyword -example: REG_SZ +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.hive`*:: +*`tls.client.x509.issuer.locality`*:: + -- -Abbreviated name for the hive. +List of locality names (L) type: keyword -example: HKLM +example: Mountain View + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.key`*:: +*`tls.client.x509.issuer.organization`*:: + -- -Hive-relative path of keys. +List of organizations (O) of issuing certificate authority.
type: keyword -example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe +example: Example Inc + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.path`*:: +*`tls.client.x509.issuer.organizational_unit`*:: + -- -Full path, including hive, key and value +List of organizational units (OU) of issuing certificate authority. type: keyword -example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger +example: www.example.com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`registry.value`*:: +*`tls.client.x509.issuer.state_or_province`*:: + -- -Name of the value written. +List of state or province names (ST, S, or P) type: keyword -example: Debugger - --- - -[float] -=== related +example: California -This field set is meant to facilitate pivoting around a piece of data. -Some pieces of information can be seen in many places in an ECS event. To facilitate searching for them, store an array of all seen values to their corresponding field in `related.`. -A concrete example is IP addresses, which can be under host, observer, source, destination, client, server, and network.forwarded_ip. If you append all IPs to `related.ip`, you can then search for a given IP trivially, no matter where it appeared, by querying `related.ip:192.0.2.15`. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`related.hash`*:: +*`tls.client.x509.not_after`*:: + -- -All the hashes seen on your event. Populating this field, then using it to search for hashes can help in situations where you're unsure what the hash algorithm is (and therefore which key name to search). +Time at which the certificate is no longer considered valid. -type: keyword +type: date + +example: 2020-07-16 03:15:39+00:00 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`related.ip`*:: +*`tls.client.x509.not_before`*:: + -- -All of the IPs seen on your event. +Time at which the certificate is first considered valid. -type: ip +type: date + +example: 2019-08-16 01:40:25+00:00 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`related.user`*:: +*`tls.client.x509.public_key_algorithm`*:: + -- -All the user names seen on your event. +Algorithm used to generate the public key. type: keyword --- - -[float] -=== rule +example: RSA -Rule fields are used to capture the specifics of any observer or agent rules that generate alerts or other notable events. -Examples of data sources that would populate the rule fields include: network admission control platforms, network or host IDS/IPS, network firewalls, web application firewalls, url filters, endpoint detection and response (EDR) systems, etc. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`rule.author`*:: +*`tls.client.x509.public_key_curve`*:: + -- -Name, organization, or pseudonym of the author or authors who created the rule used to generate this event. +The curve used by the elliptic curve public key algorithm. This is algorithm specific. type: keyword -example: ['Star-Lord'] +example: nistp521 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.category`*:: +*`tls.client.x509.public_key_exponent`*:: + -- -A categorization value keyword used by the entity using the rule for detection of this event. +Exponent used to derive the public key. This is algorithm specific. -type: keyword +type: long -example: Attempted Information Leak +example: 65537 + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. -- -*`rule.description`*:: +*`tls.client.x509.public_key_size`*:: + -- -The description of the rule generating the event. +The size of the public key space in bits. 
-type: keyword +type: long -example: Block requests to public DNS over HTTPS / TLS protocols +example: 2048 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.id`*:: +*`tls.client.x509.serial_number`*:: + -- -A rule ID that is unique within the scope of an agent, observer, or other entity using the rule for detection of this event. +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. type: keyword -example: 101 +example: 55FBB9C7DEBF09809D12CCAA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.license`*:: +*`tls.client.x509.signature_algorithm`*:: + -- -Name of the license under which the rule used to generate this event is made available. +Identifier for certificate signature algorithm. We recommend using names found in Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. type: keyword -example: Apache 2.0 +example: SHA256-RSA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.name`*:: +*`tls.client.x509.subject.common_name`*:: + -- -The name of the rule or signature generating the event. +List of common names (CN) of subject. type: keyword -example: BLOCK_DNS_over_TLS +example: shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.reference`*:: +*`tls.client.x509.subject.country`*:: + -- -Reference URL to additional information about the rule used to generate this event. -The URL can point to the vendor's documentation about the rule. If that's not available, it can also be a link to a more general page describing this type of alert. +List of country (C) code type: keyword -example: https://en.wikipedia.org/wiki/DNS_over_TLS +example: US + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.ruleset`*:: +*`tls.client.x509.subject.distinguished_name`*:: + -- -Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. +Distinguished name (DN) of the certificate subject entity. type: keyword -example: Standard_Protocol_Filters +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.uuid`*:: +*`tls.client.x509.subject.locality`*:: + -- -A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. +List of locality names (L) type: keyword -example: 1100110011 +example: San Francisco + +{yes-icon} {ecs-ref}[ECS] field. -- -*`rule.version`*:: +*`tls.client.x509.subject.organization`*:: + -- -The version / revision of the rule being used for analysis. +List of organizations (O) of subject. type: keyword -example: 1.1 - --- - -[float] -=== server +example: Example, Inc. -A Server is defined as the responder in a network connection for events regarding sessions, connections, or bidirectional flow records. -For TCP events, the server is the receiver of the initial SYN packet(s) of the TCP connection. For other protocols, the server is generally the responder in the network transaction. Some systems actually use the term "responder" to refer the server in TCP connections. The server fields describe details about the system acting as the server in the network event. Server fields are usually populated in conjunction with client fields. Server fields are generally not populated for packet-level events. 
-Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`server.address`*:: +*`tls.client.x509.subject.organizational_unit`*:: + -- -Some event server addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +List of organizational units (OU) of subject. type: keyword +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.as.number`*:: +*`tls.client.x509.subject.state_or_province`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +List of state or province names (ST, S, or P) -type: long +type: keyword -example: 15169 +example: California + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.as.organization.name`*:: +*`tls.client.x509.version_number`*:: + -- -Organization name. +Version of x509 format. type: keyword -example: Google LLC +example: 3 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.as.organization.name.text`*:: +*`tls.curve`*:: + -- -type: text +String indicating the curve used for the given cipher, when applicable. + +type: keyword + +example: secp256r1 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.bytes`*:: +*`tls.established`*:: + -- -Bytes sent from the server to the client. - -type: long +Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. -example: 184 +type: boolean -format: bytes +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.domain`*:: +*`tls.next_protocol`*:: + -- -Server domain. +String indicating the protocol being tunneled. Per the values in the IANA registry (https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. type: keyword +example: http/1.1 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.geo.city_name`*:: +*`tls.resumed`*:: + -- -City name. +Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. -type: keyword +type: boolean -example: Montreal +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.continent_name`*:: +*`tls.server.certificate`*:: + -- -Name of the continent. +PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. type: keyword -example: North America +example: MII... + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.country_iso_code`*:: +*`tls.server.certificate_chain`*:: + -- -Country ISO code. +Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. type: keyword -example: CA +example: ["MII...", "MII..."] + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.country_name`*:: +*`tls.server.hash.md5`*:: + -- -Country name. +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. 
type: keyword -example: Canada +example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.location`*:: +*`tls.server.hash.sha1`*:: + -- -Longitude and latitude. +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: 9E393D93138888D288266C2D915214D1D1CCEB2A + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.name`*:: +*`tls.server.hash.sha256`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: boston-dc +example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.region_iso_code`*:: +*`tls.server.issuer`*:: + -- -Region ISO code. +Subject of the issuer of the x.509 certificate presented by the server. type: keyword -example: CA-QC +example: CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.geo.region_name`*:: +*`tls.server.ja3s`*:: + -- -Region name. +A hash that identifies servers based on how they perform an SSL/TLS handshake. type: keyword -example: Quebec +example: 394441ab65754e2207b1e1b457b3641d + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.ip`*:: +*`tls.server.not_after`*:: + -- -IP address of the server. -Can be one or multiple IPv4 or IPv6 addresses. +Timestamp indicating when server certificate is no longer considered valid. -type: ip +type: date + +example: 2021-01-01T00:00:00.000Z + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.mac`*:: +*`tls.server.not_before`*:: + -- -MAC address of the server. +Timestamp indicating when server certificate is first considered valid. -type: keyword +type: date + +example: 1970-01-01T00:00:00.000Z + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.nat.ip`*:: +*`tls.server.subject`*:: + -- -Translated ip of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. +Subject of the x.509 certificate presented by the server. -type: ip +type: keyword + +example: CN=www.example.com, OU=Infrastructure Team, DC=example, DC=com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.nat.port`*:: +*`tls.server.x509.alternative_names`*:: + -- -Translated port of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. -type: long +type: keyword -format: string +example: *.elastic.co + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.packets`*:: +*`tls.server.x509.issuer.common_name`*:: + -- -Packets sent from the server to the client. +List of common name (CN) of issuing certificate authority. -type: long +type: keyword -example: 12 +example: Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. 
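Several of the connection-level fields in this section (`tls.cipher`, `tls.established`, `tls.resumed`, `tls.next_protocol`, `tls.version`) line up closely with the connection state exposed by Go's `crypto/tls` package. A sketch of that mapping, assuming a Go-based probe; the `ecsTLSFields` helper is hypothetical and the field list abbreviated:

[source,go]
----
package main

import (
	"crypto/tls"
	"fmt"
)

// ecsTLSFields maps Go's crypto/tls connection state onto a few of the
// ECS tls.* fields described above. Hypothetical helper for illustration.
func ecsTLSFields(cs tls.ConnectionState) map[string]interface{} {
	versions := map[uint16]string{
		tls.VersionTLS10: "1.0",
		tls.VersionTLS11: "1.1",
		tls.VersionTLS12: "1.2",
		tls.VersionTLS13: "1.3",
	}
	return map[string]interface{}{
		"tls.established":      cs.HandshakeComplete,
		"tls.resumed":          cs.DidResume,
		"tls.cipher":           tls.CipherSuiteName(cs.CipherSuite),
		"tls.version":          versions[cs.Version], // numeric part only
		"tls.version_protocol": "tls",
		"tls.next_protocol":    cs.NegotiatedProtocol, // ALPN, already lowercase
	}
}

func main() {
	conn, err := tls.Dial("tcp", "www.elastic.co:443", &tls.Config{
		NextProtos: []string{"http/1.1"},
	})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	for k, v := range ecsTLSFields(conn.ConnectionState()) {
		fmt.Printf("%s: %v\n", k, v)
	}
}
----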
-- -*`server.port`*:: +*`tls.server.x509.issuer.country`*:: + -- -Port of the server. +List of country (C) codes -type: long +type: keyword -format: string +example: US + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.registered_domain`*:: +*`tls.server.x509.issuer.distinguished_name`*:: + -- -The highest registered server domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Distinguished name (DN) of issuing certificate authority. type: keyword -example: google.com +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.top_level_domain`*:: +*`tls.server.x509.issuer.locality`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +List of locality names (L) type: keyword -example: co.uk +example: Mountain View + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.domain`*:: +*`tls.server.x509.issuer.organization`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +List of organizations (O) of issuing certificate authority. type: keyword +example: Example Inc + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.user.email`*:: +*`tls.server.x509.issuer.organizational_unit`*:: + -- -User email address. +List of organizational units (OU) of issuing certificate authority. type: keyword +example: www.example.com + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.user.full_name`*:: +*`tls.server.x509.issuer.state_or_province`*:: + -- -User's full name, if available. +List of state or province names (ST, S, or P) type: keyword -example: Albert Einstein +example: California + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.full_name.text`*:: +*`tls.server.x509.not_after`*:: + -- -type: text +Time at which the certificate is no longer considered valid. + +type: date + +example: 2020-07-16 03:15:39+00:00 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.group.domain`*:: +*`tls.server.x509.not_before`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Time at which the certificate is first considered valid. -type: keyword +type: date + +example: 2019-08-16 01:40:25+00:00 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.group.id`*:: +*`tls.server.x509.public_key_algorithm`*:: + -- -Unique identifier for the group on the system/platform. +Algorithm used to generate the public key. type: keyword +example: RSA + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.user.group.name`*:: +*`tls.server.x509.public_key_curve`*:: + -- -Name of the group. +The curve used by the elliptic curve public key algorithm. This is algorithm specific. type: keyword +example: nistp521 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`server.user.hash`*:: +*`tls.server.x509.public_key_exponent`*:: + -- -Unique user hash to correlate information for a user in anonymized form. 
-Useful if `user.id` or `user.name` contain confidential information and cannot be used. +Exponent used to derive the public key. This is algorithm specific. -type: keyword +type: long + +example: 65537 + +{yes-icon} {ecs-ref}[ECS] field. + +Field is not indexed. -- -*`server.user.id`*:: +*`tls.server.x509.public_key_size`*:: + -- -Unique identifiers of the user. +The size of the public key space in bits. -type: keyword +type: long + +example: 2048 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.name`*:: +*`tls.server.x509.serial_number`*:: + -- -Short name or login of the user. +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. type: keyword -example: albert +example: 55FBB9C7DEBF09809D12CCAA + +{yes-icon} {ecs-ref}[ECS] field. -- -*`server.user.name.text`*:: +*`tls.server.x509.signature_algorithm`*:: + -- -type: text +Identifier for certificate signature algorithm. We recommend using names found in Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. --- +type: keyword -[float] -=== service +example: SHA256-RSA -The service fields describe the service for or from which the data was collected. -These fields help you find and correlate logs for a specific service and version. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`service.ephemeral_id`*:: +*`tls.server.x509.subject.common_name`*:: + -- -Ephemeral identifier of this service (if one exists). -This id normally changes across restarts, but `service.id` does not. +List of common names (CN) of subject. type: keyword -example: 8a4f500f +example: shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. -- -*`service.id`*:: +*`tls.server.x509.subject.country`*:: + -- -Unique identifier of the running service. If the service is comprised of many nodes, the `service.id` should be the same for all nodes. -This id should uniquely identify the service. This makes it possible to correlate logs and metrics for one specific service, no matter which particular node emitted the event. -Note that if you need to see the events from one specific host of the service, you should filter on that `host.name` or `host.id` instead. +List of country (C) code type: keyword -example: d37e5ebfe0ae6c4972dbe9f0174a1637bb8247f6 +example: US + +{yes-icon} {ecs-ref}[ECS] field. -- -*`service.name`*:: +*`tls.server.x509.subject.distinguished_name`*:: + -- -Name of the service data is collected from. -The name of the service is normally user given. This allows for distributed services that run on multiple hosts to correlate the related instances based on the name. -In the case of Elasticsearch the `service.name` could contain the cluster name. For Beats the `service.name` is by default a copy of the `service.type` field if no name is specified. +Distinguished name (DN) of the certificate subject entity. type: keyword -example: elasticsearch-metrics +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net + +{yes-icon} {ecs-ref}[ECS] field. -- -*`service.node.name`*:: +*`tls.server.x509.subject.locality`*:: + -- -Name of a service node. -This allows for two nodes of the same service running on the same host to be differentiated. Therefore, `service.node.name` should typically be unique across nodes of a given service. -In the case of Elasticsearch, the `service.node.name` could contain the unique node name within the Elasticsearch cluster. 
In cases where the service doesn't have the concept of a node name, the host name or container name can be used to distinguish running instances that make up this service. If those do not provide uniqueness (e.g. multiple instances of the service running on the same host) - the node name can be manually set. +List of locality names (L) type: keyword -example: instance-0000000016 +example: San Francisco + +{yes-icon} {ecs-ref}[ECS] field. -- -*`service.state`*:: +*`tls.server.x509.subject.organization`*:: + -- -Current state of the service. +List of organizations (O) of subject. type: keyword +example: Example, Inc. + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`service.type`*:: +*`tls.server.x509.subject.organizational_unit`*:: + -- -The type of the service data is collected from. -The type can be used to group and correlate logs and metrics from one service type. -Example: If logs or metrics are collected from Elasticsearch, `service.type` would be `elasticsearch`. +List of organizational units (OU) of subject. type: keyword -example: elasticsearch +{yes-icon} {ecs-ref}[ECS] field. -- -*`service.version`*:: +*`tls.server.x509.subject.state_or_province`*:: + -- -Version of the service the data was collected from. -This allows to look at a data set only for a specific version of a service. +List of state or province names (ST, S, or P) type: keyword -example: 3.2.4 - --- - -[float] -=== source +example: California -Source fields describe details about the source of a packet/event. -Source fields are usually populated in conjunction with destination fields. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`source.address`*:: +*`tls.server.x509.version_number`*:: + -- -Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +Version of x509 format. type: keyword +example: 3 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`source.as.number`*:: +*`tls.version`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +Numeric part of the version parsed from the original string. -type: long +type: keyword -example: 15169 +example: 1.2 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.as.organization.name`*:: +*`tls.version_protocol`*:: + -- -Organization name. +Normalized lowercase protocol name parsed from original string. type: keyword -example: Google LLC +example: tls + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.as.organization.name.text`*:: +*`span.id`*:: + -- -type: text +Unique identifier of the span within the scope of its trace. +A span represents an operation within a transaction, such as a request to another service, or a database query. + +type: keyword + +example: 3ff9a8981b7ccd5a + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.bytes`*:: +*`trace.id`*:: + -- -Bytes sent from the source to the destination. +Unique identifier of the trace. +A trace groups multiple events like transactions that belong together. For example, a user request handled by multiple inter-connected services. -type: long +type: keyword -example: 184 +example: 4bf92f3577b34da6a3ce929d0e0e4736 -format: bytes +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.domain`*:: +*`transaction.id`*:: + -- -Source domain. +Unique identifier of the transaction within the scope of its trace. 
+A transaction is the highest level of work measured within a service, such as a request to a server. type: keyword +example: 00f067aa0ba902b7 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`source.geo.city_name`*:: +[float] +=== url + +URL fields provide support for complete or partial URLs, and support breaking them down into scheme, domain, path, and so on. + + +*`url.domain`*:: + -- -City name. +Domain of the url, such as "www.elastic.co". +In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. +If the URL contains a literal IPv6 address enclosed by `[` and `]` (IETF RFC 2732), the `[` and `]` characters should also be captured in the `domain` field. type: keyword -example: Montreal +example: www.elastic.co + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.geo.continent_name`*:: +*`url.extension`*:: + -- -Name of the continent. +The field contains the file extension from the original request url, excluding the leading dot. +The file extension is only set if it exists, as not every url has a file extension. +The leading period must not be included. For example, the value must be "png", not ".png". +Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). type: keyword -example: North America +example: png + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.geo.country_iso_code`*:: +*`url.fragment`*:: + -- -Country ISO code. +Portion of the url after the `#`, such as "top". +The `#` is not part of the fragment. type: keyword -example: CA +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.geo.country_name`*:: +*`url.full`*:: + -- -Country name. +If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. type: keyword -example: Canada +example: https://www.elastic.co:443/search?q=elasticsearch#top + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.geo.location`*:: +*`url.full.text`*:: + -- -Longitude and latitude. - -type: geo_point - -example: { "lon": -73.614830, "lat": 45.505918 } +type: text -- -*`source.geo.name`*:: +*`url.original`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Unmodified original url as seen in the event source. +Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. +This field is meant to represent the URL as it was observed, complete or not. type: keyword -example: boston-dc +example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.geo.region_iso_code`*:: +*`url.original.text`*:: + -- -Region ISO code. - -type: keyword - -example: CA-QC +type: text -- -*`source.geo.region_name`*:: +*`url.password`*:: + -- -Region name. +Password of the request. type: keyword -example: Quebec +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.ip`*:: +*`url.path`*:: + -- -IP address of the source. -Can be one or multiple IPv4 or IPv6 addresses. +Path of the request, such as "/search". -type: ip +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.mac`*:: +*`url.port`*:: + -- -MAC address of the source. +Port of the request, such as 443.
-type: keyword +type: long + +example: 443 + +format: string + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.nat.ip`*:: +*`url.query`*:: + -- -Translated ip of source based NAT sessions (e.g. internal client to internet) -Typically connections traversing load balancers, firewalls, or routers. +The query field describes the query string of the request, such as "q=elasticsearch". +The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. -type: ip +type: keyword + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.nat.port`*:: +*`url.registered_domain`*:: + -- -Translated port of source based NAT sessions. (e.g. internal client to internet) -Typically used with load balancers, firewalls, or routers. +The highest registered url domain, stripped of the subdomain. +For example, the registered domain for "foo.example.com" is "example.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". -type: long +type: keyword -format: string +example: example.com + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.packets`*:: +*`url.scheme`*:: + -- -Packets sent from the source to the destination. +Scheme of the request, such as "https". +Note: The `:` is not part of the scheme. -type: long +type: keyword -example: 12 +example: https + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.port`*:: +*`url.subdomain`*:: + -- -Port of the source. +The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. +For example, the subdomain portion of "www.east.mydomain.co.uk" is "east". If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. -type: long +type: keyword -format: string +example: east -- -*`source.registered_domain`*:: +*`url.top_level_domain`*:: + -- -The highest registered source domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword -example: google.com +example: co.uk + +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.top_level_domain`*:: +*`url.username`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org).
Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Username of the request. type: keyword -example: co.uk +{yes-icon} {ecs-ref}[ECS] field. -- -*`source.user.domain`*:: +[float] +=== user + +The user fields describe information about the user that is relevant to the event. +Fields can have one entry or multiple entries. If a user has more than one id, provide an array that includes all of them. + + +*`user.changes.domain`*:: + -- Name of the directory the user is a member of. @@ -7726,7 +20988,7 @@ type: keyword -- -*`source.user.email`*:: +*`user.changes.email`*:: + -- User email address. @@ -7735,7 +20997,7 @@ type: keyword -- -*`source.user.full_name`*:: +*`user.changes.full_name`*:: + -- User's full name, if available. @@ -7746,14 +21008,14 @@ example: Albert Einstein -- -*`source.user.full_name.text`*:: +*`user.changes.full_name.text`*:: + -- type: text -- -*`source.user.group.domain`*:: +*`user.changes.group.domain`*:: + -- Name of the directory the group is a member of. @@ -7763,7 +21025,7 @@ type: keyword -- -*`source.user.group.id`*:: +*`user.changes.group.id`*:: + -- Unique identifier for the group on the system/platform. @@ -7772,7 +21034,7 @@ type: keyword -- -*`source.user.group.name`*:: +*`user.changes.group.name`*:: + -- Name of the group. @@ -7781,7 +21043,7 @@ type: keyword -- -*`source.user.hash`*:: +*`user.changes.hash`*:: + -- Unique user hash to correlate information for a user in anonymized form. @@ -7791,16 +21053,16 @@ type: keyword -- -*`source.user.id`*:: +*`user.changes.id`*:: + -- -Unique identifiers of the user. +Unique identifier of the user. type: keyword -- -*`source.user.name`*:: +*`user.changes.name`*:: + -- Short name or login of the user. @@ -7811,1055 +21073,1039 @@ example: albert -- -*`source.user.name.text`*:: +*`user.changes.name.text`*:: + -- type: text -- -[float] -=== threat - -Fields to classify events and alerts according to a threat taxonomy such as the Mitre ATT&CK framework. -These fields are for users to classify alerts from all of their sources (e.g. IDS, NGFW, etc.) within a common taxonomy. The threat.tactic.* are meant to capture the high level category of the threat (e.g. "impact"). The threat.technique.* fields are meant to capture which kind of approach is used by this detected threat, to accomplish the goal (e.g. "endpoint denial of service"). - - -*`threat.framework`*:: -+ --- -Name of the threat framework used to further categorize and classify the tactic and technique of the reported threat. Framework classification can be provided by detecting systems, evaluated at ingest time, or retrospectively tagged to events. - -type: keyword - -example: MITRE ATT&CK - --- - -*`threat.tactic.id`*:: +*`user.changes.roles`*:: + -- -The id of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) +Array of user roles at the time of the event. type: keyword -example: TA0040 +example: ["kibana_admin", "reporting_user"] -- -*`threat.tactic.name`*:: +*`user.domain`*:: + -- -Name of the type of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: impact +{yes-icon} {ecs-ref}[ECS] field. 
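Most of the `url.*` fields above fall out of a standard URL parser, while `url.registered_domain`, `url.top_level_domain`, and `url.subdomain` additionally need the public suffix list that their descriptions recommend. A sketch in Go using `net/url` and `golang.org/x/net/publicsuffix`; the printed field-by-field mapping is an illustration, not APM Server code:

[source,go]
----
package main

import (
	"fmt"
	"net/url"
	"strings"

	"golang.org/x/net/publicsuffix"
)

func main() {
	u, err := url.Parse("https://user:secret@www.elastic.co:443/search?q=elasticsearch#top")
	if err != nil {
		panic(err)
	}
	password, _ := u.User.Password()
	fmt.Println("url.scheme:", u.Scheme)     // "https", without the ':'
	fmt.Println("url.domain:", u.Hostname()) // "www.elastic.co"
	fmt.Println("url.port:", u.Port())       // "443"
	fmt.Println("url.path:", u.Path)         // "/search"
	fmt.Println("url.query:", u.RawQuery)    // "q=elasticsearch", without the '?'
	fmt.Println("url.fragment:", u.Fragment) // "top", without the '#'
	fmt.Println("url.username:", u.User.Username())
	fmt.Println("url.password:", password)

	// The registered domain, eTLD, and subdomain come from the public
	// suffix list, as the field descriptions above recommend.
	host := u.Hostname()
	registered, _ := publicsuffix.EffectiveTLDPlusOne(host) // "elastic.co"
	suffix, _ := publicsuffix.PublicSuffix(host)            // "co"
	fmt.Println("url.registered_domain:", registered)
	fmt.Println("url.top_level_domain:", suffix)
	fmt.Println("url.subdomain:", strings.TrimSuffix(host, "."+registered)) // "www"
}
----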
-- -*`threat.tactic.reference`*:: +*`user.effective.domain`*:: + -- -The reference url of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: https://attack.mitre.org/tactics/TA0040/ - -- -*`threat.technique.id`*:: +*`user.effective.email`*:: + -- -The id of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +User email address. type: keyword -example: T1499 - -- -*`threat.technique.name`*:: +*`user.effective.full_name`*:: + -- -The name of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +User's full name, if available. type: keyword -example: endpoint denial of service +example: Albert Einstein -- -*`threat.technique.name.text`*:: +*`user.effective.full_name.text`*:: + -- type: text -- -*`threat.technique.reference`*:: +*`user.effective.group.domain`*:: + -- -The reference url of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: https://attack.mitre.org/techniques/T1499/ - -- -[float] -=== tls - -Fields related to a TLS connection. These fields focus on the TLS protocol itself and intentionally avoids in-depth analysis of the related x.509 certificate files. - - -*`tls.cipher`*:: +*`user.effective.group.id`*:: + -- -String indicating the cipher used during the current connection. +Unique identifier for the group on the system/platform. type: keyword -example: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - -- -*`tls.client.certificate`*:: +*`user.effective.group.name`*:: + -- -PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. +Name of the group. type: keyword -example: MII... - -- -*`tls.client.certificate_chain`*:: +*`user.effective.hash`*:: + -- -Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. type: keyword -example: ['MII...', 'MII...'] - -- -*`tls.client.hash.md5`*:: +*`user.effective.id`*:: + -- -Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +Unique identifier of the user. type: keyword -example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC - -- -*`tls.client.hash.sha1`*:: +*`user.effective.name`*:: + -- -Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +Short name or login of the user. 
type: keyword -example: 9E393D93138888D288266C2D915214D1D1CCEB2A +example: albert -- -*`tls.client.hash.sha256`*:: +*`user.effective.name.text`*:: + -- -Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. - -type: keyword - -example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 +type: text -- -*`tls.client.issuer`*:: +*`user.effective.roles`*:: + -- -Distinguished name of subject of the issuer of the x.509 certificate presented by the client. +Array of user roles at the time of the event. type: keyword -example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com +example: ["kibana_admin", "reporting_user"] -- -*`tls.client.ja3`*:: +*`user.email`*:: + -- -A hash that identifies clients based on how they perform an SSL/TLS handshake. +User email address. type: keyword -example: d4e5b18d6b55c71272893221c96ba240 +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.client.not_after`*:: +*`user.full_name`*:: + -- -Date/Time indicating when client certificate is no longer considered valid. - -type: date - -example: 2021-01-01T00:00:00.000Z - --- +User's full name, if available. -*`tls.client.not_before`*:: -+ --- -Date/Time indicating when client certificate is first considered valid. +type: keyword -type: date +example: Albert Einstein -example: 1970-01-01T00:00:00.000Z +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.client.server_name`*:: +*`user.full_name.text`*:: + -- -Also called an SNI, this tells the server which hostname to which the client is attempting to connect. When this value is available, it should get copied to `destination.domain`. - -type: keyword - -example: www.elastic.co +type: text -- -*`tls.client.subject`*:: +*`user.group.domain`*:: + -- -Distinguished name of subject of the x.509 certificate presented by the client. +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: CN=myclient, OU=Documentation Team, DC=mydomain, DC=com +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.client.supported_ciphers`*:: +*`user.group.id`*:: + -- -Array of ciphers offered by the client during the client hello. +Unique identifier for the group on the system/platform. type: keyword -example: ['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', '...'] +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.curve`*:: +*`user.group.name`*:: + -- -String indicating the curve used for the given cipher, when applicable. +Name of the group. type: keyword -example: secp256r1 - --- - -*`tls.established`*:: -+ --- -Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. - -type: boolean +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.next_protocol`*:: +*`user.hash`*:: + -- -String indicating the protocol being tunneled. Per the values in the IANA registry (https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. type: keyword -example: http/1.1 - --- - -*`tls.resumed`*:: -+ --- -Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. - -type: boolean +{yes-icon} {ecs-ref}[ECS] field. 
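`user.hash` only requires a stable, anonymized value; the hash function is left to the event producer. One reasonable sketch is an HMAC-SHA256 of `user.name` keyed with a per-deployment secret (the secret and the `anonymizeUser` helper are assumptions for illustration, not something the field definition prescribes):

[source,go]
----
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// anonymizeUser derives a stable user.hash from user.name. Keying the HMAC
// with a deployment secret keeps the mapping one-way for outsiders; any
// stable one-way function would satisfy the field description.
func anonymizeUser(secret, userName string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(userName))
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	// The same input always yields the same hash, so events for one user
	// can still be correlated without storing the name itself.
	fmt.Println("user.hash:", anonymizeUser("my-deployment-secret", "albert"))
}
----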
-- -*`tls.server.certificate`*:: +*`user.id`*:: + -- -PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. +Unique identifier of the user. type: keyword -example: MII... +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.server.certificate_chain`*:: +*`user.name`*:: + -- -Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. +Short name or login of the user. type: keyword -example: ['MII...', 'MII...'] +example: albert + +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.server.hash.md5`*:: +*`user.name.text`*:: + -- -Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. - -type: keyword - -example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC +type: text -- -*`tls.server.hash.sha1`*:: +*`user.roles`*:: + -- -Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +Array of user roles at the time of the event. type: keyword -example: 9E393D93138888D288266C2D915214D1D1CCEB2A +example: ["kibana_admin", "reporting_user"] + +{yes-icon} {ecs-ref}[ECS] field. -- -*`tls.server.hash.sha256`*:: +*`user.target.domain`*:: + -- -Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 - -- -*`tls.server.issuer`*:: +*`user.target.email`*:: + -- -Subject of the issuer of the x.509 certificate presented by the server. +User email address. type: keyword -example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com - -- -*`tls.server.ja3s`*:: +*`user.target.full_name`*:: + -- -A hash that identifies servers based on how they perform an SSL/TLS handshake. +User's full name, if available. type: keyword -example: 394441ab65754e2207b1e1b457b3641d +example: Albert Einstein -- -*`tls.server.not_after`*:: +*`user.target.full_name.text`*:: + -- -Timestamp indicating when server certificate is no longer considered valid. - -type: date - -example: 2021-01-01T00:00:00.000Z +type: text -- -*`tls.server.not_before`*:: +*`user.target.group.domain`*:: + -- -Timestamp indicating when server certificate is first considered valid. - -type: date +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. -example: 1970-01-01T00:00:00.000Z +type: keyword -- -*`tls.server.subject`*:: +*`user.target.group.id`*:: + -- -Subject of the x.509 certificate presented by the server. +Unique identifier for the group on the system/platform. type: keyword -example: CN=www.mydomain.com, OU=Infrastructure Team, DC=mydomain, DC=com - -- -*`tls.version`*:: +*`user.target.group.name`*:: + -- -Numeric part of the version parsed from the original string. +Name of the group. type: keyword -example: 1.2 - -- -*`tls.version_protocol`*:: +*`user.target.hash`*:: + -- -Normalized lowercase protocol name parsed from original string. 
- -type: keyword - -example: tls - --- - -[float] -=== tracing +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. -Distributed tracing makes it possible to analyze performance throughout a microservice architecture all in one view. This is accomplished by tracing all of the requests - from the initial web request in the front-end service - to queries made through multiple back-end services. +type: keyword +-- -*`tracing.trace.id`*:: +*`user.target.id`*:: + -- -Unique identifier of the trace. -A trace groups multiple events like transactions that belong together. For example, a user request handled by multiple inter-connected services. +Unique identifier of the user. type: keyword -example: 4bf92f3577b34da6a3ce929d0e0e4736 - -- -*`tracing.transaction.id`*:: +*`user.target.name`*:: + -- -Unique identifier of the transaction. -A transaction is the highest level of work measured within a service, such as a request to a server. +Short name or login of the user. type: keyword -example: 00f067aa0ba902b7 +example: albert -- -[float] -=== url - -URL fields provide support for complete or partial URLs, and supports the breaking down into scheme, domain, path, and so on. +*`user.target.name.text`*:: ++ +-- +type: text +-- -*`url.domain`*:: +*`user.target.roles`*:: + -- -Domain of the url, such as "www.elastic.co". -In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. +Array of user roles at the time of the event. type: keyword -example: www.elastic.co +example: ["kibana_admin", "reporting_user"] -- -*`url.extension`*:: +[float] +=== user_agent + +The user_agent fields normally come from a browser request. +They often show up in web service logs coming from the parsed user agent string. + + +*`user_agent.device.name`*:: + -- -The field contains the file extension from the original request url. -The file extension is only set if it exists, as not every url has a file extension. -The leading period must not be included. For example, the value must be "png", not ".png". +Name of the device. type: keyword -example: png +example: iPhone + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.fragment`*:: +*`user_agent.name`*:: + -- -Portion of the url after the `#`, such as "top". -The `#` is not part of the fragment. +Name of the user agent. type: keyword +example: Safari + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`url.full`*:: +*`user_agent.original`*:: + -- -If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. +Unparsed user_agent string. type: keyword -example: https://www.elastic.co:443/search?q=elasticsearch#top +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.full.text`*:: +*`user_agent.original.text`*:: + -- type: text -- -*`url.original`*:: +*`user_agent.os.family`*:: + -- -Unmodified original url as seen in the event source. -Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. -This field is meant to represent the URL as it was observed, complete or not. +OS family (such as redhat, debian, freebsd, windows). 
type: keyword -example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch +example: debian + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.original.text`*:: +*`user_agent.os.full`*:: + -- -type: text +Operating system name, including the version or code name. + +type: keyword + +example: Mac OS Mojave + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.password`*:: +*`user_agent.os.full.text`*:: + -- -Password of the request. - -type: keyword +type: text -- -*`url.path`*:: +*`user_agent.os.kernel`*:: + -- -Path of the request, such as "/search". +Operating system kernel version as a raw string. type: keyword +example: 4.4.0-112-generic + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`url.port`*:: +*`user_agent.os.name`*:: + -- -Port of the request, such as 443. +Operating system name, without the version. -type: long +type: keyword -example: 443 +example: Mac OS X -format: string +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.query`*:: +*`user_agent.os.name.text`*:: + -- -The query field describes the query string of the request, such as "q=elasticsearch". -The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. - -type: keyword +type: text -- -*`url.registered_domain`*:: +*`user_agent.os.platform`*:: + -- -The highest registered url domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Operating system platform (such as centos, ubuntu, windows). type: keyword -example: google.com +example: darwin + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.scheme`*:: +*`user_agent.os.type`*:: + -- -Scheme of the request, such as "https". -Note: The `:` is not part of the scheme. +Use the `os.type` field to categorize the operating system into one of the broad commercial families. +One of the following values should be used (lowercase): linux, macos, unix, windows. +If the OS you're dealing with is not in the list, the field should not be populated. Please let us know by opening an issue with ECS, to propose its addition. type: keyword -example: https +example: macos -- -*`url.top_level_domain`*:: +*`user_agent.os.version`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Operating system version as a raw string. type: keyword -example: co.uk +example: 10.14.1 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`url.username`*:: +*`user_agent.version`*:: + -- -Username of the request. +Version of the user agent. type: keyword +example: 12.0 + +{yes-icon} {ecs-ref}[ECS] field. + -- [float] -=== user +=== vlan -The user fields describe information about the user that is relevant to the event. -Fields can have one entry or multiple entries. If a user has more than one id, provide an array that includes all of them.
+The VLAN fields are used to identify 802.1q tag(s) of a packet, as well as ingress and egress VLAN associations of an observer in relation to a specific packet or connection. +Network.vlan fields are used to record a single VLAN tag, or the outer tag in the case of q-in-q encapsulations, for a packet or connection as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. +Network.inner VLAN fields are used to report inner q-in-q 802.1q tags (multiple 802.1q encapsulations) as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. Network.inner VLAN fields should only be used in addition to network.vlan fields to indicate q-in-q tagging. +Observer.ingress and observer.egress VLAN values are used to record observer specific information when observer events contain discrete ingress and egress VLAN information, typically provided by firewalls, routers, or load balancers. -*`user.domain`*:: +*`vlan.id`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +VLAN ID as reported by the observer. type: keyword +example: 10 + -- -*`user.email`*:: +*`vlan.name`*:: + -- -User email address. +Optional VLAN name as reported by the observer. type: keyword --- +example: outside -*`user.full_name`*:: -+ -- -User's full name, if available. -type: keyword +[float] +=== vulnerability -example: Albert Einstein +The vulnerability fields describe information about a vulnerability that is relevant to an event. --- -*`user.full_name.text`*:: +*`vulnerability.category`*:: + -- -type: text +The type of system or architecture that the vulnerability affects. These may be platform-specific (for example, Debian or SUSE) or general (for example, Database or Firewall). For example (https://qualysguard.qualys.com/qwebhelp/fo_portal/knowledgebase/vulnerability_categories.htm[Qualys vulnerability categories]) +This field must be an array. + +type: keyword + +example: ["Firewall"] + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user.group.domain`*:: +*`vulnerability.classification`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +The classification of the vulnerability scoring system. For example (https://www.first.org/cvss/) type: keyword +example: CVSS + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`user.group.id`*:: +*`vulnerability.description`*:: + -- -Unique identifier for the group on the system/platform. +The description of the vulnerability that provides additional context of the vulnerability. For example (https://cve.mitre.org/about/faqs.html#cve_entry_descriptions_created[Common Vulnerabilities and Exposure CVE description]) type: keyword +example: In macOS before 2.12.6, there is a vulnerability in the RPC... + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`user.group.name`*:: +*`vulnerability.description.text`*:: + -- -Name of the group. - -type: keyword +type: text -- -*`user.hash`*:: +*`vulnerability.enumeration`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. +The type of identifier used for this vulnerability. For example (https://cve.mitre.org/about/) type: keyword +example: CVE + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`user.id`*:: +*`vulnerability.id`*:: + -- -Unique identifiers of the user. +The identification (ID) is the number portion of a vulnerability entry. 
It includes a unique identification number for the vulnerability. For example (https://cve.mitre.org/about/faqs.html#what_is_cve_id[Common Vulnerabilities and Exposure CVE ID]) type: keyword +example: CVE-2019-00001 + +{yes-icon} {ecs-ref}[ECS] field. + -- -*`user.name`*:: +*`vulnerability.reference`*:: + -- -Short name or login of the user. +A resource that provides additional information, context, and mitigations for the identified vulnerability. type: keyword -example: albert +example: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6111 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user.name.text`*:: +*`vulnerability.report_id`*:: + -- -type: text +The report or scan identification number. --- +type: keyword -[float] -=== user_agent +example: 20191018.0001 -The user_agent fields normally come from a browser request. -They often show up in web service logs coming from the parsed user agent string. +{yes-icon} {ecs-ref}[ECS] field. +-- -*`user_agent.device.name`*:: +*`vulnerability.scanner.vendor`*:: + -- -Name of the device. +The name of the vulnerability scanner vendor. type: keyword -example: iPhone +example: Tenable + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user_agent.name`*:: +*`vulnerability.score.base`*:: + -- -Name of the user agent. +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Base scores cover an assessment for exploitability metrics (attack vector, complexity, privileges, and user interaction), impact metrics (confidentiality, integrity, and availability), and scope. For example (https://www.first.org/cvss/specification-document) -type: keyword +type: float -example: Safari +example: 5.5 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user_agent.original`*:: +*`vulnerability.score.environmental`*:: + -- -Unparsed user_agent string. +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Environmental scores cover an assessment for any modified Base metrics, confidentiality, integrity, and availability requirements. For example (https://www.first.org/cvss/specification-document) -type: keyword +type: float -example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 +example: 5.5 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user_agent.original.text`*:: +*`vulnerability.score.temporal`*:: + -- -type: text +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Temporal scores cover an assessment for code maturity, remediation level, and confidence. For example (https://www.first.org/cvss/specification-document) + +type: float + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user_agent.os.family`*:: +*`vulnerability.score.version`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +The National Vulnerability Database (NVD) provides qualitative severity rankings of "Low", "Medium", and "High" for CVSS v2.0 base score ranges in addition to the severity ratings for CVSS v3.0 as they are defined in the CVSS v3.0 specification. +CVSS is owned and managed by FIRST.Org, Inc. (FIRST), a US-based non-profit organization, whose mission is to help computer security incident response teams across the world. For example (https://nvd.nist.gov/vuln-metrics/cvss) type: keyword -example: debian +example: 2.0 + +{yes-icon} {ecs-ref}[ECS] field. -- -*`user_agent.os.full`*:: +*`vulnerability.severity`*:: + -- -Operating system name, including the version or code name.
+The severity of the vulnerability can help with metrics and internal prioritization regarding remediation. For example (https://nvd.nist.gov/vuln-metrics/cvss) type: keyword -example: Mac OS Mojave +example: Critical --- +{yes-icon} {ecs-ref}[ECS] field. -*`user_agent.os.full.text`*:: -+ -- -type: text --- +[float] +=== x509 -*`user_agent.os.kernel`*:: +This implements the common core fields for x509 certificates. This information is likely logged with TLS sessions, digital signatures found in executable binaries, S/MIME information in email bodies, or analysis of files on disk. +When the certificate relates to a file, use the fields at `file.x509`. When hashes of the DER-encoded certificate are available, the `hash` data set should be populated as well (e.g. `file.hash.sha256`). +Events that contain certificate information about network connections should use the x509 fields under the relevant TLS fields: `tls.server.x509` and/or `tls.client.x509`. + + +*`x509.alternative_names`*:: + -- -Operating system kernel version as a raw string. +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. type: keyword -example: 4.4.0-112-generic +example: *.elastic.co -- -*`user_agent.os.name`*:: +*`x509.issuer.common_name`*:: + -- -Operating system name, without the version. +List of common names (CN) of issuing certificate authority. type: keyword -example: Mac OS X +example: Example SHA2 High Assurance Server CA -- -*`user_agent.os.name.text`*:: +*`x509.issuer.country`*:: + -- -type: text +List of country (C) codes + +type: keyword + +example: US -- -*`user_agent.os.platform`*:: +*`x509.issuer.distinguished_name`*:: + -- -Operating system platform (such centos, ubuntu, windows). +Distinguished name (DN) of issuing certificate authority. type: keyword -example: darwin +example: C=US, O=Example Inc, OU=www.example.com, CN=Example SHA2 High Assurance Server CA -- -*`user_agent.os.version`*:: +*`x509.issuer.locality`*:: + -- -Operating system version as a raw string. +List of locality names (L) type: keyword -example: 10.14.1 +example: Mountain View -- -*`user_agent.version`*:: +*`x509.issuer.organization`*:: + -- -Version of the user agent. +List of organizations (O) of issuing certificate authority. type: keyword -example: 12.0 +example: Example Inc -- -[float] -=== vlan +*`x509.issuer.organizational_unit`*:: ++ +-- +List of organizational units (OU) of issuing certificate authority. -The VLAN fields are used to identify 802.1q tag(s) of a packet, as well as ingress and egress VLAN associations of an observer in relation to a specific packet or connection. -Network.vlan fields are used to record a single VLAN tag, or the outer tag in the case of q-in-q encapsulations, for a packet or connection as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. -Network.inner VLAN fields are used to report inner q-in-q 802.1q tags (multiple 802.1q encapsulations) as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. Network.inner VLAN fields should only be used in addition to network.vlan fields to indicate q-in-q tagging. -Observer.ingress and observer.egress VLAN values are used to record observer specific information when observer events contain discrete ingress and egress VLAN information, typically provided by firewalls, routers, or load balancers.
+type: keyword +example: www.example.com -*`vlan.id`*:: +-- + +*`x509.issuer.state_or_province`*:: + -- -VLAN ID as reported by the observer. +List of state or province names (ST, S, or P) type: keyword -example: 10 +example: California -- -*`vlan.name`*:: +*`x509.not_after`*:: + -- -Optional VLAN name as reported by the observer. +Time at which the certificate is no longer considered valid. -type: keyword +type: date -example: outside +example: 2020-07-16 03:15:39+00:00 -- -[float] -=== vulnerability +*`x509.not_before`*:: ++ +-- +Time at which the certificate is first considered valid. -The vulnerability fields describe information about a vulnerability that is relevant to an event. +type: date +example: 2019-08-16 01:40:25+00:00 -*`vulnerability.category`*:: +-- + +*`x509.public_key_algorithm`*:: + -- -The type of system or architecture that the vulnerability affects. These may be platform-specific (for example, Debian or SUSE) or general (for example, Database or Firewall). For example (https://qualysguard.qualys.com/qwebhelp/fo_portal/knowledgebase/vulnerability_categories.htm[Qualys vulnerability categories]) -This field must be an array. +Algorithm used to generate the public key. type: keyword -example: ["Firewall"] +example: RSA -- -*`vulnerability.classification`*:: +*`x509.public_key_curve`*:: + -- -The classification of the vulnerability scoring system. For example (https://www.first.org/cvss/) +The curve used by the elliptic curve public key algorithm. This is algorithm specific. type: keyword -example: CVSS +example: nistp521 -- -*`vulnerability.description`*:: +*`x509.public_key_exponent`*:: + -- -The description of the vulnerability that provides additional context of the vulnerability. For example (https://cve.mitre.org/about/faqs.html#cve_entry_descriptions_created[Common Vulnerabilities and Exposure CVE description]) +Exponent used to derive the public key. This is algorithm specific. -type: keyword +type: long -example: In macOS before 2.12.6, there is a vulnerability in the RPC... +example: 65537 + +Field is not indexed. -- -*`vulnerability.description.text`*:: +*`x509.public_key_size`*:: + -- -type: text +The size of the public key space in bits. + +type: long + +example: 2048 -- -*`vulnerability.enumeration`*:: +*`x509.serial_number`*:: + -- -The type of identifier used for this vulnerability. For example (https://cve.mitre.org/about/) +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. type: keyword -example: CVE +example: 55FBB9C7DEBF09809D12CCAA -- -*`vulnerability.id`*:: +*`x509.signature_algorithm`*:: + -- -The identification (ID) is the number portion of a vulnerability entry. It includes a unique identification number for the vulnerability. For example (https://cve.mitre.org/about/faqs.html#what_is_cve_id)[Common Vulnerabilities and Exposure CVE ID] +Identifier for certificate signature algorithm. We recommend using names found in the Go Lang Crypto library. See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353. type: keyword -example: CVE-2019-00001 +example: SHA256-RSA -- -*`vulnerability.reference`*:: +*`x509.subject.common_name`*:: + -- -A resource that provides additional information, context, and mitigations for the identified vulnerability. +List of common names (CN) of subject.
type: keyword -example: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6111 +example: shared.global.example.net -- -*`vulnerability.report_id`*:: +*`x509.subject.country`*:: + -- -The report or scan identification number. +List of country (C) codes type: keyword -example: 20191018.0001 +example: US -- -*`vulnerability.scanner.vendor`*:: +*`x509.subject.distinguished_name`*:: + -- -The name of the vulnerability scanner vendor. +Distinguished name (DN) of the certificate subject entity. type: keyword -example: Tenable +example: C=US, ST=California, L=San Francisco, O=Example, Inc., CN=shared.global.example.net -- -*`vulnerability.score.base`*:: +*`x509.subject.locality`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Base scores cover an assessment for exploitability metrics (attack vector, complexity, privileges, and user interaction), impact metrics (confidentiality, integrity, and availability), and scope. For example (https://www.first.org/cvss/specification-document) +List of locality names (L) -type: float +type: keyword -example: 5.5 +example: San Francisco -- -*`vulnerability.score.environmental`*:: +*`x509.subject.organization`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Environmental scores cover an assessment for any modified Base metrics, confidentiality, integrity, and availability requirements. For example (https://www.first.org/cvss/specification-document) +List of organizations (O) of subject. -type: float +type: keyword -example: 5.5 +example: Example, Inc. -- -*`vulnerability.score.temporal`*:: +*`x509.subject.organizational_unit`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Temporal scores cover an assessment for code maturity, remediation level, and confidence. For example (https://www.first.org/cvss/specification-document) +List of organizational units (OU) of subject. -type: float +type: keyword -- -*`vulnerability.score.version`*:: +*`x509.subject.state_or_province`*:: + -- -The National Vulnerability Database (NVD) provides qualitative severity rankings of "Low", "Medium", and "High" for CVSS v2.0 base score ranges in addition to the severity ratings for CVSS v3.0 as they are defined in the CVSS v3.0 specification. +List of state or province names (ST, S, or P) type: keyword -example: 2.0 +example: California -- -*`vulnerability.severity`*:: +*`x509.version_number`*:: + -- -The severity of the vulnerability can help with metrics and internal prioritization regarding remediation. For example (https://nvd.nist.gov/vuln-metrics/cvss) +Version of x509 format.
type: keyword -example: Critical +example: 3 -- @@ -8933,6 +22179,16 @@ type: keyword -- +*`kubernetes.pod.ip`*:: ++ +-- +Kubernetes Pod IP + + +type: ip + +-- + *`kubernetes.namespace`*:: + -- @@ -8949,6 +22205,16 @@ type: keyword Kubernetes node name +type: keyword + +-- + +*`kubernetes.node.hostname`*:: ++ +-- +Kubernetes hostname as reported by the node’s kernel + + type: keyword -- @@ -8969,6 +22235,16 @@ type: object Kubernetes annotations map +type: object + +-- + +*`kubernetes.selectors.*`*:: ++ +-- +Kubernetes selectors map + + type: object -- @@ -9006,7 +22282,7 @@ type: keyword *`kubernetes.container.name`*:: + -- -Kubernetes container name +Kubernetes container name (different than the name from the runtime) type: keyword @@ -9019,7 +22295,9 @@ type: keyword Kubernetes container image -type: keyword +type: alias + +alias to: container.image.name -- @@ -9040,8 +22318,41 @@ alias to: process.executable -- +[float] +=== owner + +Process owner information. + + +*`process.owner.id`*:: ++ +-- +Unique identifier of the user. + +type: keyword + +-- + +*`process.owner.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +-- + +*`process.owner.name.text`*:: ++ +-- +type: text + +-- + [[exported-fields-system]] -== APM System Metrics fields +== System Metrics fields System status metrics, like CPU and memory usage, that are collected from the operating system. @@ -9174,26 +22485,105 @@ Metrics and limits for the cgroup, collected by APM agents on Linux. [float] -=== memory +=== cpu -Memory-specific cgroup metrics and limits. +CPU-specific cgroup metrics and limits. -*`system.process.cgroup.memory.mem.limit.bytes`*:: +*`system.process.cgroup.cpu.id`*:: + -- -Memory limit for the current cgroup slice. +ID for the current cgroup CPU. + +type: keyword + +-- + +[float] +=== cfs + +Completely Fair Scheduler (CFS) cgroup metrics. + + +*`system.process.cgroup.cpu.cfs.period.us`*:: ++ +-- +CFS period in microseconds. type: long -format: bytes +-- +*`system.process.cgroup.cpu.cfs.quota.us`*:: ++ -- +CFS quota in microseconds. -*`system.process.cgroup.memory.mem.usage.bytes`*:: +type: long + +-- + +*`system.process.cgroup.cpu.stats.periods`*:: + -- -Memory usage by the current cgroup slice. +Number of periods seen by the CPU. + +type: long + +-- + +*`system.process.cgroup.cpu.stats.throttled.periods`*:: ++ +-- +Number of throttled periods seen by the CPU. + +type: long + +-- + +*`system.process.cgroup.cpu.stats.throttled.ns`*:: ++ +-- +Nanoseconds spent throttled seen by the CPU. + +type: long + +-- + +[float] +=== cpuacct + +CPU Accounting-specific cgroup metrics and limits. + + +*`system.process.cgroup.cpuacct.id`*:: ++ +-- +ID for the current cgroup CPU. + +type: keyword + +-- + +*`system.process.cgroup.cpuacct.total.ns`*:: ++ +-- +Total CPU time for the current cgroup CPU in nanoseconds. + +type: long + +-- + +[float] +=== memory + +Memory-specific cgroup metrics and limits. + + +*`system.process.cgroup.memory.mem.limit.bytes`*:: ++ +-- +Memory limit for the current cgroup slice. type: long @@ -9201,10 +22591,10 @@ format: bytes -- -*`system.process.cgroup.memory.stats.inactive_file.bytes`*:: +*`system.process.cgroup.memory.mem.usage.bytes`*:: + -- -Inactive memory for the current cgroup slice. +Memory usage by the current cgroup slice. 
type: long diff --git a/docs/getting-started-apm-server.asciidoc b/docs/getting-started-apm-server.asciidoc index 83e47e45d2b..caf15be56c8 100644 --- a/docs/getting-started-apm-server.asciidoc +++ b/docs/getting-started-apm-server.asciidoc @@ -232,13 +232,39 @@ Any changes are automatically appended to the `apm-server.yml` configuration fil Full details are available in the {cloud}/ec-manage-apm-settings.html[APM user settings] documentation. [float] -[[index-template-config]] ==== Self installation -It is recommend that you run the `setup` command before starting {beatname_uc}. +[float] +===== APM integration for Elastic Agent + +If you're running the <>, you can configure settings via {fleet-guide}/agent-policy.html[Elastic Agent policies]. + +[float] +===== Standalone + +You can edit the `apm-server.yml` configuration file to customize it to your needs. +The location of this file varies by platform, but the <> will help you locate it. +All available configuration options are outlined in +{apm-server-ref-v}/configuring-howto-apm-server.html[configuring APM Server]. + +[float] +====== Standalone setup (data streams) + +If you're running APM Server as a standalone process, we recommend that you configure <> +to write events to data streams in the same way as the <>. You will need to install the APM +integration package to set up Elasticsearch before APM Server becomes operative. + +[float] +[[index-template-config]] +====== Standalone setup (classic indices) + +deprecated::[7.16.0,Users should now use the <> or configure <>.] + +When running APM Server standalone with classic indices, +we recommend that you run the `setup` command before starting {beatname_uc}. The `setup` command sets up the initial environment, including the Elasticsearch index template, -and ILM write alias. In Elasticsearch, {ref}/indices-templates.html[index templates] +and ILM write alias. In Elasticsearch, {ref}/index-templates.html[index templates] are used to define field settings and mappings. IMPORTANT: The {kibana-ref}/xpack-apm.html[Kibana APM UI] relies on specific mappings. @@ -349,14 +375,16 @@ brew services start elastic/tap/apm-server-full If you haven't already, you can now install APM Agents in your services! * {apm-go-ref-v}/introduction.html[Go agent] +* {apm-ios-ref-v}/intro.html[iOS agent] * {apm-java-ref-v}/intro.html[Java agent] * {apm-dotnet-ref-v}/intro.html[.NET agent] * {apm-node-ref-v}/intro.html[Node.js agent] +* {apm-php-ref-v}/intro.html[PHP agent] * {apm-py-ref-v}/getting-started.html[Python agent] * {apm-ruby-ref-v}/introduction.html[Ruby agent] * {apm-rum-ref-v}/intro.html[JavaScript Real User Monitoring (RUM) agent] -Once you have at least one Agent sending data to APM Server, +Once you have at least one APM agent sending data to APM Server, you can start visualizing your data in the {kibana-ref}/xpack-apm.html[APM app]. If you're migrating from Jaeger, see <>. 
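For illustration, the standalone classic-indices workflow described above comes down to two commands. A minimal sketch, assuming a tar.gz installation and a local Elasticsearch on the default port; the `-E` override and host value are assumptions to adapt for your environment:

[source,sh]
----
# Load the index template and ILM write alias before sending any events.
./apm-server setup -E 'output.elasticsearch.hosts=["localhost:9200"]'

# Then start APM Server in the foreground, logging to stderr.
./apm-server -e -E 'output.elasticsearch.hosts=["localhost:9200"]'
----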
diff --git a/docs/guide/agent-server-compatibility.asciidoc b/docs/guide/agent-server-compatibility.asciidoc index 5c73fe14cd2..f8c94a425c3 100644 --- a/docs/guide/agent-server-compatibility.asciidoc +++ b/docs/guide/agent-server-compatibility.asciidoc @@ -5,41 +5,42 @@ The chart below outlines the compatibility between different versions of the APM [options="header"] |==== -|Agent |Agent Version |APM Server Version +|Agent |Agent version |APM Server version // Go -.1+|**Go Agent** +.1+|**Go agent** |`1.x` |≥ `6.5` +// iOS +.1+|**iOS agent** +|`0.x` |≥ `7.14` + // Java -.1+|**Java Agent** +.1+|**Java agent** |`1.x`|≥ `6.5` // .NET -.1+|**.NET Agent** +.1+|**.NET agent** |`1.x` |≥ `6.5` // Node -.3+|**Node.js Agent** -|`1.x` |`6.2`-`6.x` -|`2.x` |≥ `6.5` +.1+|**Node.js agent** |`3.x` |≥ `6.6` +// PHP +.1+|**PHP agent** +|`1.x` |≥ `7.0` + // Python -.3+|**Python Agent** -|`2.x`, `3.x` |`6.2`-`6.x` -|`4.x` |≥ `6.5` -|`5.x` |≥ `6.6` +.1+|**Python agent** +|`6.x` |≥ `6.6` // Ruby -.3+|**Ruby Agent** -|`1.x` |`6.4`-`6.x` -|`2.x` |≥ `6.5` +.2+|**Ruby agent** |`3.x` |≥ `6.5` +|`4.x` |≥ `6.5` // RUM -.4+|**JavaScript RUM Agent** -|`0.x` |`6.3`-`6.4` -|`1.x` |`6.4` -|`2.x`, `3.x`, `4.x` |≥ `6.5` +.2+|**JavaScript RUM agent** +|`4.x` |≥ `6.5` |`5.x` |≥ `7.0` |==== diff --git a/docs/guide/apm-breaking-changes.asciidoc b/docs/guide/apm-breaking-changes.asciidoc index baaa7a412dc..5be4fec3e0a 100644 --- a/docs/guide/apm-breaking-changes.asciidoc +++ b/docs/guide/apm-breaking-changes.asciidoc @@ -3,6 +3,11 @@ This section discusses the changes that you need to be aware of when migrating your application from one version of APM to another. +* <> +* <> +* <> +* <> +* <> * <> * <> * <> @@ -19,13 +24,77 @@ This section discusses the changes that you need to be aware of when migrating y * <> * <> -Also see <>. +Also see {observability-guide}/whats-new.html[What's new in Observability {minor-version}]. //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -// tag::notable-v8-breaking-changes[] +// tag::notable-v8-breaking-changes[] // end::notable-v8-breaking-changes[] +// tag::715-bc[] +// end::715-bc[] + +[[breaking-7.14.0]] +=== 7.14.0 APM Breaking changes + +// tag::714-bc[] +No breaking changes. +// end::714-bc[] + +[[breaking-7.13.0]] +=== 7.13.0 APM Breaking changes + +// tag::713-bc[] +No breaking changes. +// end::713-bc[] + +[[breaking-7.12.0]] +=== 7.12.0 APM Breaking changes + +// tag::712-bc[] +There are three breaking changes to be aware of; +these changes only impact users ingesting data with +{apm-server-ref-v}/jaeger.html[Jaeger clients]. + +* Leading 0s are no longer removed from Jaeger client trace/span ids. ++ +-- +This change ensures distributed tracing continues to work across platforms by creating +consistent, full trace/span IDs from Jaeger clients, Elastic APM agents, +and OpenTelemetry SDKs. +-- + +* Jaeger spans will now have a type of "app" where they previously were "custom". ++ +-- +If the Jaeger span type is not inferred, it will now be "app". +This aligns with the OpenTelemetry Collector exporter +and improves the functionality of the _time spent by span type_ charts in the APM app. +-- + +* Jaeger spans may now have a more accurate outcome of "unknown". ++ +-- +Previously, a "success" outcome was assumed when a span didn't fail. +The new default assigns "unknown", and only sets an outcome of "success" or "failure" when +the outcome is explicitly known. 
+This change aligns with Elastic APM agents and the OpenTelemetry Collector exporter. +-- +// end::712-bc[] + +[[breaking-7.11.0]] +=== 7.11.0 APM Breaking changes + +// tag::notable-breaking-changes[] +No breaking changes. +// end::notable-breaking-changes[] + +[[breaking-7.10.0]] +=== 7.10.0 APM Breaking changes + +// tag::notable-breaking-changes[] +No breaking changes. +// end::notable-breaking-changes[] [[breaking-7.9.0]] === 7.9.0 APM Breaking changes diff --git a/docs/guide/apm-data-model.asciidoc b/docs/guide/apm-data-model.asciidoc index 3145697f624..130cced9f5e 100644 --- a/docs/guide/apm-data-model.asciidoc +++ b/docs/guide/apm-data-model.asciidoc @@ -50,9 +50,11 @@ When this occurs, the APM app will display the number of spans dropped. To configure the number of spans recorded per transaction, see the relevant Agent documentation: * Go: {apm-go-ref-v}/configuration.html#config-transaction-max-spans[`ELASTIC_APM_TRANSACTION_MAX_SPANS`] +* iOS: _Not yet supported_ * Java: {apm-java-ref-v}/config-core.html#config-transaction-max-spans[`transaction_max_spans`] * .NET: {apm-dotnet-ref-v}/config-core.html#config-transaction-max-spans[`TransactionMaxSpans`] * Node.js: {apm-node-ref-v}/configuration.html#transaction-max-spans[`transactionMaxSpans`] +* PHP: {apm-php-ref-v}/configuration-reference.html#config-transaction-max-spans[`transaction_max_spans`] * Python: {apm-py-ref-v}/configuration.html#config-transaction-max-spans[`transaction_max_spans`] * Ruby: {apm-ruby-ref-v}/configuration.html#config-transaction-max-spans[`transaction_max_spans`] @@ -206,9 +208,10 @@ Defining too many unique fields in an index is a condition that can lead to a ===== Agent API reference * Go: {apm-go-ref-v}/api.html#context-set-label[`SetLabel`] -* Java: {apm-java-ref-v}/public-api.html#api-transaction-add-tag[`addLabel`] +* Java: {apm-java-ref-v}/public-api.html#api-transaction-add-tag[`setLabel`] * .NET: {apm-dotnet-ref-v}/public-api.html#api-transaction-tags[`Labels`] -* Node.js: {apm-node-ref-v}/agent-api.html#apm-set-label[`setLabel`] | {apm-node-ref-v}/agent-api.html#apm-add-labels[`addLabel`] +* Node.js: {apm-node-ref-v}/agent-api.html#apm-set-label[`setLabel`] | {apm-node-ref-v}/agent-api.html#apm-add-labels[`addLabels`] +* PHP: {apm-php-ref}/public-api.html#api-transaction-interface-set-label[`Transaction` `setLabel`] | {apm-php-ref}/public-api.html#api-span-interface-set-label[`Span` `setLabel`] * Python: {apm-py-ref-v}/api.html#api-label[`elasticapm.label()`] * Ruby: {apm-ruby-ref-v}/api.html#api-agent-set-label[`set_label`] * Rum: {apm-rum-ref-v}/agent-api.html#apm-add-labels[`addLabels`] @@ -238,9 +241,11 @@ IMPORTANT: Setting a circular object, a large object, or a non JSON serializable ===== Agent API reference * Go: {apm-go-ref-v}/api.html#context-set-custom[`SetCustom`] +* iOS: _coming soon_ * Java: {apm-java-ref-v}/public-api.html#api-transaction-add-custom-context[`addCustomContext`] * .NET: _coming soon_ * Node.js: {apm-node-ref-v}/agent-api.html#apm-set-custom-context[`setCustomContext`] +* PHP: _coming soon_ * Python: {apm-py-ref-v}/api.html#api-set-custom-context[`set_custom_context`] * Ruby: {apm-ruby-ref-v}/api.html#api-agent-set-custom-context[`set_custom_context`] * Rum: {apm-rum-ref-v}/agent-api.html#apm-set-custom-context[`setCustomContext`] @@ -262,9 +267,11 @@ Indexed means the data is searchable and aggregatable in Elasticsearch. 
* Go: {apm-go-ref-v}/api.html#context-set-username[`SetUsername`] | {apm-go-ref-v}/api.html#context-set-user-id[`SetUserID`] | {apm-go-ref-v}/api.html#context-set-user-email[`SetUserEmail`] +* iOS: _coming soon_ * Java: {apm-java-ref-v}/public-api.html#api-transaction-set-user[`setUser`] * .NET _coming soon_ * Node.js: {apm-node-ref-v}/agent-api.html#apm-set-user-context[`setUserContext`] +* PHP: _coming soon_ * Python: {apm-py-ref-v}/api.html#api-set-user-context[`set_user_context`] * Ruby: {apm-ruby-ref-v}/api.html#api-agent-set-user[`set_user`] * Rum: {apm-rum-ref-v}/agent-api.html#apm-set-user-context[`setUserContext`] diff --git a/docs/guide/apm-doc-directory.asciidoc b/docs/guide/apm-doc-directory.asciidoc index 4f2792be63f..cd368843bfc 100644 --- a/docs/guide/apm-doc-directory.asciidoc +++ b/docs/guide/apm-doc-directory.asciidoc @@ -1,12 +1,12 @@ [[components]] -== Components and documentation +=== Components and documentation -Elastic APM consists of four components: *APM Agents*, *APM Server*, *Elasticsearch*, and *Kibana*. +Elastic APM consists of four components: *APM agents*, *APM Server*, *Elasticsearch*, and *Kibana*. image::images/apm-architecture-cloud.png[Architecture of Elastic APM] [float] -=== APM Agents +==== APM Agents APM agents are open source libraries written in the same language as your service. You may only need one, or you might use all of them. @@ -17,24 +17,26 @@ This data is buffered for a short period and sent on to APM Server. Each agent has its own documentation: * {apm-go-ref-v}/introduction.html[Go agent] +* {apm-ios-ref-v}/intro.html[iOS agent] * {apm-java-ref-v}/intro.html[Java agent] * {apm-dotnet-ref-v}/intro.html[.NET agent] * {apm-node-ref-v}/intro.html[Node.js agent] +* {apm-php-ref-v}/intro.html[PHP agent] * {apm-py-ref-v}/getting-started.html[Python agent] * {apm-ruby-ref-v}/introduction.html[Ruby agent] * {apm-rum-ref-v}/intro.html[JavaScript Real User Monitoring (RUM) agent] [float] -=== APM Server +==== APM Server -APM Server is an open source application that receives performance data from your APM agents. +APM Server is a free and open application that receives performance data from your APM agents. It's a {apm-server-ref-v}/overview.html#why-separate-component[separate component by design], which helps keep the agents light, prevents certain security risks, and improves compatibility across the Elastic Stack. After the APM Server has validated and processed events from the APM agents, the server transforms the data into Elasticsearch documents and stores them in corresponding {apm-server-ref-v}/exploring-es-data.html[Elasticsearch indices]. -In a matter of seconds you can start viewing your application performance data in the Kibana APM app. +In a matter of seconds, you can start viewing your application performance data in the Kibana APM app. The {apm-server-ref-v}/index.html[APM Server reference] provides everything you need when it comes to working with the server. Here you can learn more about {apm-server-ref-v}/getting-started-apm-server.html[installation], @@ -43,16 +45,16 @@ Here you can learn more about {apm-server-ref-v}/getting-started-apm-server.html {apm-server-ref-v}/monitoring.html[monitoring], and more. [float] -=== Elasticsearch +==== Elasticsearch -{ref}/index.html[Elasticsearch] is a highly scalable open source full-text search and analytics engine. +{ref}/index.html[Elasticsearch] is a highly scalable free and open full-text search and analytics engine. 
It allows you to store, search, and analyze large volumes of data quickly and in near real time. Elasticsearch is used to store APM performance metrics and make use of its aggregations. [float] -=== Kibana APM app +==== Kibana APM app -{kibana-ref}/index.html[Kibana] is an open source analytics and visualization platform designed to work with Elasticsearch. +{kibana-ref}/index.html[Kibana] is a free and open analytics and visualization platform designed to work with Elasticsearch. You use Kibana to search, view, and interact with data stored in Elasticsearch. Since application performance monitoring is all about visualizing data and detecting bottlenecks, @@ -63,4 +65,6 @@ The following sections will help you get started: * {apm-app-ref}/apm-getting-started.html[Get started] * {apm-app-ref}/apm-how-to.html[How-to guides] -APM also has built-in integrations with Machine Learning. To learn more about this feature, refer to the Kibana documentation for {kibana-ref}/machine-learning-integration.html[Machine learning integration]. +APM also has built-in integrations with Machine learning. To learn more about this feature, +or the anomaly detection feature that's built on top of it, +refer to {kibana-ref}/machine-learning-integration.html[Machine learning integration]. diff --git a/docs/guide/apm-release-notes.asciidoc b/docs/guide/apm-release-notes.asciidoc deleted file mode 100644 index 384c04d4a01..00000000000 --- a/docs/guide/apm-release-notes.asciidoc +++ /dev/null @@ -1,547 +0,0 @@ -[[apm-release-notes]] -== Release highlights - -This section summarizes the most important APM changes in each release. - -For a full list of changes, see the -{apm-server-ref-v}/release-notes.html[APM Server Release Notes] or the -{kibana-ref}/release-notes.html[Kibana Release Notes]. - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -//NOTE: The notable-highlights tagged regions are re-used in the -//Installation and Upgrade Guide -// tag::notable-v8-highlights[] - -// end::notable-v8-highlights[] - -[[release-highlights-7.9.0]] -=== APM version 7.9.0 - -// tag::notable-v79-highlights[] -OpenTelemetry integration:: - -Elastic’s OpenTelemetry integration allows you to reuse your existing OpenTelemetry instrumentation to quickly analyze distributed traces and metrics with the Elastic Stack. -+ -Elastic’s integration is designed to drop right into your current OpenTelemetry setup. We’ve done this by extending the "contrib" OpenTelemetry collector and adding an Elastic exporter. This exporter translates the OpenTelemetry trace data collected from your services to Elastic’s protocol, before sending the data to the Elastic Stack. By extending the OpenTelemetry collector, no changes are needed in your instrumented services in order to begin using the Elastic Stack. -+ -[role="screenshot"] -image::images/open-telemetry-elastic-arch.png[OpenTelemetry Elastic architecture diagram] -+ -See {apm-overview-ref-v}/open-telemetry-elastic.html[OpenTelemetry integration] -for more information. - -Machine learning integration improvements:: - -The Machine learning integration initiates a new job predefined to calculate anomaly scores on APM transaction durations. -With this integration, you can quickly pinpoint anomalous transactions and see the health of -any upstream and downstream services. -+ -In previous releases, machine learning jobs were initiated at the service level. 
-In 7.9, we've moved jobs to the environment level—this means any new services added to your architecture will automatically be able to take advantage of preexisting machine learning jobs without any additional configuration. -+ -[role="screenshot"] -image::images/7.8-service-map-anomaly.png[APM Service maps] -+ -See {kibana-ref}/machine-learning-integration.html[machine learning integration] for more information. - -// end::notable-v79-highlights[] - -[[release-highlights-7.8.0]] -=== APM version 7.8.0 - -// tag::notable-v78-highlights[] -Service maps:: - -Service maps now integrate with machine learning, offering real-time health indicators based -on anomaly detection scores, to help you quickly assess the status and health of your services. -+ -Specifically, machine learning jobs can be created to calculate anomaly scores on APM transaction -durations within the selected service. When these jobs are active, -service maps will display a color-coded anomaly indicator based on the detected anomaly score. -From these scores, you can jump straight to the anomaly detection metric viewier in the Machine learning app to learn more. -+ -[role="screenshot"] -image::images/7.8-service-map-anomaly.png[APM Service maps] -+ -See {kibana-ref}/service-maps.html[service maps] for more information. - -Deployment annotations:: - -For enhanced visibility into your deployments, we offer deployment annotations on all transaction charts. -Starting in 7.8, you can now explicitly create deployment annotations with our annotation API. -The API can integrate into your CI/CD pipeline, so that each time you deploy, a POST request is sent to the annotation API endpoint: -+ -[source, curl] ----- -curl -X POST \ - http://localhost:5601/api/apm/services/${SERVICE_NAME}/annotation \ --H 'Content-Type: application/json' \ --H 'kbn-xsrf: true' \ --H 'Authorization: Basic ${API_KEY}' \ --d '{ - "@timestamp": "${DEPLOY_TIME}", - "service": { - "version": "${SERVICE_VERSION}" - }, - "message": "${MESSAGE}" - }' ----- -+ -See {kibana-ref}/transactions-annotations.html[Track deployments with annotations] for more information. - -RUM + Elastic Maps:: - -There is now a dedicated “Observability” solution layer in the Maps app. -Import your data in just three clicks to begin exploring. -+ -Learn more about Elastic Maps in the {kibana-ref}/maps.html[documentation]. - -RUM event payload compression:: - -APM Server has a new event intake API for RUM events. -This intake API supports compressed event payloads from the JavaScript RUM Agent. -The difference is impressive--a reduction of around ~45% in the payload size for an average web page! -+ -Configure the {apm-rum-ref}/configuration.html#api-version[`api-version`] in your RUM Agent to begin using this feature. - -.NET Agent SqlClient support:: - -Auto instrumentation for `System.Data.SqlClient` is now available for both .NET Core and .NET Framework applications. -This means you can get out-of-the-box visibility, including service maps and distributed traces, for the SqlClient calls made from your .NET applications. -+ -Learn more in {apm-dotnet-ref}/setup.html#setup-sqlclient[set up SQLClient], -and upgrade to the latest version of the .NET agent to get started. - -// end::notable-v78-highlights[] - -[[release-highlights-7.7.0]] -=== APM version 7.7.0 - -// tag::notable-v77-highlights[] -Service maps:: - -{apm-app-ref}/service-maps.html[Service maps] are now available in the APM app. 
-A service map is a real-time visual representation of the instrumented services in your application’s architecture. -It shows you how these services are connected, -along with high-level metrics like average transaction duration, requests per minute, -and errors per minute, that allow you to quickly assess the status of your services. -+ -[role="screenshot"] -image::images/7.7-service-maps-java.png[APM Service maps] - -Alerting:: - -Want to receive an email when the duration of your transactions are slower than a set threshold? -How about a slack message when your application's error rate spikes? -Now you can! -The APM app integrates with Kibana’s {apm-app-ref}/apm-alerts.html[alerting and actions] feature. -It provides a set of built-in actions and APM specific threshold alerts for you to easily set up and use. -+ -[role="screenshot"] -image::images/7.7-apm-alert.png[APM app alerts] - -APM Agent central configuration:: - -{apm-app-ref}/agent-configuration.html[Central configuration] now supports even more configuration options, -so you can fine-tune your agents without needing to redeploy. -Open up the configuration page in the APM app for a list of all supported options. -+ -[role="screenshot"] -image::images/7.7-apm-agent-configuration.png[APM Service maps] - -Custom links:: - -Elastic’s custom links feature allows you to easily create up to 500 dynamic links based on your specific APM data. -Custom links can be filtered to only appear in the APM app for relevant services, environments, transaction types, or transaction names. -+ -Need ideas? -Recipes for setting up custom links for emails, GitHub issues, Jira tasks, and more are available in the {apm-app-ref}/custom-links.html[documentation]. - -Inferred spans with async profiler:: - -Identifying a problematic service is only half of the battle when diagnosing application slowdowns. -The Elastic APM Java Agent provides a new way to get method-level insights into your code: -https://www.elastic.co/guide/en/apm/agent/java/master/java-method-monitoring.html[inferred spans with async-profiler]. -This can help you diagnose slow requests due to heavy computations, inefficient algorithms, -or similar problems not related to interactions between services. -// end::notable-v77-highlights[] - -[[release-highlights-7.6.0]] -=== APM version 7.6.0 - -// tag::notable-v76-highlights[] -Experimental Jaeger integration:: - -Elastic APM now integrates with https://www.jaegertracing.io/[Jaeger], an open-source, distributed tracing system. -This integration allows users with an existing Jaeger setup to switch from the default Jaeger backend, -to ingesting data with Elastic's APM Server, storing data in {es}, and visualizing traces in the APM app. -Best of all, this can be done without any instrumentation changes in your application code. -+ -See {apm-server-ref-v}/jaeger.html[Jaeger integration] for more information. - -Experimental API Key authentication:: - -You can now secure the communication between APM Agents and APM Server with -{apm-server-ref-v}/api-key.html[API keys]. -API keys are easy to create and manage, and provide finer authorization rules than secret tokens. - -Elastic Security integration:: - -There are two, brand-new integrations with Elastic Security: -+ -HTTP data collected with Elastic APM is now available in the Security app, -enabling you to hunt for security threats with your APM data. -+ -The Detections feature automatically searches for threats and creates alerts when they are detected. 
-Elastic Security ships with four prebuilt rules, specifically for the APM use case: No User Agent, POST Request Declined, Unauthorized Method, and sqlmap User Agent. -+ -See the {security-guide}/es-ui-overview.html[Security hosts UI] and {security-guide}/prebuilt-rules.html[Security prebuilt rules] -for more information on using Elastic Security. -+ -[role="screenshot"] -image::images/siem-apm-integration.png[Chained exceptions] - -Chained exceptions:: - -Sometimes, an exception can cause or be caused by another exception. -APM Agents and the APM app now support chained exceptions, -and you can visualize when this happens in the APM app! -+ -[role="screenshot"] -image::images/chained-exceptions.png[Chained exceptions] - -Deployment annotations:: - -The APM app now displays {apm-app-ref}/transactions.html[deployment annotations] on all transaction charts. -This feature automatically tags new deployments, so you can easily see if your deploy has increased response times -for an end-user, or if the memory/CPU footprint of your application has increased. -Being able to quickly identify bad deployments enables you to rollback and fix issues without causing costly outages. -+ -Deployment annotations are automatically enabled, and appear when the `service.version` of your app changes. -+ -[role="screenshot"] -image::images/apm-transactions-overview.png[APM Transactions overview] - -Index configuration:: - -Index defaults can now be changed in Kibana. -If you use custom index names for your APM data, this feature makes it very easy to update the indices that the APM app queries. -Simply navigate to *APM > Settings > Indices*, and make your changes! -Index settings in the APM app take precedence over those set in kibana.yml. -+ -[role="screenshot"] -image::images/apm-settings-kib.png[APM Settings in Kibana] -+ -See {apm-app-ref}/apm-settings-in-kibana.html[APM Settings in Kibana] for more details. - -Java Agent support for Kafka and JMS:: - -The Java Agent now supports Kafka and JMS, -enabling you to visualize end to end traces that include the messaging service used to communicate between services. -In addition, you can now see the time your request spent waiting in the queue. -+ -[role="screenshot"] -image::images/java-kafka.png[Java Kafka trace sample] -+ -[role="screenshot"] -image::images/java-metadata.png[Java metadata] -+ -See the APM Java Agent's {apm-java-ref}/supported-technologies-details.html[Supported technologies] for more information. -// end::notable-v76-highlights[] - -[[release-highlights-7.5.0]] -=== APM version 7.5.0 - -// tag::notable-v75-highlights[] - -IMPORTANT: To make use of all the new features introduced in 7.5, -you must ensure you are using version 7.5+ of APM Server and version 7.5+ of Kibana. - -*APM app in Kibana* - -* {kibana-ref}/transactions.html[Aggregate Service Breakdown Charts] is GA. -Visualize where your applications and services are spending most of their -time, and find the root cause of a performance problem quickly. -Not yet available for the .NET Agent. - -[role="screenshot"] -image::images/breakdown-release-notes.png[Aggregate Service Breakdown Charts] - -* {kibana-ref}/agent-configuration.html[APM Agent remote configuration] is GA. -View and edit certain configurations directly in Kibana without having to restart your Agents. -In 7.5, we're introducing two additional configurations: -** `CAPTURE_BODY` - Optionally capture the request body of an HTTP transaction. -** `TRANSACTION_MAX_SPANS` - Limit the number of spans recorded per transaction. 
- -In addition, Agent remote configuration now supports all services and environments. -This means you can configure multiple services and environments in just one setting. - -[role="screenshot"] -image::images/remote-config-release-notes.png[APM Agent configuration] - -* {apm-java-ref}/metrics.html[JVM instance level visibility]: -It's easier than ever to troubleshoot your individual JVM instances. -Select a JVM to see individual CPU usage, memory usage, heap or non-heap memory, -thread count, garbage collection rate, and garbage collection time spent per minute. - -[role="screenshot"] -image::images/jvm-release-notes.png[JVM instance level visibility] - -// end::notable-v75-highlights[] - -[[release-highlights-7.4.0]] -=== APM version 7.4.0 - -// tag::notable-v74-highlights[] - -*APM app in Kibana* - -* {kibana-ref}/filters.html#contextual-filters[Contextual filters]: -Explore your APM data in new ways with our new local filters. -With just a click, you can filter your transactions by type, result, host name, and/or agent name. - -[role="screenshot"] -image::images/structured-filters.jpg[Structured filters in the APM UI] - -* {kibana-ref}/transactions.html#rum-transaction-overview[Geo-location performance visualization chart]: -Visualize performance information about your end users' -experience based on their geo-location. - -[role="screenshot"] -image::images/geo-location.jpg[Geo-location visualization] - -*APM Agents* - -* {apm-overview-ref-v}/observability-integrations.html[Log integration]: -Navigate from a distributed trace to any relevant logs -- without using trace context -- via the APM app. -* {apm-rum-ref}/angular-integration.html[RUM Angular instrumentation]: -Out of the box Angular instrumentation is here! -Instrument your single page applications written in Angular.js without the need to manually create or rename transactions. -* https://github.com/elastic/java-ecs-logging[JAVA ECS Logging library]: -Easily convert your logs to ECS-compatible JSON without creating an additional pipeline. -* {apm-dotnet-ref}/supported-technologies.html[.NET agent full framework support]: -Out of the box instrumentation for the .NET framework. -Say goodbye to APIs, your ASP.NET web applications are now plug and play ready with Elastic APM. - -// end::notable-v74-highlights[] - -[[release-highlights-7.3.0]] -=== APM version 7.3.0 - -// tag::notable-v73-highlights[] - -[discrete] -==== Elastic APM .NET Agent GA - -https://github.com/elastic/apm-agent-dotnet/[Elastic APM agent for .NET] is now -generally available! The .NET Agent adds automatic instrumentation for ASP.NET -Core 2.x+ and Entity Framework Core 2.x+, while also providing a -{apm-dotnet-ref}/public-api.html[Public API] for the .NET agent that will allow -you to instrument any .NET custom application code. - -[discrete] -==== Aggregate service breakdown charts - -beta[] In addition to the transaction duration and throughput charts, the 7.3 -release adds aggregated service breakdown charts for services. These charts help -you visualize where your application and services are spending most of their -time, allowing you to get to the root cause of a performance problem quickly.
-These charts are available in Beta with support for certain APM agents: - -* Java added[1.8.0] -* Go added[1.5.0] -* Node.js added[2.13.0] -* Python added[5.0.0] - -[role="screenshot"] -image::images/apm-highlight-breakdown-charts.png[Aggregate service breakdown charts] - -[discrete] -==== Agent sample rate configuration from APM UI - -beta[] Configuring sampling rate for your services is a whole lot easier with this -release. The new settings page now lets you view and configure the sampling rate -for your services from within the APM UI without restarting them. To learn more -about this configuration, see -{kibana-ref}/agent-configuration.html[APM Agent configuration]. - -[role="screenshot"] -image::images/apm-highlight-sample-rate.png[APM sample rate configuration in Kibana] - -[discrete] -==== React support for Single Page Applications - -The 7.3 release also brings a lot of exciting changes to the Real User -Monitoring (RUM) agent. We have furthered our support of Single Page -Applications (SPA). You can now use the RUM agent to instrument your SPA written -in React.js without the need to manually create or rename transactions. For -more information, see {apm-rum-ref}/react-integration.html[React integration]. - -[discrete] -===== APM RUM integration with Elastic Maps - -This release also makes both the geo-ip and user-agent modules enabled by -default, which makes it easier for you to integrate with -https://www.elastic.co/products/maps[Maps] so you can better understand the -performance of your RUM applications. - -[role="screenshot"] -image::images/apm-highlight-rum-maps.png[APM sample rate configuration in Kibana] - -// end::notable-v73-highlights[] - -[[release-highlights-7.2.0]] -=== APM version 7.2.0 - -[float] -==== New features - -*APM Server* - -* Added support for {apm-server-ref-v}/ilm.html[index lifecycle management (ILM)]: -ILM enables you to automate how you want to manage your indices over time, -by automating rollovers to a new index when the existing index reaches a specified size or age. -* Added {ref}/geoip-processor.html[Geo-IP] processing to the default ingest pipeline: -Pipelines are still disabled by default, but activation now includes a new Geo-IP pipeline. -The Geo-IP pipeline takes an extracted IP address from RUM events and stores it in the `client.geo` field. -This makes it much easier to use location data in Kibana's Visualize maps and Maps app directly: - -[role="screenshot"] -image::images/kibana-geo-data.png[Kibana maps app] - -*APM UI* - -* APM + Uptime integration: APM transactions now include links to the Uptime UI when data is available. -* Added a global filter for {kibana-ref}/filters.html#environment-selector[service environments]: -You can now easily name and switch between environments in the APM UI. -* Added support for {kibana-ref}/metrics.html[agent specific metrics]: -Java is the first to get custom metrics in the APM UI, with more agents to follow. - -[[release-highlights-7.1.0]] -=== APM version 7.1.0 - -No new features. - - -[[release-highlights-7.0.0]] -=== APM version 7.0.0 - -[float] -==== Breaking Changes - -See <> - -[float] -==== New features - -*APM UI* - -* Added support for frozen indices. - -[[release-highlights-6.8.0]] -=== APM version 6.8.0 - -No new features. - -[[release-notes-6.7.0]] -=== APM version 6.7.0 - -No new features. - -[[release-notes-6.6.0]] -=== APM version 6.6.0 - -[float] -==== New features - -* Elastic APM agents now automatically record certain <>. -* Elastic APM agents support the W3C Trace Context. 
-All agents now have <>. -* <> is generally available. - -[[release-notes-6.5.0]] -=== APM version 6.5.0 - -[float] -==== New features - -Elastic APM now enables {apm-overview-ref-v}/distributed-tracing.html[distributed tracing]. - -*APM Server* - -* Intake protocol v2 with distributed tracing support -* Ingest node pipeline registration and use when ingesting documents -* apm-server monitoring - -*APM UI* - -* Distributed tracing UI -* Monitoring UI for apm-server - -*APM agents* - -* Intake protocol v2 with distributed tracing support in all Elastic agents -* Java is now GA -* Go is now GA -* Python switched to contextvars instead of thread locals for context tracking in Python 3.7 -* Node added support for Restify Framework, dropped support for Node.js 4 and 9 - -[[release-notes-6.4.1]] -=== APM version 6.4.1 - -[float] -==== Bug Fixes -Changes introduced in 6.4.0 potentially caused an empty APM Kibana UI. -This happened in case the APM Server was using an outdated configuration file, not configured to index events into separate indices. -To fix this, the APM Kibana UI now falls back to use `apm-*` as default indices to query. -Users can still leverage separate indices for queries by overriding the default values described in {kibana-ref}/apm-settings-kb.html[Kibana APM settings]. - - -[[release-notes-6.4.0]] -=== APM version 6.4.0 - -[float] -==== Breaking changes - -See <>. - -[float] -==== New features - -*APM Server* - -* Logstash output -* Kafka output - - -*APM UI* - -* Query bar -* Machine Learning integration: Anomaly detection on service response times -* Kibana objects (index pattern, dashboards, etc.) can now be imported via the Kibana setup instructions - - -*APM agents* - -* RUM is now GA -* Ruby is now GA -* Java is now Beta -* Go is now Beta -* Python added instrumentation for Cassandra, PyODBC and PyMSSQL -* Node.js added instrumentation for Cassandra and broader MySQL support diff --git a/docs/guide/cross-cluster-search.asciidoc b/docs/guide/cross-cluster-search.asciidoc index 00f34165b47..7039be7dcda 100644 --- a/docs/guide/cross-cluster-search.asciidoc +++ b/docs/guide/cross-cluster-search.asciidoc @@ -1,5 +1,5 @@ [[apm-cross-cluster-search]] -== Cross-cluster search +=== Cross-cluster search Elastic APM utilizes Elasticsearch's cross-cluster search functionality. Cross-cluster search lets you run a single search request against one or more @@ -10,7 +10,7 @@ and allowing for better performance while managing multiple observability use ca [float] [[set-up-ccs]] -=== Set up cross-cluster search +==== Set up cross-cluster search *Step 1. Set up remote clusters.* @@ -19,7 +19,7 @@ If you're using the Hosted Elasticsearch Service, see {cloud}/ec-enable-ccs.html You can add remote clusters directly in Kibana, under *Management* > *Elasticsearch* > *Remote clusters*. All you need is a name for the remote cluster and the seed node(s). Remember the names of your remote clusters, you'll need them in step two. -See {kibana-ref}/working-remote-clusters.html#managing-remote-clusters[managing remote clusters] for detailed information on the setup process. +See {ref}/ccr-getting-started.html[managing remote clusters] for detailed information on the setup process. Alternatively, you can {ref}/modules-remote-clusters.html#configuring-remote-clusters[configure remote clusters] in Elasticsearch's `elasticsearch.yml` file. 
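Remote clusters can also be registered without touching `elasticsearch.yml`, via the cluster settings API. A minimal sketch; the alias `cluster_two` and the seed address are placeholders for your own deployment:

[source,sh]
----
# Register a remote cluster under the alias "cluster_two".
curl -X PUT "localhost:9200/_cluster/settings" \
  -H 'Content-Type: application/json' \
  -d '{
    "persistent": {
      "cluster": {
        "remote": {
          "cluster_two": {
            "seeds": ["10.0.0.2:9300"]
          }
        }
      }
    }
  }'
----

Once the remote cluster is registered, a cross-cluster search is just an index pattern prefixed with the cluster alias, for example `cluster_two:apm-*`.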
diff --git a/docs/guide/data-security.asciidoc b/docs/guide/data-security.asciidoc new file mode 100644 index 00000000000..d9c4dc736eb --- /dev/null +++ b/docs/guide/data-security.asciidoc @@ -0,0 +1,442 @@ +[[data-security]] +=== Data security + +When setting up Elastic APM, it's essential to review all captured data carefully to ensure +it does not contain sensitive information. +When it does, we offer several different ways to filter, manipulate, or obfuscate this data. + +**Built-in data filters** + +APM agents provide built-in support for filtering the following types of data: + +[options="header"] +|==== +|Data type |Common sensitive data +|<> |Passwords, credit card numbers, authorization, etc. +|<> |Passwords, credit card numbers, etc. +|<> |URLs visited, click events, user browser errors, resources used, etc. +|<> |Sensitive user or business information +|==== + +**Custom filters** + +There are two ways to filter other types of APM data: + +|==== +|<> | Applied at ingestion time. +All agents and fields are supported. Data leaves the instrumented service. +There are no performance overhead implications on the instrumented service. + +|<> | Not supported by all agents. +Data is sanitized before leaving the instrumented service. +Potential overhead implications on the instrumented service. +|==== + +[discrete] +[[built-in-filtering]] +=== Built-in data filtering + +APM agents provide built-in support for filtering or obfuscating the following types of data. + +[discrete] +[[filter-http-header]] +==== HTTP headers + +By default, APM agents capture HTTP request and response headers (including cookies). +Most Elastic APM agents provide the ability to sanitize HTTP header fields, +including cookies and `application/x-www-form-urlencoded` data (POST form fields). +Query strings and captured request bodies, like `application/json` data, are not sanitized. + +The default list of sanitized fields attempts to target common field names for data relating to +passwords, credit card numbers, authorization, etc., but can be customized to fit your data. +This sensitive data never leaves the instrumented service. + +This setting supports {kibana-ref}/agent-configuration.html[Central configuration], +which means the list of sanitized fields can be updated without needing to redeploy your services: + +* Go: {apm-go-ref-v}/configuration.html#config-sanitize-field-names[`ELASTIC_APM_SANITIZE_FIELD_NAMES`] +* Java: {apm-java-ref-v}/config-core.html#config-sanitize-field-names[`sanitize_field_names`] +* .NET: {apm-dotnet-ref-v}/config-core.html#config-sanitize-field-names[`sanitizeFieldNames`] +* Node.js: {apm-node-ref-v}/configuration.html#sanitize-field-names[`sanitizeFieldNames`] +// * PHP: {apm-php-ref-v}[``] +* Python: {apm-py-ref-v}/configuration.html#config-sanitize-field-names[`sanitize_field_names`] +* Ruby: {apm-ruby-ref-v}/configuration.html#config-sanitize-field-names[`sanitize_field_names`] + +Alternatively, you can completely disable the capturing of HTTP headers.
+This setting also supports {kibana-ref}/agent-configuration.html[Central configuration]:
+
+* Go: {apm-go-ref-v}/configuration.html#config-capture-headers[`ELASTIC_APM_CAPTURE_HEADERS`]
+* Java: {apm-java-ref-v}/config-core.html#config-capture-headers[`capture_headers`]
+* .NET: {apm-dotnet-ref-v}/config-http.html#config-capture-headers[`CaptureHeaders`]
+* Node.js: {apm-node-ref-v}/configuration.html#capture-headers[`captureHeaders`]
+// * PHP: {apm-php-ref-v}[``]
+* Python: {apm-py-ref-v}/configuration.html#config-capture-headers[`capture_headers`]
+* Ruby: {apm-ruby-ref-v}/configuration.html#config-capture-headers[`capture_headers`]
+
+[discrete]
+[[filter-http-body]]
+==== HTTP bodies
+
+By default, the body of HTTP requests is not recorded.
+Request bodies often contain sensitive data like passwords or credit card numbers,
+so use care when enabling this feature.
+
+This setting supports {kibana-ref}/agent-configuration.html[Central configuration],
+which means it can be updated without needing to redeploy your services:
+
+* Go: {apm-go-ref-v}/configuration.html#config-capture-body[`ELASTIC_APM_CAPTURE_BODY`]
+* Java: {apm-java-ref-v}/config-core.html#config-capture-body[`capture_body`]
+* .NET: {apm-dotnet-ref-v}/config-http.html#config-capture-body[`CaptureBody`]
+* Node.js: {apm-node-ref-v}/configuration.html#capture-body[`captureBody`]
+// * PHP: {apm-php-ref-v}[``]
+* Python: {apm-py-ref-v}/configuration.html#config-capture-body[`capture_body`]
+* Ruby: {apm-ruby-ref-v}/configuration.html#config-capture-body[`capture_body`]
+
+[discrete]
+[[filter-real-user-data]]
+==== Real user monitoring data
+
+Protecting user data is important.
+For that reason, individual RUM instrumentations can be disabled in the RUM agent with the
+{apm-rum-ref-v}/configuration.html#disable-instrumentations[`disableInstrumentations`] configuration variable.
+Disabled instrumentations produce no spans or transactions.
+
+[options="header"]
+|====
+|Disable |Configuration value
+|HTTP requests |`fetch` and `xmlhttprequest`
+|Page load metrics including static resources |`page-load`
+|JavaScript errors on the browser |`error`
+|User click events including URLs visited, mouse clicks, and navigation events |`eventtarget`
+|Single page application route changes |`history`
+|====
+
+[discrete]
+[[filter-database-statements]]
+==== Database statements
+
+For SQL databases, APM agents do not capture the parameters of prepared statements.
+Note that Elastic APM currently does not make an effort to strip parameters of regular statements.
+Not using prepared statements makes your code vulnerable to SQL injection attacks,
+so be sure to use prepared statements.
+
+For non-SQL data stores, such as Elasticsearch or MongoDB,
+Elastic APM captures the full statement for queries.
+For inserts or updates, the full document is not stored.
+To filter or obfuscate data in non-SQL database statements,
+or to remove the statement entirely,
+you can set up an ingest node pipeline.
+
+[discrete]
+[[filter-agent-specific]]
+==== Agent-specific options
+
+Certain agents offer additional filtering and obfuscating options:
+
+**Agent configuration options**
+
+* (Node.js) Remove errors raised by the server-side process:
+Disable with {apm-node-ref-v}/configuration.html#capture-exceptions[captureExceptions].
+
+* (Java) Remove process arguments from transactions:
+This is disabled by default with {apm-java-ref-v}/config-reporter.html#config-include-process-args[`include_process_args`].
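+
+For example, a quick sketch of how these built-in filters might be tuned through
+environment variables for the Go agent (the field patterns shown are illustrative,
+not a recommended production list):
+
+[source,bash]
+----
+# Extend the sanitized field names with custom wildcard patterns
+export ELASTIC_APM_SANITIZE_FIELD_NAMES="password,passwd,*token*,*session*"
+# Keep request body capture off (the default) until payloads have been reviewed
+export ELASTIC_APM_CAPTURE_BODY=off
+# Disable HTTP header capture entirely
+export ELASTIC_APM_CAPTURE_HEADERS=false
+----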
+
+[discrete]
+[[custom-filters]]
+=== Custom filters
+
+There are two ways to filter or obfuscate other types of APM data:
+
+* <>
+* <>
+
+[discrete]
+[[filter-ingest-pipeline]]
+==== Create an ingest node pipeline filter
+
+Ingest node pipelines specify a series of processors that transform data in a specific way.
+Transformation happens prior to indexing, inflicting no performance overhead on the monitored application.
+Pipelines are a flexible and easy way to filter or obfuscate Elastic APM data.
+
+**Example**
+
+Say you decide to <>,
+but quickly notice that sensitive information is being collected in the
+`http.request.body.original` field:
+
+[source,json]
+----
+{
+  "email": "test@abc.com",
+  "password": "hunter2"
+}
+----
+
+To obfuscate the passwords stored in the request body,
+use a series of {ref}/processors.html[ingest processors].
+To start, create a pipeline with a simple description and an empty array of processors:
+
+[source,json]
+----
+{
+  "pipeline": {
+    "description": "redact http.request.body.original.password",
+    "processors": [] <1>
+  }
+}
+----
+<1> The processors defined below will go in this array
+
+Add the first processor to the processors array.
+Because the agent captures the request body as a string, use the
+{ref}/json-processor.html[JSON processor] to convert the original field value into a structured JSON object.
+Save this JSON object in a new field:
+
+[source,json]
+----
+{
+  "json": {
+    "field": "http.request.body.original",
+    "target_field": "http.request.body.original_json",
+    "ignore_failure": true
+  }
+}
+----
+
+If `body.original_json` is not `null`, redact the `password` with the {ref}/set-processor.html[set processor],
+by setting the value of `body.original_json.password` to `"redacted"`:
+
+[source,json]
+----
+{
+  "set": {
+    "field": "http.request.body.original_json.password",
+    "value": "redacted",
+    "if": "ctx?.http?.request?.body?.original_json != null"
+  }
+}
+----
+
+Use the {ref}/convert-processor.html[convert processor] to convert the JSON value of `body.original_json` to a string and set it as the `body.original` value:
+
+[source,json]
+----
+{
+  "convert": {
+    "field": "http.request.body.original_json",
+    "target_field": "http.request.body.original",
+    "type": "string",
+    "if": "ctx?.http?.request?.body?.original_json != null",
+    "ignore_failure": true
+  }
+}
+----
+
+Finally, use the {ref}/remove-processor.html[remove processor] to remove the `body.original_json` field:
+
+[source,json]
+----
+{
+  "remove": {
+    "field": "http.request.body.original_json",
+    "if": "ctx?.http?.request?.body?.original_json != null",
+    "ignore_failure": true
+  }
+}
+----
+
+Now that the pipeline has been defined,
+use the {ref}/put-pipeline-api.html[create or update pipeline API] to register the new pipeline in {es}.
+Name the pipeline `apm_redacted_body_password`:
+
+[source,console]
+----
+PUT _ingest/pipeline/apm_redacted_body_password
+{
+  "description": "redact http.request.body.original.password",
+  "processors": [
+    {
+      "json": {
+        "field": "http.request.body.original",
+        "target_field": "http.request.body.original_json",
+        "ignore_failure": true
+      }
+    },
+    {
+      "set": {
+        "field": "http.request.body.original_json.password",
+        "value": "redacted",
+        "if": "ctx?.http?.request?.body?.original_json != null"
+      }
+    },
+    {
+      "convert": {
+        "field": "http.request.body.original_json",
+        "target_field": "http.request.body.original",
+        "type": "string",
+        "if": "ctx?.http?.request?.body?.original_json != null",
+        "ignore_failure": true
+      }
+    },
+    {
+      "remove": {
+        "field": "http.request.body.original_json",
+        "if": "ctx?.http?.request?.body?.original_json != null",
+        "ignore_failure": true
+      }
+    }
+  ]
+}
+----
+
+To make sure the `apm_redacted_body_password` pipeline works correctly,
+test it with the {ref}/simulate-pipeline-api.html[simulate pipeline API].
+This API allows you to run multiple documents through a pipeline to ensure it is working correctly.
+
+The request below simulates running three different documents through the pipeline:
+
+[source,console]
+----
+POST _ingest/pipeline/apm_redacted_body_password/_simulate
+{
+  "docs": [
+    {
+      "_source": { <1>
+        "http": {
+          "request": {
+            "body": {
+              "original": """{"email": "test@abc.com", "password": "hunter2"}"""
+            }
+          }
+        }
+      }
+    },
+    {
+      "_source": { <2>
+        "some-other-field": true
+      }
+    },
+    {
+      "_source": { <3>
+        "http": {
+          "request": {
+            "body": {
+              "original": """["invalid json" """
+            }
+          }
+        }
+      }
+    }
+  ]
+}
+----
+<1> This document features the same sensitive data from the original example above
+<2> This document only contains an unrelated field
+<3> This document contains invalid JSON
+
+The API response should be similar to this:
+
+[source,json]
+----
+{
+  "docs" : [
+    {
+      "doc" : {
+        "_source" : {
+          "http" : {
+            "request" : {
+              "body" : {
+                "original" : {
+                  "password" : "redacted",
+                  "email" : "test@abc.com"
+                }
+              }
+            }
+          }
+        }
+      }
+    },
+    {
+      "doc" : {
+        "_source" : {
+          "some-other-field" : true
+        }
+      }
+    },
+    {
+      "doc" : {
+        "_source" : {
+          "http" : {
+            "request" : {
+              "body" : {
+                "original" : """["invalid json" """
+              }
+            }
+          }
+        }
+      }
+    }
+  ]
+}
+----
+
+As you can see, only the first simulated document has a redacted password field.
+As expected, all other documents are unaffected.
+
+The final step in this process is to add the newly created `apm_redacted_body_password` pipeline
+to the default `apm` pipeline. This ensures that all APM data ingested into {es} runs through the pipeline.
+
+Get the current list of `apm` pipelines:
+
+[source,console]
+----
+GET _ingest/pipeline/apm
+----
+
+Append the newly created pipeline to the end of the processors array and use the
+create or update pipeline API to register the updated `apm` pipeline.
+Your request will look similar to this:
+
+[source,console]
+----
+PUT _ingest/pipeline/apm
+{
+  "description" : "Default enrichment for APM events",
+  "processors" : [
+    {
+      "pipeline" : {
+        "name" : "apm_user_agent"
+      }
+    },
+    {
+      "pipeline" : {
+        "name" : "apm_user_geo"
+      }
+    },
+    {
+      "pipeline": {
+        "name": "apm_redacted_body_password"
+      }
+    }
+  ]
+}
+----
+
+That's it! Sit back and relax: passwords have been redacted from your APM HTTP body data.
+
+TIP: See {apm-server-ref-v}/configuring-ingest-node.html[parse data using ingest node pipelines]
+to learn more about the default `apm` pipeline.
+ +[discrete] +[[filter-in-agent]] +==== APM agent filters + +Some APM agents offer a way to manipulate or drop APM events _before_ they are sent to the APM Server. +Please see the relevant agent's documentation for more information and examples: + +// * Go: {apm-go-ref-v}/[] +// * Java: {apm-java-ref-v}/[] +* .NET: {apm-dotnet-ref-v}/public-api.html#filter-api[Filter API]. +* Node.js: {apm-node-ref-v}/agent-api.html#apm-add-filter[`addFilter()`]. +// * PHP: {apm-php-ref-v}[] +* Python: {apm-py-ref-v}/sanitizing-data.html[custom processors]. +// * Ruby: {apm-ruby-ref-v}/[] diff --git a/docs/guide/distributed-tracing.asciidoc b/docs/guide/distributed-tracing.asciidoc index b5b5843566f..51a69d2b2b9 100644 --- a/docs/guide/distributed-tracing.asciidoc +++ b/docs/guide/distributed-tracing.asciidoc @@ -1,17 +1,119 @@ [[distributed-tracing]] -== Distributed tracing +=== Distributed tracing -Together, <> and <> form a `Trace`. -Traces are not events, but group together events that have a common root. +A `trace` is a group of <> and <> with a common root. +Each `trace` tracks the entirety of a single request. +When a `trace` travels through multiple services, as is common in a microservice architecture, +it is known as a distributed trace. -Elastic APM supports distributed tracing. -Distributed tracing enables you to analyze performance throughout your microservices architecture all in one view. -This is accomplished by tracing all of the requests - from the initial web request to your front-end service - to queries made to your back-end services. -This makes finding possible bottlenecks throughout your application much easier and faster. -Best of all, there's no additional configuration needed for distributed tracing, just ensure you're using the latest version of the applicable {apm-agents-ref}/index.html[agent]. +[float] +=== Why is distributed tracing important? -The APM app in Kibana also supports distributed tracing. -The Timeline visualization has been redesigned to show all of the transactions from individual services that are connected in a trace: +Distributed tracing enables you to analyze performance throughout your microservice architecture +by tracing the entirety of a request -- from the initial web request on your front-end service +all the way to database queries made on your back-end services. + +Tracking requests as they propagate through your services provides an end-to-end picture of +where your application is spending time, where errors are occurring, and where bottlenecks are forming. +Distributed tracing eliminates individual service's data silos and reveals what's happening outside of +service borders. + +For supported technologies, distributed tracing works out-of-the-box, with no additional configuration required. + +[float] +=== How distributed tracing works + +Distributed tracing works by injecting a custom `traceparent` HTTP header into outgoing requests. +This header includes information, like `trace-id`, which is used to identify the current trace, +and `parent-id`, which is used to identify the parent of the current span on incoming requests +or the current span on an outgoing request. + +When a service is working on a request, it checks for the existence of this HTTP header. +If it's missing, the service starts a new trace. +If it exists, the service ensures the current action is added as a child of the existing trace, +and continues to propagate the trace. 
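+
+For reference, a W3C-style `traceparent` header carries four dash-separated fields:
+version, trace ID, parent span ID, and trace flags. A sketch with illustrative values
+(the IDs shown follow the shape defined by the W3C spec, not a real trace):
+
+[source,bash]
+----
+# version - trace-id - parent-id - trace-flags
+traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
+----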
+ +[float] +==== Trace propagation examples + +In this example, Elastic's Ruby agent communicates with Elastic's Java agent. +Both support the `traceparent` header, and trace data is successfully propagated. + +image::images/dt-trace-ex1.png[How traceparent propagation works] + +In this example, Elastic's Ruby agent communicates with OpenTelemetry's Java agent. +Both support the `traceparent` header, and trace data is successfully propagated. + +image::images/dt-trace-ex2.png[How traceparent propagation works] + +In this example, the trace meets a piece of middleware that doesn't propagate the `traceparent` header. +The distributed trace ends and any further communication will result in a new trace. + +image::images/dt-trace-ex3.png[How traceparent propagation works] + + +[float] +[[w3c-tracecontext]] +==== W3C Tracecontext spec + +All Elastic agents now support the official W3C tracecontext spec and `traceparent` header. +See the table below for the minimum required agent version: + +[options="header"] +|==== +|Agent name |Agent Version +|**Go Agent**| ≥`1.6` +|**Java Agent**| ≥`1.14` +|**.NET Agent**| ≥`1.3` +|**Node.js Agent**| ≥`3.4` +|**Python Agent**| ≥`5.4` +|**Ruby Agent**| ≥`3.5` +|**RUM Agent**| ≥`5.0` +|==== + +NOTE: Older Elastic agents use a unique `elastic-apm-traceparent` header. +For backward-compatibility purposes, new versions of Elastic agents still support this header. + +[float] +=== Visualize distributed tracing + +The APM app's timeline visualization provides a visual deep-dive into each of your application's traces: [role="screenshot"] image::images/apm-distributed-tracing.png[Distributed tracing in the APM UI] + +[float] +=== Manual distributed tracing + +Elastic agents automatically propagate distributed tracing context for supported technologies. +If your service communicates over a different, unsupported protocol, +you can manually propagate distributed tracing context from a sending service to a receiving service +with each agent's API. + +[float] +==== Add the `traceparent` header to outgoing requests + +Sending services must add the `traceparent` header to outgoing requests. + +-- +include::../tab-widgets/distributed-trace-send-widget.asciidoc[] +-- + +[float] +==== Parse the `traceparent` header on incoming requests + +Receiving services must parse the incoming `traceparent` header, +and start a new transaction or span as a child of the received context. + +-- +include::../tab-widgets/distributed-trace-receive-widget.asciidoc[] +-- + +[float] +=== Distributed tracing with RUM + +Some additional setup may be required to correlate requests correctly with the Real User Monitoring (RUM) agent. + +See the {apm-rum-ref}/distributed-tracing-guide.html[RUM distributed tracing guide] +for information on enabling cross-origin requests, setting up server configuration, +and working with dynamically-generated HTML. 
diff --git a/docs/guide/features.asciidoc b/docs/guide/features.asciidoc new file mode 100644 index 00000000000..2758c432081 --- /dev/null +++ b/docs/guide/features.asciidoc @@ -0,0 +1,31 @@ +[[apm-features]] +== Elastic APM features + +++++ +Features +++++ + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::./data-security.asciidoc[] + +include::./distributed-tracing.asciidoc[] + +include::./rum.asciidoc[] + +include::./trace-sampling.asciidoc[] + +include::./opentracing.asciidoc[] + +include::./opentelemetry-elastic.asciidoc[] + +include::./obs-integrations.asciidoc[] + +include::./cross-cluster-search.asciidoc[] \ No newline at end of file diff --git a/docs/guide/images/apm-distributed-tracing.png b/docs/guide/images/apm-distributed-tracing.png index ba9b308d227..7d51e273f9d 100644 Binary files a/docs/guide/images/apm-distributed-tracing.png and b/docs/guide/images/apm-distributed-tracing.png differ diff --git a/docs/guide/images/dt-sampling-example.png b/docs/guide/images/dt-sampling-example.png new file mode 100644 index 00000000000..015b7c67e7f Binary files /dev/null and b/docs/guide/images/dt-sampling-example.png differ diff --git a/docs/guide/images/dt-trace-ex1.png b/docs/guide/images/dt-trace-ex1.png new file mode 100644 index 00000000000..ca97955ee8b Binary files /dev/null and b/docs/guide/images/dt-trace-ex1.png differ diff --git a/docs/guide/images/dt-trace-ex2.png b/docs/guide/images/dt-trace-ex2.png new file mode 100644 index 00000000000..3df0827f586 Binary files /dev/null and b/docs/guide/images/dt-trace-ex2.png differ diff --git a/docs/guide/images/dt-trace-ex3.png b/docs/guide/images/dt-trace-ex3.png new file mode 100644 index 00000000000..1bb666b030a Binary files /dev/null and b/docs/guide/images/dt-trace-ex3.png differ diff --git a/docs/guide/images/ecommerce-dashboard.png b/docs/guide/images/ecommerce-dashboard.png new file mode 100644 index 00000000000..f68dc3cc568 Binary files /dev/null and b/docs/guide/images/ecommerce-dashboard.png differ diff --git a/docs/guide/images/open-telemetry-exporter-arch.png b/docs/guide/images/open-telemetry-exporter-arch.png new file mode 100644 index 00000000000..4499d65ec6b Binary files /dev/null and b/docs/guide/images/open-telemetry-exporter-arch.png differ diff --git a/docs/guide/images/open-telemetry-protocol-arch.png b/docs/guide/images/open-telemetry-protocol-arch.png new file mode 100644 index 00000000000..31a382ad393 Binary files /dev/null and b/docs/guide/images/open-telemetry-protocol-arch.png differ diff --git a/docs/guide/index.asciidoc b/docs/guide/index.asciidoc index 32c99fd21fa..3ca57004d30 100644 --- a/docs/guide/index.asciidoc +++ b/docs/guide/index.asciidoc @@ -1,6 +1,8 @@ include::../version.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] +:apm-ref-all: https://www.elastic.co/guide/en/apm/get-started/ + ifdef::env-github[] NOTE: For the best reading experience, please view this documentation at https://www.elastic.co/guide/en/apm/get-started[elastic.co] @@ -15,19 +17,11 @@ include::./apm-doc-directory.asciidoc[] include::./install-and-run.asciidoc[] -include::./apm-data-model.asciidoc[] - -include::./distributed-tracing.asciidoc[] - -include::./rum.asciidoc[] +include::./quick-start-overview.asciidoc[] -include::./opentracing.asciidoc[] - -include::./opentelemetry-elastic.asciidoc[] - -include::./obs-integrations.asciidoc[] +include::./apm-data-model.asciidoc[] -include::./cross-cluster-search.asciidoc[] +include::./features.asciidoc[] include::./agent-server-compatibility.asciidoc[] 
@@ -35,6 +29,4 @@ include::./troubleshooting.asciidoc[]
 
 include::./apm-breaking-changes.asciidoc[]
 
-include::./apm-release-notes.asciidoc[]
-
 include::./redirects.asciidoc[]
diff --git a/docs/guide/install-and-run.asciidoc b/docs/guide/install-and-run.asciidoc
index 41e32d8bfc1..cacd1595316 100644
--- a/docs/guide/install-and-run.asciidoc
+++ b/docs/guide/install-and-run.asciidoc
@@ -1,269 +1,100 @@
 [[install-and-run]]
-== Install and run
+== Quick start guide
 
-The easiest way to get started with Elastic APM is by using our
-https://www.elastic.co/cloud/elasticsearch-service[hosted {es} Service] on
-Elastic Cloud. The {es} Service is available on AWS, GCP, and Azure,
-and automatically configures APM Server to work with {es} and {kib}.
+This guide describes how to get started quickly with Elastic APM. You’ll learn how to:
 
-[float]
-=== Hosted Elasticsearch Service
-
-image::images/apm-architecture-cloud.png[Install Elastic APM on cloud]
-
-Skip managing your own {es}, {kib}, and APM Server by using our
-https://www.elastic.co/cloud/elasticsearch-service[hosted {es} Service] on
-Elastic Cloud.
-
-{ess-trial}[Try out the {es} Service for free],
-then jump straight to <>.
+* Spin up {es}, {kib}, and APM Server on {ess}
+* Install APM agents
+* Set basic configuration options
+* Visualize your APM data in {kib}
 
 [float]
 [[before-installation]]
-=== Install and manage the stack yourself
-
-image::images/apm-architecture-diy.png[Install Elastic APM yourself]
+=== Step 1: Spin up the Elastic Stack
 
-To install the stack yourself, first see the https://www.elastic.co/support/matrix[Elastic Support Matrix] for information about supported operating systems and product compatibility.
-We recommend you use the same version of Elasticsearch, Kibana, and APM Server.
-
-* <>
-* <>
-* <>
-* <>
+include::../tab-widgets/spin-up-stack-widget.asciidoc[]
 
 [float]
-[[quick-start]]
-=== Quick start with Docker
-
-If you're interested in Elastic APM and want a quick and easy development environment to try things out,
-see <>.
-
-[[install-elasticsearch]]
-=== Step 1: Install Elasticsearch
-
-// This will move to tagged regions so we can pull the installation instructions
-// here (instead of linking out)
-
-Install an Elasticsearch cluster, start it up, and make sure it's running.
-
-. Verify that your system meets the
-https://www.elastic.co/support/matrix#matrix_jvm[minimum JVM requirements] for {es}.
-. {stack-gs}/get-started-elastic-stack.html#install-elasticsearch[Install Elasticsearch].
-. {stack-gs}/get-started-elastic-stack.html#_make_sure_elasticsearch_is_up_and_running[Make sure elasticsearch is up and running].
-
-[[install-kibana]]
-=== Step 2: Install Kibana
-
-// This will move to tagged regions so we can pull the installation instructions
-// here (instead of linking out)
-
-Install Kibana, start it up, and open up the web interface:
-
-. {stack-gs}/get-started-elastic-stack.html#install-kibana[Install Kibana].
-. {stack-gs}/get-started-elastic-stack.html#_launch_the_kibana_web_interface[Launch the Kibana Web Interface].
-
-[[apm-server]]
-=== Step 3: Install APM Server
-
-Install, set up, and run APM Server.
-
-. {apm-server-ref-v}/installing.html[Install APM Server].
-. {apm-server-ref-v}/apm-server-configuration.html[Set up APM Server]
-. {apm-server-ref-v}/setting-up-and-running.html[Start APM Server].
- -Next, use the config file if you need to change the default configuration that APM Server uses to connect to Elasticsearch, -or if you need to specify credentials: - -* {apm-server-ref-v}/configuring-howto-apm-server.html[Configuring APM Server] -** {apm-server-ref-v}/configuration-process.html[General configuration options] -** {apm-server-ref-v}/elasticsearch-output.html[Configure the Elasticsearch output] - -[[secure-api-access]] -If you do change the listen address from `localhost` to something that is accessible from outside of the machine, -we recommend setting up firewall rules to ensure that only your own systems can access the API. -Alternatively, -you can use a {apm-server-ref-v}/securing-apm-server.html[secret token and TLS]. - -If you have APM Server running on the same host as your service, -you can configure it to listen on a Unix domain socket. - -[[more-information]] -TIP: For detailed instructions on how to install and secure APM Server in your server environment, -including details on how to run APM Server in a highly available environment, -please see the full {apm-server-ref-v}/index.html[APM Server documentation]. - [[agents]] -=== Step 4: Install APM agents +=== Step 2: Install APM agents // This tagged region is reused in the Observability docs. // tag::apm-agent[] -Agents are written in the same language as your service. -Monitoring a new service requires installing the agent -and configuring it with the address of your APM Server, a secret token (if necessary), and a service name. - -[cols="h,,"] -|======================================================================= -|Agent -2+| - -.2+|Go -2+|The Go agent automatically instruments Gorilla and Gin, and has support for Go’s built-in net/http and database/sql drivers. -|{apm-go-ref-v}/supported-tech.html[Supported technologies] -|{apm-go-ref-v}/getting-started.html[Set up the Go Agent] - -.2+|Java -2+|The Java agent automatically instruments Servlet API, Spring MVC, and Spring Boot out of the box. -|{apm-java-ref-v}/supported-technologies-details.html[Supported technologies] -|{apm-java-ref-v}/setup.html[Set up the Java Agent] - -.2+|.NET -2+|The .NET agent automatically instruments ASP.NET Core applications, and .NET Framework applications. -|{apm-dotnet-ref-v}/supported-technologies.html[Supported technologies] -|{apm-dotnet-ref-v}/setup.html[Set up the .NET Agent] - -.2+|Node.js -2+|The Node.js agent automatically instruments Express, hapi, Koa, and Restify out of the box. -|{apm-node-ref-v}/supported-technologies.html[Supported technologies] -|{apm-node-ref-v}/set-up.html[Set up the Node.js Agent] - -.2+|Python -2+|The Python agent automatically instruments Django and Flask out of the box. -|{apm-py-ref-v}/supported-technologies.html[Supported technologies] -|{apm-py-ref-v}/set-up.html[Set up the Python Agent] - -.2+|Ruby -2+|The Ruby agent automatically instruments Rails out of the box. -|{apm-ruby-ref-v}/supported-technologies.html[Supported technologies] -|{apm-ruby-ref-v}/set-up.html[Set up the Ruby Agent] - -.2+|RUM -2+|Real User Monitoring (RUM) captures user interactions with clients such as web browsers. -|{apm-rum-ref-v}/supported-technologies.html[Supported technologies] -|{apm-rum-ref-v}/getting-started.html[Set up the RUM Agent] - -|======================================================================= - -TIP: Check the {apm-overview-ref-v}/agent-server-compatibility.html[Agent/Server compatibility matrix] to ensure you're using agents that are compatible with your version of Elasticsearch. 
+APM agents are written in the same language as your service. +To monitor a new service, you must install the agent and configure it with a service name, APM Server URL, and Secret token or API key. [[choose-service-name]] -[float] -==== Choose a service name - -The service name is used by Elastic APM to differentiate between data coming from different services. - +* *Service name*: Service names are used to differentiate data from each of your services. Elastic APM includes the service name field on every document that it saves in Elasticsearch. If you change the service name after using Elastic APM, you will see the old service name and the new service name as two separate services. Make sure you choose a good service name before you get started. - ++ The service name can only contain alphanumeric characters, spaces, underscores, and dashes (must match `^[a-zA-Z0-9 _-]+$`). -// end::apm-agent[] +* *APM Server URL*: The host and port that APM Server listens for events on. -[[configure-apm]] -=== Step 5: Configure APM +* *Secret token or API key*: Authentication method for Agent/Server communication. +See {apm-server-ref-v}/secure-communication-agents.html[secure communication with APM Agents] to learn more. -Now that you're up and running with Elastic APM, you may want to adjust some configuration settings. -Luckily, there are many different ways to tweak and tune the Elastic ecosystem to adapt it to your needs. - -[float] -==== Configure APM agents - -// This tagged region is reused in the Observability docs. -// tag::configure-agents[] -APM agents have a number of configuration options that allow you to fine tune things like -environment names, sampling rates, instrumentations, metrics, and more. - -|=== -|*Agent configuration documentation* -a| - -* {apm-go-ref-v}/configuration.html[Go Agent configuration] -* {apm-java-ref-v}/configuration.html[Java Agent configuration] -* {apm-dotnet-ref-v}/configuration.html[.NET Agent configuration] -* {apm-node-ref}/configuring-the-agent.html[Node.js Agent configuration] -* {apm-py-ref-v}/configuration.html[Python Agent configuration] -* {apm-ruby-ref-v}/configuration.html[Ruby Agent configuration] -* {apm-rum-ref-v}/configuration.html[RUM Agent configuration] -|=== - -A select number of configuration options can be changed directly in Kibana, without needing to redeploy the Agent. -See {apm-app-ref}/agent-configuration.html[Agent configuration in Kibana] for more information. +Select your service's language for installation instructions: +// end::apm-agent[] -[float] -==== Configure Elastic Cloud +-- +include::../tab-widgets/install-agents-widget.asciidoc[] +-- -If you're running APM Server in Elastic cloud, you can configure your own user settings right in the Elasticsearch Service Console. -Any changes are automatically appended to the `apm-server.yml` configuration file for your instance. +TIP: Check the {apm-overview-ref-v}/agent-server-compatibility.html[Agent/Server compatibility matrix] to ensure you're using agents that are compatible with your version of {es}. -Full details are available in the {cloud}/ec-manage-apm-settings.html[APM user settings] documentation. [float] -==== Configure a self installation - -If you've installed APM Server yourself, you can edit the `apm-server.yml` configuration file to make changes. -More information is available in {apm-server-ref-v}/configuring-howto-apm-server.html[configuring APM Server]. 
-
-Don't forget to also read about
-{apm-server-ref-v}/securing-apm-server.html[securing APM Server], and
-{apm-server-ref-v}/monitoring.html[monitoring APM Server].
-// end::configure-agents[]
-
-[[quick-start-overview]]
-=== Quick start development environment
+[[configure-apm]]
+=== Step 3: Advanced configuration (optional)
 
 // This tagged region is reused in the Observability docs.
-// tag::dev-environment[]
-ifeval::["{release-state}"=="unreleased"]
-
-Version {version} of APM Server has not yet been released.
-
-endif::[]
-
-ifeval::["{release-state}"!="unreleased"]
-
-If you're just looking for a quick way to try out Elastic APM, you can easily get started with Docker.
-Just follow the steps below.
-
-**Create a docker-compose.yml file**
-
-The https://www.docker.elastic.co/[Elastic Docker registry] contains Docker images for all of the products
-in the {stack}.
-You can use Docker compose to easily get the default distributions of {es}, {kib},
-and APM Server up and running in Docker.
+// tag::configure-agents[]
+There are many different ways to tweak and tune the Elastic APM ecosystem to your needs.
 
-Create a `docker-compose.yml` file and copy and paste in the following:
+*Configure APM agents*
 
-["source","yaml",subs="attributes"]
---------------------------------------------
-include::./docker-compose.yml[]
---------------------------------------------
+APM agents have a number of configuration options that allow you to fine tune things like
+environment names, sampling rates, instrumentations, metrics, and more.
+Broadly speaking, there are two ways to configure APM agents:
+// end::configure-agents[]
 
-**Compose**
+include::../tab-widgets/configure-agent-widget.asciidoc[]
 
-Run `docker-compose up`.
-Compose will download the official docker containers and start {es}, {kib}, and APM Server.
+*Configure APM Server*
 
-**Install Agents**
+include::../tab-widgets/configure-server-widget.asciidoc[]
 
-When Compose finishes, navigate to http://localhost:5601/app/kibana#/home/tutorial/apm.
-Complete steps 4-6 to configure your application to collect and report APM data.
 
 [float]
+[[visualize-kibana]]
+=== Step 4: Visualize in {kib}
 
-**Visualize**
+The APM app in {kib} allows you to monitor your software services and applications in real time;
+visualize detailed performance information on your services, identify and analyze errors,
+and monitor host-level and agent-specific metrics like JVM and Go runtime metrics.
 
-Use the APM app at http://localhost:5601/app/apm to visualize your application performance data!
+To open the APM app:
 
-When you're done, `ctrl+c` will stop all of the containers.
+. Launch {kib}:
++
+--
+include::../tab-widgets/open-kibana-widget.asciidoc[]
+--
 
-**Advanced Docker usage**
+. In the side navigation, under *Observability*, select *APM*.
 
-If you're interested in learning more about all of the APM features available,
-or running the Elastic stack on Docker in a production environment, see the following documentation:
 
 [float]
+[[what-next]]
+=== What's next?
 
-* {apm-server-ref-v}/running-on-docker.html[Running APM Server on Docker]
-* {stack-gs}/get-started-docker.html[Running the Elastic Stack on Docker]
+Now that you have APM data streaming into {es},
+head over to the {kibana-ref}/xpack-apm.html[APM app reference] to learn more about what you can
+do with {kib}'s APM app.
-endif::[]
-// end::dev-environment[]
+// Need to add more here
+// Get a deeper understanding by learning about [[concepts]]
+// Learn how to do things with [[how-to guides]]
\ No newline at end of file
diff --git a/docs/guide/obs-integrations.asciidoc b/docs/guide/obs-integrations.asciidoc
index 5e0f43dadae..cae4cb78204 100644
--- a/docs/guide/obs-integrations.asciidoc
+++ b/docs/guide/obs-integrations.asciidoc
@@ -1,18 +1,18 @@
 [[observability-integrations]]
-== Observability integrations
+=== Observability integrations
 
 Elastic APM supports integrations with other observability solutions.
 
 // remove float tag once other integrations are added
 [float]
 [[apm-logging-integration]]
-=== Logging integration
+==== Logging integration
 
 Many applications use logging frameworks to help record, format, and append an application's logs.
 Elastic APM now offers a way to make your application logs even more useful,
 by integrating with the most popular logging frameworks in their respective languages.
 This means you can easily inject trace information into your logs,
-allowing you to explore logs in the {kibana-ref}/xpack-logs.html[Logs app],
+allowing you to explore logs in the {observability-guide}/monitor-logs.html[Logs app],
 then jump straight into the corresponding APM traces -- all while preserving the trace context.
 
 To get started:
@@ -22,7 +22,7 @@ To get started:
 . Ingest your logs into Elasticsearch
 
 [float]
-==== Enable Log correlation
+===== Enable Log correlation
 
 // temporary attribute for ECS 1.1
 // Remove after 7.4 release
@@ -42,9 +42,9 @@ See the relevant https://www.elastic.co/guide/en/apm/agent/index.html[Agent documentation] to learn more.
 // * *Rum*: {apm-rum-ref-v}/[]
 
 [float]
-==== Add APM identifiers to your logs
+===== Add APM identifiers to your logs
 
-Once log correlation is enabled, 
+Once log correlation is enabled,
 you must ensure your logs contain APM identifiers.
 In some supported frameworks, this is already done for you.
 In other scenarios, like for unstructured logs,
@@ -53,7 +53,7 @@ you'll need to add APM identifiers to your logs in an easy-to-parse manner.
 
 The identifiers we're interested in are: {ecs-ref}/ecs-tracing.html[`trace.id`] and
 {ecs-ref}/ecs-tracing.html[`transaction.id`]. Certain Agents also support the `span.id` field.
 
-This process for adding these fields will differ based the Agent you're using, the logging framework, 
+This process for adding these fields will differ based on the Agent you're using, the logging framework,
 and the type and structure of your logs.
 See the relevant https://www.elastic.co/guide/en/apm/agent/index.html[Agent documentation] to learn more.
@@ -69,7 +69,7 @@ See the relevant https://www.elastic.co/guide/en/apm/agent/index.html[Agent documentation] to learn more.
 // * *Rum*: {apm-rum-ref-v}/[]
 
 [float]
-==== Ingest your logs into Elasticsearch
+===== Ingest your logs into Elasticsearch
 
 Once your logs contain the appropriate identifiers (fields), you need to ingest them into Elasticsearch.
 Luckily, we've got a tool for that -- Filebeat is Elastic's log shipper.
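+
+For example, a structured log line carrying the identifiers described above might look
+like the following ECS-style JSON before being shipped (all field values here are
+illustrative, not taken from a real trace):
+
+[source,json]
+----
+{
+  "@timestamp": "2021-09-01T12:00:00.000Z",
+  "log.level": "error",
+  "message": "Connection to payment service timed out",
+  "service.name": "checkout",
+  "trace.id": "4bf92f3577b34da6a3ce929d0e0e4736",
+  "transaction.id": "00f067aa0ba902b7",
+  "span.id": "a1b2c3d4e5f60718"
+}
+----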
diff --git a/docs/guide/opentelemetry-elastic.asciidoc b/docs/guide/opentelemetry-elastic.asciidoc
index 854cdc40bcb..ae2538be83d 100644
--- a/docs/guide/opentelemetry-elastic.asciidoc
+++ b/docs/guide/opentelemetry-elastic.asciidoc
@@ -1,181 +1,393 @@
 [[open-telemetry-elastic]]
-== OpenTelemetry integration
+=== OpenTelemetry integration
 
 :ot-spec: https://github.com/open-telemetry/opentelemetry-specification/blob/master/README.md
+:ot-contrib: https://github.com/open-telemetry/opentelemetry-collector-contrib
 :ot-repo: https://github.com/open-telemetry/opentelemetry-collector
-:ot-pipelines: {ot-repo}/blob/master/docs/pipelines.md
+:ot-pipelines: https://opentelemetry.io/docs/collector/configuration/#service
 :ot-extension: {ot-repo}/blob/master/extension/README.md
 :ot-scaling: {ot-repo}/blob/master/docs/performance.md
-:ot-collector: https://opentelemetry.io/docs/collector/about/
-:ot-dockerhub: https://hub.docker.com/r/otel/opentelemetry-collector-contrib-dev
+:ot-collector: https://opentelemetry.io/docs/collector/getting-started/
+:ot-dockerhub: https://hub.docker.com/r/otel/opentelemetry-collector-contrib
 
-Elastic's OpenTelemetry integration allows you to reuse your existing OpenTelemetry
-instrumentation to quickly analyze distributed traces and metrics with the Elastic Stack.
+https://opentelemetry.io/docs/concepts/what-is-opentelemetry/[OpenTelemetry] is a set
+of APIs, SDKs, tooling, and integrations that enable the capture and management of
+telemetry data from your services for greater observability. For more information about the
+OpenTelemetry project, see the {ot-spec}[spec].
 
-[float]
-[[what-is-opentelemetry]]
-=== What is OpenTelemetry?
+Elastic OpenTelemetry integrations allow you to reuse your existing OpenTelemetry
+instrumentation to quickly analyze distributed traces and metrics to help you monitor
+business KPIs and technical components with the {stack}.
 
-> OpenTelemetry is a set of APIs, SDKs, tooling, and integrations that enable the creation and
-management of telemetry data. It formed through a merger of the OpenTracing and OpenCensus projects.
+[[open-telemetry-elastic-protocol]]
+==== APM Server native support of OpenTelemetry protocol
 
-OpenTelemetry is an open-source project that provides the components necessary to observe your applications and services.
-If you're unfamiliar with the project, see the {ot-spec}[spec] for more information.
 
-[float]
-[[open-telemetry-elastic-exporter]]
-=== Elastic exporter
+IMPORTANT: The https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticexporter#legacy-opentelemetry-collector-exporter-for-elastic[OpenTelemetry Collector exporter for Elastic]
+was deprecated in 7.13 and replaced by native support for the OpenTelemetry protocol (OTLP) in
+Elastic Observability. To learn more, see
+https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticexporter#migration[migration].
 
-Elastic's integration is designed to drop into your current OpenTelemetry setup.
-We've done this by extending the "contrib" OpenTelemetry collector and adding an Elastic exporter.
-This exporter translates the OpenTelemetry trace data collected from your services to Elastic's protocol,
-before sending the data to the Elastic Stack.
-By extending the OpenTelemetry collector,
-no changes are needed in your instrumented services in order to begin using the Elastic Stack.
+Elastic APM Server natively supports the OpenTelemetry protocol.
+This means trace data and metrics collected from your applications and infrastructure can
+be sent directly to Elastic APM Server using the OpenTelemetry protocol.
 
-[role="screenshot"]
-image::images/open-telemetry-elastic-arch.png[OpenTelemetry Elastic architecture diagram]
+image::images/open-telemetry-protocol-arch.png[OpenTelemetry Elastic architecture diagram]
 
 [float]
-[[open-telemetry-elastic-works]]
-=== How the OpenTelemetry Collector works
+[[instrument-apps-apm-server]]
+===== Instrument applications
+
+To export traces and metrics to APM Server, ensure that you have instrumented your services and applications
+with the OpenTelemetry API, SDK, or both. For example, if you are a Java developer, you need to instrument your Java app using the
+https://github.com/open-telemetry/opentelemetry-java-instrumentation[OpenTelemetry agent for Java].
 
-The OpenTelemetry collector uses three different types of components to handle data: `receivers`, `processors`, and `exporters`.
+By defining the following environment variables, you can configure the OTLP endpoint so that the OpenTelemetry agent communicates with
+APM Server.
 
-* `receivers`: Configure how data gets to the collector. At least one receiver must be configured.
-* `processors`: Defines optional transformations that occurs between receiving and exporting data.
-* `exporters`: Configures how data is sent to its destination--in this case, the Elastic Stack.
+[source,bash]
+----
+export OTEL_RESOURCE_ATTRIBUTES=service.name=checkoutService,service.version=1.1,deployment.environment=production
+export OTEL_EXPORTER_OTLP_ENDPOINT=https://apm_server_url:8200
+export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer an_apm_secret_token"
+java -javaagent:/path/to/opentelemetry-javaagent-all.jar \
+     -classpath lib/*:classes/ \
+     com.mycompany.checkout.CheckoutServiceServer
+----
 
-Once a `receiver`, `processor`, and `exporter` is defined, `pipelines` can be configured in the `services` section of your configuration. Specifically, a `traces` pipeline will define the path of trace data through your collector, and bring all three of these components together.
+|===
 
-TIP: More information is available in the
-{ot-pipelines}[OpenTelemetry pipeline docs]
+| `OTEL_RESOURCE_ATTRIBUTES` | Attributes that describe your service, such as the service name, version, and deployment environment.
 
-A final note: `extensions` can also be enabled for tasks like monitoring your collectors health.
-See the {ot-extension}[OpenTelemetry extension readme]
-for a list of supported extensions.
+| `OTEL_EXPORTER_OTLP_ENDPOINT` | APM Server URL. The host and port that APM Server listens for events on.
 
-[[open-telemetry-elastic-get-started]]
-=== Get started
+| `OTEL_EXPORTER_OTLP_HEADERS` | Authorization header that includes the Elastic APM Secret token or API key: `"Authorization=Bearer an_apm_secret_token"` or `"Authorization=ApiKey an_api_key"`.
 
-NOTE: This guide assumes you've already instrumented your services with the OpenTelemetry API and/or SDK.
-If you haven't, see the Elastic APM <> to get started with Elastic APM Agents instead.
+For information on how to format an API key, see our {apm-server-ref-v}/api-key.html[API key] docs.
 
-[[open-telemetry-elastic-deployment-planning]]
-==== Plan your deployment
+Please note the required space between `Bearer` and `an_apm_secret_token`, and `ApiKey` and `an_api_key`.
 
-OpenTelemetry Collectors can be run as an Agent, or as standalone collectors.
-They can be deployed as often as necessary and scaled up or out.
+| `OTEL_EXPORTER_OTLP_CERTIFICATE` | Certificate for TLS credentials of the gRPC client. (optional)
 
-Deployment planning resources are available in OpenTelemetry's {ot-collector}[Getting Started]
-documentation, and {ot-scaling}[Collector Performance] research.
+|===
 
-[[open-telemetry-elastic-download]]
-==== Download the collector
+You are now ready to collect traces and <> before <>
+and <> in {kib}.
 
-The Elastic exporter lives in the {ot-repo}[`opentelemetry-collector-contrib repository`],
-and the latest release can be downloaded from {ot-repo}/releases[GitHub releases page].
+[[open-telemetry-collector]]
+===== Connect OpenTelemetry Collector instances
 
-Docker images are available on {ot-dockerhub}[dockerhub]:
+If you use OpenTelemetry Collector instances in your architecture, you can connect them to Elastic Observability using the OTLP exporter.
 
-[source,bash]
+[source,yaml]
 ----
-docker pull otel/opentelemetry-collector-contrib-dev
+receivers: <1>
+  # ...
+  otlp:
+
+processors: <2>
+  # ...
+  memory_limiter:
+    check_interval: 1s
+    limit_mib: 2000
+  batch:
+
+exporters:
+  logging:
+    loglevel: warn <3>
+  otlp/elastic: <4>
+    # Elastic APM server https endpoint without the "https://" prefix
+    endpoint: "${ELASTIC_APM_SERVER_ENDPOINT}" <5> <7>
+    headers:
+      # Elastic APM Server secret token
+      Authorization: "Bearer ${ELASTIC_APM_SERVER_TOKEN}" <6> <7>
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      exporters: [logging, otlp/elastic]
+    metrics:
+      receivers: [otlp]
+      exporters: [logging, otlp/elastic]
----
+<1> The receivers, such as the https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver[OTLP receiver] that forwards data emitted by APM agents, or the https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/hostmetricsreceiver[host metrics receiver].
+<2> We recommend using the https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md[Batch processor] and also suggest using the https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md[memory limiter processor]. For more information, see https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md#recommended-processors[Recommended processors].
+<3> The https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/loggingexporter[logging exporter] is helpful for troubleshooting and supports various logging levels: `debug`, `info`, `warn`, and `error`.
+<4> Elastic Observability endpoint configuration. To learn more, see https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlpexporter[OpenTelemetry Collector > OTLP gRPC exporter].
+<5> Hostname and port of the APM Server endpoint. For example, `elastic-apm-server:8200`.
+<6> Credential for Elastic APM {apm-server-ref-v}/secret-token.html[secret token authorization] (`Authorization: "Bearer a_secret_token"`) or {apm-server-ref-v}/api-key.html[API key authorization] (`Authorization: "ApiKey an_api_key"`).
+<7> Environment-specific configuration parameters can be conveniently passed in as environment variables documented https://opentelemetry.io/docs/collector/configuration/#configuration-environment-variables[here] (e.g. `ELASTIC_APM_SERVER_ENDPOINT` and `ELASTIC_APM_SERVER_TOKEN`).
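+
+As a usage sketch, those environment variables might be supplied when launching the
+collector. Note that the binary name varies by distribution, so `otelcol-contrib`
+below is illustrative only:
+
+[source,bash]
+----
+# Values consumed by the ${...} placeholders in the configuration above
+export ELASTIC_APM_SERVER_ENDPOINT=elastic-apm-server:8200
+export ELASTIC_APM_SERVER_TOKEN=a_secret_token
+# Start the collector with the configuration shown above
+otelcol-contrib --config collector.yaml
+----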
-You can also build the collector-contrib repository by cloning it and running: +TIP: When collecting infrastructure metrics, we recommend evaluating {metricbeat-ref}/metricbeat-overview.html[{metricbeat}] to get a mature collector with more integrations +and built-in dashboards. -[source,bash] +You're now ready to export traces and metrics from your services and applications. + +[[open-telemetry-elastic-metrics]] +==== Collect metrics + +IMPORTANT: When collecting metrics, please note that the https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/DoubleValueRecorder.html[`DoubleValueRecorder`] +and https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/LongValueObserver.html[`LongValueRecorder`] metrics are not yet supported. + +Here's an example of how to capture business metrics from a Java application. + +[source,java] ---- -make otelcontribcol +// initialize metric +Meter meter = GlobalMetricsProvider.getMeter("my-frontend"); +DoubleCounter orderValueCounter = meter.doubleCounterBuilder("order_value").build(); + +public void createOrder(HttpServletRequest request) { + + // create order in the database + ... + // increment business metrics for monitoring + orderValueCounter.add(orderPrice); +} ---- -[[open-telemetry-elastic-configure]] -==== Configure the collector +See the https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md[Open Telemetry Metrics API] +for more information. + +[[open-telemetry-elastic-verify]] +===== Verify OpenTelemetry metrics data + +Use *Discover* to validate that metrics are successfully reported to {kib}. + +. Launch {kib}: ++ +-- +include::../tab-widgets/open-kibana-widget.asciidoc[] +-- + +. Open the main menu, then click *Discover*. +. Select `apm-*` as your index pattern. +. Filter the data to only show documents with metrics: `processor.name :"metric"` +. Narrow your search with a known OpenTelemetry field. For example, if you have an `order_value` field, add `order_value: *` to your search to return +only OpenTelemetry metrics documents. -Create a `yaml` configuration file. +[[open-telemetry-elastic-kibana]] +===== Visualize in {kib} -At a minimum, you must define the URL of the APM Server instance you are sending data to. -For example: +TSVB within {kib} is the recommended visualization for OpenTelemetry metrics. TSVB is a time series data visualizer that allows you to use the +{es} aggregation framework's full power. With TSVB, you can combine an infinite number of aggregations to display complex data. -[source,yml] +In this example eCommerce OpenTelemetry dashboard, there are four visualizations: sales, order count, product cache, and system load. The dashboard provides us with business +KPI metrics, along with performance-related metrics. + +[role="screenshot"] +image::images/ecommerce-dashboard.png[OpenTelemetry visualizations] + +Let's look at how this dashboard was created, specifically the Sales USD and System load visualizations. + +. Open the main menu, then click *Dashboard*. +. Click *Create dashboard*. +. Click *Save*, enter the name of your dashboard, and then click *Save* again. +. Let’s add a Sales USD visualization. Click *Edit*. +. Click *Create new* and then select *TSVB*. +. For the label name, enter Sales USD, and then select the following: ++ +* Aggregation: `Positive Rate`. +* Field: `order_sum`. +* Scale: `auto`. +* Group by: `Everything` +. 
+. Click *Save*, enter Sales USD as the visualization name, and then click *Save and return*.
+. Now let's create a visualization of load averages on the system. Click *Create new*.
+. Select *TSVB*.
+. Select the following:
++
+* Aggregation: `Average`.
+* Field: `system.cpu.load_average.1m`.
+* Group by: `Terms`.
+* By: `host.ip`.
+* Top: `10`.
+* Order by: `Doc Count (default)`.
+* Direction: `Descending`.
+. Click *Save*, enter System load per host IP as the visualization name, and then click *Save and return*.
++
+Both visualizations are now displayed on your custom dashboard.
+
+IMPORTANT: By default, Discover shows data for the last 15 minutes. If you have a time-based index
+and no data displays, you might need to increase the time range.
+
+[[open-telemetry-aws-lambda-elastic]]
+==== AWS Lambda Support
+
+AWS Lambda functions can be instrumented with OpenTelemetry and monitored with Elastic Observability.
+
+To get started, follow the official AWS Distro for OpenTelemetry Lambda https://aws-otel.github.io/docs/getting-started/lambda[getting started documentation] and configure the OpenTelemetry Collector to output traces and metrics to your Elastic cluster.
+
+[[open-telemetry-aws-lambda-elastic-java]]
+===== Instrumenting AWS Lambda Java functions
+
+NOTE: For a better startup time, we recommend using SDK-based instrumentation, i.e. manual instrumentation of the code, rather than auto instrumentation.
+
+To instrument AWS Lambda Java functions, follow the official https://aws-otel.github.io/docs/getting-started/lambda/lambda-java[AWS Distro for OpenTelemetry Lambda Support For Java].
+
+Noteworthy configuration elements:
+
+* AWS Lambda Java functions should implement `com.amazonaws.services.lambda.runtime.RequestHandler`,
++
+[source,java]
+----
+public class ExampleRequestHandler implements RequestHandler<APIGatewayProxyRequestEvent, APIGatewayProxyResponseEvent> {
+    public APIGatewayProxyResponseEvent handleRequest(APIGatewayProxyRequestEvent event, Context context) {
+        // add your code ...
+    }
+}
+----
+
+* When using SDK-based instrumentation, frameworks you want to gain visibility into should be manually instrumented
+** The below example instruments https://square.github.io/okhttp/4.x/okhttp/okhttp3/-ok-http-client/[OkHttpClient] with the OpenTelemetry instrument https://search.maven.org/artifact/io.opentelemetry.instrumentation/opentelemetry-okhttp-3.0/1.3.1-alpha/jar[io.opentelemetry.instrumentation:opentelemetry-okhttp-3.0:1.3.1-alpha]
++
+[source,java]
+----
+import io.opentelemetry.instrumentation.okhttp.v3_0.OkHttpTracing;
+
+OkHttpClient httpClient = new OkHttpClient.Builder()
+    .addInterceptor(OkHttpTracing.create(GlobalOpenTelemetry.get()).newInterceptor())
+    .build();
+----
+
+* The configuration of the OpenTelemetry Collector, with the definition of the Elastic Observability endpoint, can be added to the root directory of the Lambda binaries (e.g.
defined in `src/main/resources/opentelemetry-collector.yaml`)
++
+[source,yaml]
+----
+# Copy opentelemetry-collector.yaml in the root directory of the lambda function
+# Set an environment variable 'OPENTELEMETRY_COLLECTOR_CONFIG_FILE' to '/var/task/opentelemetry-collector.yaml'
+receivers:
+  otlp:
+    protocols:
+      http:
+      grpc:
+
+exporters:
+  logging:
+    loglevel: debug
+  otlp/elastic:
+    # Elastic APM server https endpoint without the "https://" prefix
+    endpoint: "${ELASTIC_OTLP_ENDPOINT}" <1>
+    headers:
+      # Elastic APM Server secret token
+      Authorization: "Bearer ${ELASTIC_OTLP_TOKEN}" <1>
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      exporters: [logging, otlp/elastic]
+    metrics:
+      receivers: [otlp]
+      exporters: [logging, otlp/elastic]
+----
+<1> Environment-specific configuration parameters can be conveniently passed in as environment variables: `ELASTIC_OTLP_ENDPOINT` and `ELASTIC_OTLP_TOKEN`
+
+* Configure the AWS Lambda Java function with:
+** https://docs.aws.amazon.com/lambda/latest/dg/API_Layer.html[Function
+layer]: The latest https://aws-otel.github.io/docs/getting-started/lambda/lambda-java[AWS
+Lambda layer for OpenTelemetry] (e.g. `arn:aws:lambda:eu-west-1:901920570463:layer:aws-otel-java-wrapper-ver-1-2-0:1`)
+** https://docs.aws.amazon.com/lambda/latest/dg/API_TracingConfig.html[TracingConfig / Mode] set to `PassThrough`
+** https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionConfiguration.html[FunctionConfiguration / Timeout] set to more than 10 seconds to support the longer cold start inherent to the Lambda Java Runtime
+** Export the environment variables:
+*** `AWS_LAMBDA_EXEC_WRAPPER="/opt/otel-proxy-handler"` for wrapping handlers proxied through the API Gateway (see https://aws-otel.github.io/docs/getting-started/lambda/lambda-java#enable-auto-instrumentation-for-your-lambda-function[here])
+*** `OTEL_PROPAGATORS="tracecontext, baggage"` to override the default setting that also enables X-Ray headers, causing interference between OpenTelemetry and X-Ray
+*** `OPENTELEMETRY_COLLECTOR_CONFIG_FILE="/var/task/opentelemetry-collector.yaml"` to specify the path to your OpenTelemetry Collector configuration
+
+[[open-telemetry-aws-lambda-elastic-java-terraform]]
+===== Instrumenting AWS Lambda Java functions with Terraform
+
+We recommend using an infrastructure as code solution like Terraform or Ansible to manage the configuration of your AWS Lambda functions.
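+
+As a rough, hypothetical sketch of what such a Terraform definition could look like,
+tying together the layer, tracing mode, timeout, and environment variables described
+above (the resource name, IAM role, and artifact path are placeholders; see the sample
+code linked below for a complete, working setup):
+
+[source,terraform]
+----
+resource "aws_lambda_function" "checkout" {
+  function_name = "checkout"
+  role          = aws_iam_role.lambda_exec.arn # hypothetical execution role
+  runtime       = "java11"
+  handler       = "com.mycompany.checkout.CheckoutServiceServer::handleRequest"
+  filename      = "build/function.zip"
+  timeout       = 30 # headroom for the Lambda Java runtime cold start
+
+  # The AWS Lambda layer for OpenTelemetry described above
+  layers = ["arn:aws:lambda:eu-west-1:901920570463:layer:aws-otel-java-wrapper-ver-1-2-0:1"]
+
+  tracing_config {
+    mode = "PassThrough"
+  }
+
+  environment {
+    variables = {
+      AWS_LAMBDA_EXEC_WRAPPER             = "/opt/otel-proxy-handler"
+      OTEL_PROPAGATORS                    = "tracecontext, baggage"
+      OPENTELEMETRY_COLLECTOR_CONFIG_FILE = "/var/task/opentelemetry-collector.yaml"
+    }
+  }
+}
+----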
+A complete example of an AWS Lambda Java function managed with Terraform and the https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function[AWS Provider / Lambda Functions]:
+
+* Sample Terraform code: https://github.com/cyrille-leclerc/my-serverless-shopping-cart/tree/main/checkout-function/deploy
+* Note that the Terraform code to manage the HTTP API Gateway (https://github.com/cyrille-leclerc/my-serverless-shopping-cart/tree/main/utils/terraform/api-gateway-proxy[here]) is copied from the official OpenTelemetry Lambda sample https://github.com/open-telemetry/opentelemetry-lambda/tree/e72467a085a2a6e57af133032f85ac5b8bbbb8d1/utils[here]
+
+[[open-telemetry-aws-lambda-elastic-nodejs]]
+===== Instrumenting AWS Lambda Node.js functions
+
+NOTE: For a better startup time, we recommend using SDK-based instrumentation for manual instrumentation of the code rather than auto instrumentation.
+
+To instrument AWS Lambda Node.js functions, see https://aws-otel.github.io/docs/getting-started/lambda/lambda-js[AWS Distro for OpenTelemetry Lambda Support For JS].
+
+The configuration of the OpenTelemetry Collector, with the definition of the Elastic Observability endpoint, can be added to the root directory of the Lambda binaries: `src/main/resources/opentelemetry-collector.yaml`.
+
+[source,yaml]
 ----
+# Copy opentelemetry-collector.yaml in the root directory of the lambda function
+# Set an environment variable 'OPENTELEMETRY_COLLECTOR_CONFIG_FILE' to '/var/task/opentelemetry-collector.yaml'
+receivers:
+  otlp:
-    endpoint: localhost:55680
-processors:
-  batch:
-    timeout: 1s
-    send_batch_size: 1024
+    protocols:
+      http:
+      grpc:
+
+exporters:
-  elastic:
-    apm_server_url: "https://elasticapm.example.com"
-    secret_token: "ESS_TOKEN"
+  logging:
+    loglevel: debug
+  otlp/elastic:
+    # Elastic APM server https endpoint without the "https://" prefix
+    endpoint: "${ELASTIC_OTLP_ENDPOINT}" <1>
+    headers:
+      # Elastic APM Server secret token
+      Authorization: "Bearer ${ELASTIC_OTLP_TOKEN}" <1>
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
-      processors: [batch]
-      exporters: [elastic]
+      exporters: [logging, otlp/elastic]
+    metrics:
+      receivers: [otlp]
+      exporters: [logging, otlp/elastic]
 ----
+<1> Environment-specific configuration parameters can be conveniently passed in as environment variables: `ELASTIC_OTLP_ENDPOINT` and `ELASTIC_OTLP_TOKEN`
+
+Configure the AWS Lambda Node.js function:
+
+* https://docs.aws.amazon.com/lambda/latest/dg/API_Layer.html[Function
+layer]: The latest https://aws-otel.github.io/docs/getting-started/lambda/lambda-js[AWS
+Lambda layer for OpenTelemetry]. For example, `arn:aws:lambda:eu-west-1:901920570463:layer:aws-otel-nodejs-ver-0-23-0:1`
+* https://docs.aws.amazon.com/lambda/latest/dg/API_TracingConfig.html[TracingConfig / Mode] set to `PassThrough`
+* https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionConfiguration.html[FunctionConfiguration / Timeout] set to more than 10 seconds to support the cold start of the Lambda JS Runtime
+* Export the environment variables:
+** `AWS_LAMBDA_EXEC_WRAPPER="/opt/otel-handler"` for wrapping handlers proxied through the API Gateway. See https://aws-otel.github.io/docs/getting-started/lambda/lambda-js#enable-auto-instrumentation-for-your-lambda-function[enable auto instrumentation for your lambda-function].
+** `OTEL_PROPAGATORS="tracecontext"` to override the default setting that also enables X-Ray headers, causing interference between OpenTelemetry and X-Ray +** `OPENTELEMETRY_COLLECTOR_CONFIG_FILE="/var/task/opentelemetry-collector.yaml"` to specify the path to your OpenTelemetry Collector configuration +** `OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:55681/v1/traces"` this environment variable must be set until https://github.com/open-telemetry/opentelemetry-js/pull/2331[PR #2331] is merged and released. +** `OTEL_TRACES_SAMPLER="AlwaysOn"` defines the required sampler strategy if it is not sent from the caller. Note that `AlwaysOn` can potentially create a very large amount of data, so in production set the correct sampling configuration, as per the https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#sampling[specification]. + + +[[open-telemetry-aws-lambda-elastic-nodejs-terraform]] +===== Instrumenting AWS Lambda Node.js functions with Terraform + +To manage the configuration of your AWS Lambda functions, we recommend using an infrastructure as code solution like Terraform or Ansible. + +Here is an example of an AWS Lambda Node.js function managed with Terraform and the https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function[AWS Provider / Lambda Functions]: -NOTE: For more information about getting started with an OpenTelemetry Collector, -see the {ot-collector}[OpenTelemetry collector] docs. +* https://github.com/michaelhyatt/terraform-aws-nodejs-api-worker-otel/tree/v0.23[Sample Terraform code] -[[open-telemetry-elastic-config-ref]] -=== Elastic exporter configuration reference +[[elastic-open-telemetry-known-limitations]] +==== Limitations -[[open-telemetry-config-url]] -==== `apm_server_url` -Elastic APM Server URL. (required) -[[open-telemetry-config-api-key]] -==== `api_key` -Credential for {apm-server-ref-v}/api-key.html[API key authorization]. -Must also be enabled in Elastic APM Server. (optional) +[[elastic-open-telemetry-traces-limitations]] +===== OpenTelemetry traces -[[open-telemetry-config-secret-token]] -==== `secret_token` -Credential for {apm-server-ref-v}/secret-token.html[secret token authorization]. -Must also be enabled in Elastic APM Server. (optional) +* Traces of applications using `messaging` semantics might be wrongly displayed or not shown in the APM UI. You may only see `spans` coming from such services, but no `transaction` https://github.com/elastic/apm-server/issues/5094[#5094] +* Inability to see stack traces in spans +* Inability to view the "Time Spent by Span Type" chart in APM views https://github.com/elastic/apm-server/issues/5747[#5747] +* Metrics derived from traces (throughput, latency, and errors) are not accurate when traces are sampled before being ingested by Elastic Observability (i.e. by an OpenTelemetry Collector or an OpenTelemetry agent or SDK) https://github.com/elastic/apm/issues/472[#472] -[[open-telemetry-config-ca-file]] -==== `ca_file` -Root Certificate Authority (CA) certificate, for verifying the server's identity if TLS is enabled. (optional) +[[elastic-open-telemetry-metrics-limitations]] +===== OpenTelemetry metrics -[[open-telemetry-config-cert-file]] -==== `cert_file` -Client TLS certificate.
(optional) +* Inability to see host metrics in Elastic Metrics Infrastructure view when using the OpenTelemetry Collector host metrics receiver https://github.com/elastic/apm-server/issues/5310[#5310] -[[open-telemetry-config-key-file]] -==== `key_file` -Client TLS key. (optional) +[[elastic-open-telemetry-logs-limitations]] +===== OpenTelemetry logs -[[open-telemetry-config-insecure]] -==== `insecure` -Disable verification of the server's identity if TLS is enabled. (optional) +* OpenTelemetry logs are not yet supported https://github.com/elastic/apm-server/issues/5491[#5491] diff --git a/docs/guide/opentracing.asciidoc b/docs/guide/opentracing.asciidoc index 92e44176043..19d71199e64 100644 --- a/docs/guide/opentracing.asciidoc +++ b/docs/guide/opentracing.asciidoc @@ -1,5 +1,5 @@ [[opentracing]] -== OpenTracing bridge +=== OpenTracing bridge Most Elastic APM agents have https://opentracing.io/[OpenTracing] compatible bridges. @@ -7,13 +7,15 @@ The OpenTracing bridge allows you to create Elastic APM <What is APM? ++++ Elastic APM is an application performance monitoring system built on the Elastic Stack. -It allows you to monitor software services and applications in real time -- -collect detailed performance information on response time for incoming requests, +It allows you to monitor software services and applications in real time, by +collecting detailed performance information on response time for incoming requests, database queries, calls to caches, external HTTP requests, and more. This makes it easy to pinpoint and fix performance problems quickly. @@ -11,19 +15,14 @@ Elastic APM also automatically collects unhandled errors and exceptions. Errors are grouped based primarily on the stacktrace, so you can identify new errors as they appear and keep an eye on how many times specific errors happen. -Metrics are another important source of information when debugging production systems. -Elastic APM agents automatically pick up basic host-level metrics and agent specific metrics, +Metrics are another vital source of information when debugging production systems. +Elastic APM agents automatically pick up basic host-level metrics and agent-specific metrics, like JVM metrics in the Java Agent, and Go runtime metrics in the Go Agent. -Ready to learn more about the Elastic APM ecosystem? Check out these helpful topics: +[float] +=== Give Elastic APM a try -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +Learn more about the <> that make up Elastic APM, +or jump right into the <>. -NOTE: This guide will indiscriminately use the word "service" for both services and applications. +NOTE: These docs will indiscriminately use the word "service" for both services and applications. diff --git a/docs/guide/quick-start-overview.asciidoc b/docs/guide/quick-start-overview.asciidoc new file mode 100644 index 00000000000..0d2d31d20d3 --- /dev/null +++ b/docs/guide/quick-start-overview.asciidoc @@ -0,0 +1,57 @@ + +[[quick-start-overview]] +=== Quick start development environment + +// This tagged region is reused in the Observability docs. +// tag::dev-environment[] ifeval::["{release-state}"=="unreleased"] + +Version {version} of APM Server has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +If you're just looking for a quick way to try out Elastic APM, you can easily get started with Docker. +Just follow the steps below.
+ +**Create a docker-compose.yml file** + +The https://www.docker.elastic.co/[Elastic Docker registry] contains Docker images for all of the products +in the {stack}. +You can use Docker compose to easily get the default distributions of {es}, {kib}, +and APM Server up and running in Docker. + +Create a `docker-compose.yml` file and copy and paste in the following: + +["source","yaml",subs="attributes"] +-------------------------------------------- +include::./docker-compose.yml[] +-------------------------------------------- + +**Compose** + +Run `docker-compose up`. +Compose will download the official docker containers and start {es}, {kib}, and APM Server. + +**Install Agents** + +When Compose finishes, navigate to http://localhost:5601/app/kibana#/home/tutorial/apm. +Complete steps 4-6 to configure your application to collect and report APM data. + +**Visualize** + +Use the APM app at http://localhost:5601/app/apm to visualize your application performance data! + +When you're done, `ctrl+c` will stop all of the containers. + +**Advanced Docker usage** + +If you're interested in learning more about all of the APM features available, +or running the Elastic stack on Docker in a production environment, see the following documentation: + +* {apm-server-ref-v}/running-on-docker.html[Running APM Server on Docker] +* {stack-gs}/get-started-docker.html[Running the Elastic Stack on Docker] + +endif::[] +// end::dev-environment[] diff --git a/docs/guide/redirects.asciidoc b/docs/guide/redirects.asciidoc index 9aef62f4567..bfe81b41155 100644 --- a/docs/guide/redirects.asciidoc +++ b/docs/guide/redirects.asciidoc @@ -37,3 +37,17 @@ This page has moved. Please see <>. === RUM Agent Compatibility This page has moved. Please see <>. + +[role="exclude",id="apm-release-notes"] +=== APM release highlights + +This page has moved. +Please see {observability-guide}/whats-new.html[What's new in Observability {minor-version}]. + +Please see <>. + +[role="exclude",id="whats-new"] +=== What's new in APM {minor-version} + +This page has moved. +Please see {observability-guide}/whats-new.html[What's new in Observability {minor-version}]. diff --git a/docs/guide/rum.asciidoc b/docs/guide/rum.asciidoc index e38b519168d..e7e38bcbd3e 100644 --- a/docs/guide/rum.asciidoc +++ b/docs/guide/rum.asciidoc @@ -1,5 +1,5 @@ [[rum]] -== Real User Monitoring (RUM) +=== Real User Monitoring (RUM) Real User Monitoring captures user interaction with clients such as web browsers. The {apm-rum-ref-v}[JavaScript Agent] is Elastic’s RUM Agent. To use it you need to {apm-server-ref-v}/configuration-rum.html[enable RUM support] in the APM Server. diff --git a/docs/guide/trace-sampling.asciidoc b/docs/guide/trace-sampling.asciidoc new file mode 100644 index 00000000000..7d640eb1a7c --- /dev/null +++ b/docs/guide/trace-sampling.asciidoc @@ -0,0 +1,108 @@ +[[trace-sampling]] +=== Transaction sampling + +Elastic APM supports head-based, probability sampling. +_Head-based_ means the sampling decision for each trace is made when that trace is initiated. +_Probability sampling_ means that each trace has a defined and equal probability of being sampled. + +For example, a sampling value of `.2` indicates a transaction sample rate of `20%`. +This means that only `20%` of traces will send and retain all of their associated information. +The remaining traces will drop contextual information to reduce the transfer and storage size of the trace. + +[float] +==== Why sample? 
+ +Distributed tracing can generate a substantial amount of data, +and storage can be a concern for users running `100%` sampling -- especially as they scale. + +The goal of probability sampling is to provide you with a representative set of data that allows +you to make statistical inferences about the entire group of data. +In other words, in most cases, you can still find anomalous patterns in your applications, detect outages, track errors, +and lower MTTR, even when sampling at less than `100%`. + +[float] +==== What data is sampled? + +A sampled trace retains all data associated with it. + +Non-sampled traces drop <> data. +Spans contain more granular information about what is happening within a transaction, +like external requests or database calls. +Spans also contain contextual information and labels. + +Regardless of the sampling decision, all traces retain transaction and error data. +This means the following data will always accurately reflect *all* of your application's requests, regardless of the configured sampling rate: + +* Transaction duration and transactions per minute +* Transaction breakdown metrics +* Errors, error occurrence, and error rate + +// To turn off the sending of all data, including transaction and error data, set `active` to `false`. + +[float] +==== Sample rates + +What's the best sampling rate? Unfortunately, there isn't one. +Sampling is dependent on your data, the throughput of your application, data retention policies, and other factors. +Any sampling rate from `.1%` to `100%` may be considered normal. +You may even decide to have a unique sample rate per service -- for example, if a certain service +experiences considerably more or less traffic than another. + +// Regardless, cost conscious customers are likely to be fine with a lower sample rate. + +[float] +==== Sampling with distributed tracing + +The initiating service makes the sampling decision in a distributed trace, +and all downstream services respect that decision. + +In each example below, `Service A` initiates four transactions. +In the first example, `Service A` samples at `.5` (`50%`). In the second, `Service A` samples at `1` (`100%`). +Each subsequent service respects the initial sampling decision, regardless of its configured sample rate. +The result is a sampling percentage that matches the initiating service: + +image::images/dt-sampling-example.png[How sampling impacts distributed tracing] + +[float] +==== APM app implications + +Because the transaction sample rate is respected by downstream services, +the APM app always knows which transactions have and haven't been sampled. +This prevents the app from showing broken traces. +In addition, because transaction and error data is never sampled, +you can always expect metrics and errors to be accurately reflected in the APM app. + +*Service maps* + +Service maps rely on distributed traces to draw connections between services. +A minimum version of APM agents is required for Service maps to work. +See {kibana-ref}/service-maps.html[Service maps] for more information. + +// Follow-up: Add link from https://www.elastic.co/guide/en/kibana/current/service-maps.html#service-maps-how +// to this page.
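+Under the hood, the sampling decision travels with the trace context that each service sends downstream. As a hedged illustration (the trace and span IDs below are the example values from the W3C Trace Context specification, and the URL is hypothetical), the trailing `trace-flags` field of the `traceparent` header tells the receiving service whether the trace was sampled:
+
+[source,console]
+----
+# The final field of `traceparent` is the trace-flags octet:
+# `01` means "sampled"; `00` would mean "not sampled".
+# The downstream agent honors this flag rather than its own sample rate.
+curl http://service-b.example.com/checkout \
+  -H 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'
+----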
+ +[float] +==== Adjust the sample rate + +There are three ways to adjust the transaction sample rate of your APM agents: + +Dynamic:: +The transaction sample rate can be changed dynamically (no redeployment necessary) on a per-service and per-environment +basis with {kibana-ref}/agent-configuration.html[APM Agent Configuration] in Kibana. + +Kibana API:: +APM Agent configuration exposes an API that can be used to programmatically change +your agents' sampling rate. +An example is provided in the {kibana-ref}/agent-config-api.html[Agent configuration API reference]. + +Configuration:: +Each agent provides a configuration value used to set the transaction sample rate. +See the relevant agent's documentation for more details: + +* Go: {apm-go-ref-v}/configuration.html#config-transaction-sample-rate[`ELASTIC_APM_TRANSACTION_SAMPLE_RATE`] +* Java: {apm-java-ref-v}/config-core.html#config-transaction-sample-rate[`transaction_sample_rate`] +* .NET: {apm-dotnet-ref-v}/config-core.html#config-transaction-sample-rate[`TransactionSampleRate`] +* Node.js: {apm-node-ref-v}/configuration.html#transaction-sample-rate[`transactionSampleRate`] +* PHP: {apm-php-ref-v}/configuration-reference.html#config-transaction-sample-rate[`transaction_sample_rate`] +* Python: {apm-py-ref-v}/configuration.html#config-transaction-sample-rate[`transaction_sample_rate`] +* Ruby: {apm-ruby-ref-v}/configuration.html#config-transaction-sample-rate[`transaction_sample_rate`] \ No newline at end of file diff --git a/docs/guide/troubleshooting.asciidoc b/docs/guide/troubleshooting.asciidoc index a97ce09ddc3..43cfa8b9cf7 100644 --- a/docs/guide/troubleshooting.asciidoc +++ b/docs/guide/troubleshooting.asciidoc @@ -6,13 +6,16 @@ If you run into trouble, there are three places you can look for help. [float] === Troubleshooting documentation -The APM Server and each APM agent has a troubleshooting guide: +The APM Server, APM app, and each APM agent have a troubleshooting guide: * {apm-server-ref-v}/troubleshooting.html[APM Server troubleshooting] +* {kibana-ref}/troubleshooting.html[APM app troubleshooting] * {apm-dotnet-ref-v}/troubleshooting.html[.NET agent troubleshooting] * {apm-go-ref-v}/troubleshooting.html[Go agent troubleshooting] +* {apm-ios-ref-v}/troubleshooting.html[iOS agent troubleshooting] * {apm-java-ref-v}/trouble-shooting.html[Java agent troubleshooting] * {apm-node-ref-v}/troubleshooting.html[Node.js agent troubleshooting] +* {apm-php-ref-v}/troubleshooting.html[PHP agent troubleshooting] * {apm-py-ref-v}/troubleshooting.html[Python agent troubleshooting] * {apm-ruby-ref-v}/debugging.html[Ruby agent troubleshooting] * {apm-rum-ref-v}/troubleshooting.html[RUM troubleshooting] @@ -21,7 +24,7 @@ === Elastic Support We offer a support experience unlike any other. -Our team of professionals 'speak human and code', and love open source and making your day. +Our team of professionals 'speak human and code' and love making your day. https://www.elastic.co/subscriptions[Learn more about subscriptions]. [float] diff --git a/docs/ilm-reference.asciidoc b/docs/ilm-reference.asciidoc index 8c543bfb6c2..94097f1174f 100644 --- a/docs/ilm-reference.asciidoc +++ b/docs/ilm-reference.asciidoc @@ -2,6 +2,8 @@ [role="xpack"] == Configure Index lifecycle management (ILM) +deprecated::[7.16.0,Users should now use the <>.
See <>] + ++++ Index lifecycle management ++++ diff --git a/docs/ilm.asciidoc b/docs/ilm.asciidoc index a5c090c134b..1a2b2eb7868 100644 --- a/docs/ilm.asciidoc +++ b/docs/ilm.asciidoc @@ -2,6 +2,8 @@ [role="xpack"] == Custom index lifecycle management with APM Server +deprecated::[7.16.0,Users should now use the <>. See <>] + ++++ Customize index lifecycle management ++++ diff --git a/docs/images/api-key-copy.png b/docs/images/api-key-copy.png new file mode 100644 index 00000000000..d47fc7cd2de Binary files /dev/null and b/docs/images/api-key-copy.png differ diff --git a/docs/images/api-key-create.png b/docs/images/api-key-create.png new file mode 100644 index 00000000000..1aee7e75baf Binary files /dev/null and b/docs/images/api-key-create.png differ diff --git a/docs/images/ingest-flow.png b/docs/images/ingest-flow.png new file mode 100644 index 00000000000..96476494b06 Binary files /dev/null and b/docs/images/ingest-flow.png differ diff --git a/docs/images/server-api-key-create.png b/docs/images/server-api-key-create.png new file mode 100644 index 00000000000..d21c440b19a Binary files /dev/null and b/docs/images/server-api-key-create.png differ diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 5b07996d4b2..57220d80140 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -1,6 +1,7 @@ include::./version.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] +:apm-package-dir: {docdir}/apm-package :libbeat-dir: {docdir}/copied-from-beats/docs :libbeat-outputs-dir: {docdir}/copied-from-beats/outputs :version: {apm_server_version} @@ -36,6 +37,11 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :win_os: :linux_os: +:github_repo_link: https://github.com/elastic/apm-server/blob/v{version} +ifeval::["{version}" == "8.0.0"] +:github_repo_link: https://github.com/elastic/apm-server/blob/master +endif::[] + :downloads: https://artifacts.elastic.co/downloads/apm-server ifdef::env-github[] @@ -74,4 +80,6 @@ include::./upgrading.asciidoc[] include::./release-notes.asciidoc[] +include::{apm-package-dir}/apm-integration.asciidoc[] + include::./redirects.asciidoc[] diff --git a/docs/jaeger-reference.asciidoc b/docs/jaeger-reference.asciidoc index ecf5950f5ae..05445a4d04b 100644 --- a/docs/jaeger-reference.asciidoc +++ b/docs/jaeger-reference.asciidoc @@ -7,12 +7,10 @@ // this content is reused in the how-to guides // tag::jaeger-intro[] -experimental::["This feature is experimental and may be changed in a future release. It is not yet available on Elastic Cloud. For feature status on Elastic Cloud, see https://github.com/elastic/apm/issues/212[#212]."] - Elastic APM integrates with https://www.jaegertracing.io/[Jaeger], an open-source, distributed tracing system. This integration allows users with an existing Jaeger setup to switch from the default Jaeger backend, -to ingesting data with Elastic's APM Server, storing data in {es}, and visualizing traces in the APM app. -Best of all, this can be done without any instrumentation changes in your application code. +to the Elastic Stack -- transform data with APM Server, store data in {es}, and visualize traces in the Kibana APM app. +Best of all, no instrumentation changes are needed in your application code. // end::jaeger-intro[] Ready to get started? See the <> guide. @@ -22,24 +20,50 @@ Ready to get started? See the <> guide. === Supported architecture Jaeger architecture supports different data formats and transport protocols -that define how data can be sent to a collector. 
Elastic APM, as a Jaeger Collector, supports: +that define how data can be sent to a collector. Elastic APM, as a Jaeger collector, +supports communication with *Jaeger agents* via gRPC. -* Communication with *Jaeger Agents* via gRPC -+ -The gRPC endpoint supports TLS. If the Jaeger gRPC collector service is enabled, -and `apm-server.ssl` is configured, SSL settings will automatically be applied to APM Server's Jaeger gRPC endpoint. -+ -The gRPC endpoint supports probabilistic sampling. -APM Server automatically enables the sampling endpoint when `grpc.enabled` is set to `true`. -Sampling decisions can be configured <>, with APM Agent configuration, or <>, in each Jaeger client. +* APM Server serves Jaeger gRPC over the same <> as the Elastic APM agent protocol. -* Communication with *Jaeger Clients* via thrift over HTTP -+ -The Client HTTP endpoint does not support TLS or sampling. +* The APM Server gRPC endpoint supports TLS. If `apm-server.ssl` is configured, +SSL settings will automatically be applied to APM Server's Jaeger gRPC endpoint. -TIP: See the https://www.jaegertracing.io/docs/1.14/architecture[Jaeger docs] +* The gRPC endpoint supports probabilistic sampling. +Sampling decisions can be configured <> with APM Agent central configuration, or <> in each Jaeger client. + +See the https://www.jaegertracing.io/docs/1.22/architecture[Jaeger docs] for more information on Jaeger architecture. +[%collapsible] +.Communication with *Jaeger Clients* via thrift over HTTP (deprecated) +==== +**** + +deprecated::[7.13.0, To be removed in 8.0.0] + +If you've configured your Jaeger clients to send spans directly to collectors (bypassing Jaeger agents), +enable the APM Server HTTP endpoint by setting `apm-server.jaeger.http.enabled` to `true`. +Use `apm-server.jaeger.http.host` to change the default port. + +* The Client HTTP endpoint does not support TLS or sampling, +and is not supported by our hosted {ess} on {ecloud}. + +* If you're using an officially supported Jaeger Client library and want to connect directly to APM Server, +you need to update the `JAEGER_ENDPOINT` configuration property. +This is the HTTP endpoint the Client will send spans to. +The `host:port` set here should correspond to the value set in `apm-server.jaeger.http.host`. +See the relevant supported Jaeger library for more information. ++ +** https://github.com/jaegertracing/jaeger-client-go[Go] +** https://github.com/jaegertracing/jaeger-client-java[Java] +** https://github.com/jaegertracing/jaeger-client-node[Node.js] +** https://github.com/jaegertracing/jaeger-client-python[Python] +** https://github.com/jaegertracing/jaeger-client-cpp[C++] +** https://github.com/jaegertracing/jaeger-client-csharp[C#] + +**** +==== + [float] [[jaeger-caveats]] === Caveats @@ -49,14 +73,13 @@ There are some limitations and differences between Elastic APM and Jaeger that y *Jaeger integration limitations:* * Because Jaeger has its own trace context header, and does not currently support W3C trace context headers, -it is not possible to mix and match the use of Elastic's APM Agents and Jaeger's Clients. +it is not possible to mix and match the use of Elastic's APM agents and Jaeger's clients. * Elastic APM only supports probabilistic sampling. -* We currently only support exception logging. Span logs are not supported. *Differences between APM Agents and Jaeger Clients:* -* Jaeger Clients only sends trace data. 
-APM Agents support a larger number of features, like +* Jaeger clients only send trace data. +APM agents support a larger number of features, like multiple types of metrics, and application breakdown charts. When using Jaeger, features like this will not be available in the APM app. * Elastic APM's {apm-overview-ref-v}/apm-data-model.html[data model] is different than Jaegers. @@ -69,6 +92,8 @@ https://github.com/opentracing/specification/blob/master/semantic_conventions.md [[jaeger-configuration]] === Configuration options +deprecated::[7.13.0, APM Server serves Jaeger gRPC over the same port as the Elastic APM agent protocol. The following configuration options have been deprecated, and will be removed in 8.0.0] + You can specify the following options in the `apm-server.jaeger.*` section of the +{beatname_lc}.yml+ configuration file. diff --git a/docs/jaeger-support.asciidoc b/docs/jaeger-support.asciidoc index 9381c68c8d8..f7b89d300e2 100644 --- a/docs/jaeger-support.asciidoc +++ b/docs/jaeger-support.asciidoc @@ -11,132 +11,57 @@ include::./jaeger-reference.asciidoc[tag=jaeger-intro] [[jaeger-get-started]] ==== Get started -Connecting your preexisting Jaeger setup to Elastic APM is easy! -Configure APM Server to receive Jaeger data, -set up sampling, and configure your Jaeger Agents or Jaeger Clients to start sending spans to APM Server. +Connect your preexisting Jaeger setup to Elastic APM in three steps: -* <> -* <> * <> +* <> * <> -IMPORTANT: There are important <> to understand about this integration. +IMPORTANT: There are <> to this integration. [float] -[[jaeger-configure-apm-server]] -==== Configure APM Server - -. Enable the correct jaeger endpoint in the `apm-server.yml` configuration file. -+ -In a typical Jaeger deployment, Clients send spans to Agents, who forward them to Collectors. -If this matches your architecture, enable the gRPC endpoint by setting -`apm-server.jaeger.grpc.enabled` to `true`. -+ -Alternatively, if you've configured your Clients to send spans directly to Collectors (bypassing Jaeger Agents), -enable the HTTP endpoint by setting `apm-server.jaeger.http.enabled` to `true`. - -. Configure the host and port that APM Server listens on. -+ -Based on the endpoint enabled in the previous step, configure the relevant host and port: -+ -* `apm-server.jaeger.grpc.host` defaults to `localhost:14250`. -* `apm-server.jaeger.http.host` defaults to `localhost:14268`. [float] +[[jaeger-configure-agent-client]] +==== Configure Jaeger agents + +APM Server serves Jaeger gRPC over the same <> as the Elastic APM agent protocol. + +include::./tab-widgets/jaeger-widget.asciidoc[] [float] [[jaeger-configure-sampling]] ==== Configure Sampling -The gRPC endpoint supports probabilistic sampling, which can be used to reduce the amount of data that your agents collect and send. +APM Server supports probabilistic sampling, which can be used to reduce the amount of data that your agents collect and send. Probabilistic sampling makes a random sampling decision based on the configured sampling value. For example, a value of `.2` means that 20% of traces will be sampled. -APM Server automatically enables the sampling endpoint when `grpc.enabled` is set to `true`. -There are two different ways to configure the sampling rate of your Jaeger Agents: +There are two different ways to configure the sampling rate of your Jaeger agents: -* <>, with APM Agent configuration (default). -* <>, in each Jaeger client.
+* <> +* <> [float] [[jaeger-configure-sampling-central]] -===== Central sampling +===== APM Agent central configuration (default) -Central sampling, with APM Agent configuration, -requires the <> to be enabled. -This allows Jaeger clients to poll APM Server for the sampling rate. -To enable the kibana endpoint, set <> to `true`, and point <> at the Kibana host that APM Server will communicate with. +Central sampling, with APM Agent central configuration, +allows Jaeger clients to poll APM Server for the sampling rate. +This means sample rates can be configured on the fly, on a per-service and per-environment basis. -The default sampling ratio, as well as per-service sampling rates, -can then be configured via the {kibana-ref}/agent-configuration.html[Agent configuration] page in the APM app. +include::./tab-widgets/jaeger-sampling-widget.asciidoc[] [float] [[jaeger-configure-sampling-local]] -===== Local sampling +===== Local sampling in each Jaeger client If you don't have access to the APM app, -you'll need to change the Jaeger Client's `sampler.type` and `sampler.param`, -enabling you to set the sampling configuration locally in each Client. -See the official https://www.jaegertracing.io/docs/1.17/sampling/[Jaeger sampling documentation] +you'll need to change the Jaeger client's `sampler.type` and `sampler.param`. +This enables you to set the sampling configuration locally in each Jaeger client. +See the official https://www.jaegertracing.io/docs/1.22/sampling/[Jaeger sampling documentation] for more information. -[float] -[[jaeger-configure-agent-client]] -==== Configure Jaeger communication - -[float] -[[jaeger-configure-grpc]] -===== Jaeger Agent communication with APM Server (gRPC) - -As of this writing, the Jaeger Agent binary offers the `--reporter.grpc.host-port` CLI flag, -which can be used to set a static list of collectors for the Jaeger Agent to connect to. -The `host:port` set here should correspond with the value set in `apm-server.jaeger.grpc.host`. - -*Optional token-based authorization** - -A <> or <> can be used to ensure only authorized -Jaeger Agents can send data to the APM Server. -Authorization is off by default, but can be enabled by setting a value in `apm-server.jaeger.grpc.auth_tag`. -When enabled, APM Server looks for a _Process tag_ in each incoming event, -and uses it to authorize the Jaeger Agent against the configured `auth_tag` and secret token or API key. -Auth tags will be removed from events after being verified. - -Here's an example that sets the `auth_tag` and `secret_token` in APM Server: - -[source,yaml] ----- -apm-server.jaeger.grpc.enabled=true -apm-server.jaeger.grpc.auth_tag=authorization -apm-server.secret_token=qwerty1234 ----- - -To authorize Jaeger Agent communication, use the `--agent.tags` CLI flag to pass the corresponding Process tag to the APM Server: - -[source,console] ----- ---agent.tags "authorization=Bearer qwerty1234" ----- - -See the https://www.jaegertracing.io/docs/1.16/cli/[Jaeger CLI flags documentation] for more information. - -[float] -[[jaeger-configure-http]] -===== Jaeger Client communication with APM Server (HTTP) - -If you're using an officially supported Jaeger Client library and want to connect directly to APM Server, -you need to update the `JAEGER_ENDPOINT` configuration property. -This is the HTTP endpoint the Client will send spans to. -The `host:port` set here should correspond to the value set in `apm-server.jaeger.http.host`. - -See the relevant supported Jaeger library for more information. 
- -* https://github.com/jaegertracing/jaeger-client-go[Go] -* https://github.com/jaegertracing/jaeger-client-java[Java] -* https://github.com/jaegertracing/jaeger-client-node[Node.js] -* https://github.com/jaegertracing/jaeger-client-python[Python] -* https://github.com/jaegertracing/jaeger-client-cpp[C++] -* https://github.com/jaegertracing/jaeger-client-csharp[C#] - [float] [[jaeger-configure-start]] ==== Start sending span data -Data sent from Jaeger Agents or Clients to APM Server should now be visible in the APM app! +That's it! Data sent from Jaeger clients to the APM Server can now be viewed in the APM app. diff --git a/docs/metadata-api.asciidoc b/docs/metadata-api.asciidoc index 733ac00d21d..52ea228f095 100644 --- a/docs/metadata-api.asciidoc +++ b/docs/metadata-api.asciidoc @@ -7,14 +7,10 @@ This provides general metadata concerning the other objects in the stream. Rather than send this metadata information from the agent multiple times, the APM Server hangs on to this information and applies it to other objects in the stream as necessary. -TIP: Metadata is stored under `context` when viewing documents in Elasticsearch. +TIP: Metadata is stored under `context` when viewing documents in Elasticsearch. * <> * <> -* <> -* <> -* <> -* <> [[kubernetes-data]] [float] @@ -61,45 +57,10 @@ The table below maps these environment variables to the APM metadata event field [float] ==== Metadata Schema -The APM Server uses JSON Schema for validating requests. The specification for metadata is defined below: +APM Server uses JSON Schema to validate requests. The specification for metadata is defined on +{github_repo_link}/docs/spec/v2/metadata.json[GitHub] and included below: [source,json] ---- -include::./spec/metadata.json[] ----- - -[[metadata-service-schema]] -[float] -===== Service Schema - -[source,json] ----- -include::./spec/service.json[] ----- - -[[metadata-process-schema]] -[float] -===== Process Schema - -[source,json] ----- -include::./spec/process.json[] ----- - -[[metadata-system-schema]] -[float] -===== System Schema - -[source,json] ----- -include::./spec/system.json[] ----- - -[[metadata-user-schema]] -[float] -===== User Schema - -[source,json] ----- -include::./spec/user.json[] +include::./spec/v2/metadata.json[] ---- \ No newline at end of file diff --git a/docs/metricset-api.asciidoc b/docs/metricset-api.asciidoc index c9c7dcd63fd..40c770d5c45 100644 --- a/docs/metricset-api.asciidoc +++ b/docs/metricset-api.asciidoc @@ -1,15 +1,16 @@ [[metricset-api]] === Metrics -Metrics contain application metric data captured by an APM agent. +Metrics contain application metric data captured by an APM agent. [[metricset-schema]] [float] ==== Metric Schema -The APM Server uses JSON Schema for validating requests. The specification for metrics is defined below: +APM Server uses JSON Schema to validate requests. 
The specification for metrics is defined on +{github_repo_link}/docs/spec/v2/metricset.json[GitHub] and included below: [source,json] ---- -include::./spec/metricsets/metricset.json[] +include::./spec/v2/metricset.json[] ---- diff --git a/docs/metricset-indices.asciidoc b/docs/metricset-indices.asciidoc index f1efda592c5..901255b6e24 100644 --- a/docs/metricset-indices.asciidoc +++ b/docs/metricset-indices.asciidoc @@ -1,13 +1,133 @@ [[metricset-indices]] -== Example metric documents +== Metrics documents ++++ Metrics documents ++++ -This example shows what metric documents can look like when indexed in Elasticsearch: +APM Server stores application metrics sent by agents as documents in Elasticsearch. +Metric documents contain a timestamp, one or more metric fields, +and non-numerical fields describing the resource to which the metrics pertain. + +For example, the {apm-java-agent} produces {apm-java-ref-v}/metrics.html#metrics-jvm[JVM-specific metrics]. +This includes garbage collection metrics (`jvm.gc.count`, `jvm.gc.time`) which are related to a specific memory manager, +such as "G1 Young Generation", identified by the field `labels.name`. +See <> for an example document containing these metrics. + +Metric documents can be identified by searching for `processor.event: metric`. + +[float] +[[internal-metrics]] +=== APM-defined metrics + +The APM Agents and APM Server also calculate metrics from trace events, used to power various features of Elastic APM. +These metrics are described below. + +[float] +[[breakdown-metrics-fields]] +==== Breakdown metrics + +To power the {apm-app-ref}/transactions.html[Time spent by span type] graph, +agents collect summarized metrics about the timings of spans and transactions, +broken down by span type. + +*`transaction.breakdown.count`*:: ++ +-- +The number of transactions for which breakdown metrics (`span.self_time`) have been created +in the most recent metrics reporting interval. Some agents measure the breakdown for both +sampled and non-sampled transactions, while others measure only for sampled transactions. + +These metric documents can be identified by searching for `metricset.name: transaction_breakdown`. + +You can filter and group by these dimensions: + +* `transaction.name`: The name of the transaction, for example `GET /` +* `transaction.type`: The type of the transaction, for example `request` +-- + +*`span.self_time.count`* and *`span.self_time.sum.us`*:: ++ +-- +These metrics measure the "self-time" for a span type, and optional subtype, +within a transaction group. Together these metrics can be used to calculate +the average duration and percentage of time spent on each type of operation +within a transaction group. + +These metric documents can be identified by searching for `metricset.name: span_breakdown`. + +You can filter and group by these dimensions: + +* `transaction.name`: The name of the enclosing transaction group, for example `GET /` +* `transaction.type`: The type of the enclosing transaction, for example `request` +* `span.type`: The type of the span, for example `app`, `template` or `db` +* `span.subtype`: The sub-type of the span, for example `mysql` (optional) +-- + +[float] +==== Transaction metrics + +To power {kibana-ref}/xpack-apm.html[APM app] visualizations, +APM Server aggregates transaction events into latency distribution metrics. + +*`transaction.duration.histogram`*:: ++ +-- +This metric measures the latency distribution of transaction groups, +used to power visualizations and analytics in Elastic APM. 
+ +These metric documents can be identified by searching for `metricset.name: transaction`. + +You can filter and group by these dimensions (some of which are optional, for example `container.id`): + +* `transaction.name`: The name of the transaction, for example `GET /` +* `transaction.type`: The type of the transaction, for example `request` +* `transaction.result`: The result of the transaction, for example `HTTP 2xx` +* `transaction.root`: A boolean flag indicating whether the transaction is the root of a trace +* `event.outcome`: The outcome of the transaction, for example `success` +* `agent.name`: The name of the APM agent that instrumented the transaction, for example `java` +* `service.name`: The name of the service that served the transaction +* `service.version`: The version of the service that served the transaction +* `service.environment`: The environment of the service that served the transaction +* `host.hostname`: The hostname of the service that served the transaction +* `container.id`: The container ID of the service that served the transaction +* `kubernetes.pod.name`: The name of the Kubernetes pod running the service that served the transaction +-- + +[float] +==== Service-destination metrics + +To power {kibana-ref}/xpack-apm.html[APM app] visualizations, +APM Server aggregates span events into "service destination" metrics. + +*`span.destination.service.response_time.count`* and *`span.destination.service.response_time.sum.us`*:: ++ +-- +These metrics measure the count and total duration of requests from one service to another service. +These are used to calculate the throughput and latency of requests to backend services such as databases in +{kibana-ref}/service-maps.html[Service maps]. + +These metric documents can be identified by searching for `metricset.name: service_destination`. + +You can filter and group by these dimensions: + +* `span.destination.service.resource`: The destination service resource, for example `mysql` +* `event.outcome`: The outcome of the operation, for example `success` +* `agent.name`: The name of the APM agent that instrumented the operation, for example `java` +* `service.name`: The name of the service that made the request +* `service.environment`: The environment of the service that made the request +-- + +[float] +[[example-metric-document]] +=== Example metric document + +Below is an example of a metric document as stored in Elasticsearch, containing JVM metrics produced by the {apm-java-agent}. +The document contains two related metrics: `jvm.gc.time` and `jvm.gc.count`. These are accompanied by various fields describing +the environment in which the metrics were captured: service name, host name, Kubernetes pod UID, container ID, process ID, and more. +These fields make it possible to search and aggregate across various dimensions, such as by service, host, and Kubernetes pod. [source,json] ---- -include::./data/elasticsearch/generated/metricsets.json[] +include::./data/elasticsearch/metricset.json[] ---- diff --git a/docs/release-notes.asciidoc b/docs/release-notes.asciidoc index 082c582aecd..5dc837a9f13 100644 --- a/docs/release-notes.asciidoc +++ b/docs/release-notes.asciidoc @@ -10,6 +10,12 @@ -- This following sections summarizes the changes in each release. 
+* <> +* <> +* <> +* <> +* <> +* <> * <> * <> * <> diff --git a/docs/secure-communication-agents.asciidoc b/docs/secure-communication-agents.asciidoc index 852f2063d0e..5b0a5051c5a 100644 --- a/docs/secure-communication-agents.asciidoc +++ b/docs/secure-communication-agents.asciidoc @@ -1,5 +1,5 @@ [[secure-communication-agents]] -== Secure communication with APM Agents +== Secure communication with APM agents Communication between APM agents and APM Server can be both encrypted and authenticated. Encryption is achievable through <>. @@ -15,9 +15,10 @@ In addition, since both mechanisms involve sending a secret as plain text, they should be used in combination with SSL/TLS encryption. As soon as an authenticated communication is enabled, requests without a valid token or API key will be denied by APM Server. -As RUM endpoints cannot be secured through these mechanisms, they are exempt from this rule. +An exception to this rule can be configured with <>, +which is useful for APM agents running on the client side, like the Real User Monitoring (RUM) agent. -In addition, there is a less straightforward and more restrictive way to authenticate clients through +There is a less straightforward and more restrictive way to authenticate clients through <>, which is currently a mainstream option only for the RUM agent (through the browser) and the Jaeger agent. @@ -30,41 +31,44 @@ include::./ssl-input.asciidoc[] [[api-key]] === API keys -experimental::[] - -You can configure API keys to authorize requests to the APM Server. - NOTE: API keys are sent as plain-text, so they only provide security when used in combination with <>. +They are not applicable for agents running on clients, like the RUM agent, +as there is no way to prevent them from being publicly exposed. -By enabling `apm-server.api_key.enabled: true`, you ensure that only agents with a valid API Key -are able to successfully use APM Server's API (except for RUM endpoints). +Configure API keys to authorize requests to the APM Server. +To enable API key authorization, set `apm-server.auth.api_key.enabled` to `true`. -To secure the communication between APM Agents and the APM Server with API keys: +There are multiple, unique privileges you can assign to each API key. +API keys can have one or more of these privileges: -. Make sure <> is enabled -. <> -. <> -. <> +* *Agent configuration* (`config_agent:read`): Required for agents to read +{kibana-ref}/agent-configuration.html[Agent configuration remotely]. +* *Ingest* (`event:write`): Required for ingesting Agent events. +* *Sourcemap* (`sourcemap:write`): Required for <>. -NOTE: API Keys are not applicable for the RUM Agent, -as there is no way to prevent them from being publicly exposed. +To secure the communication between APM Agents and the APM Server with API keys, +make sure <> is enabled, then complete these steps: + +. <> +. <> +. <> [[configure-api-key]] [float] === Enable and configure API keys -API keys are disabled by default. You can change this, and additional settings, -in the `apm-server.api_key` section of the +{beatname_lc}.yml+ configuration file. +API keys are disabled by default. Enable and configure this feature in the `apm-server.auth.api_key` +section of the +{beatname_lc}.yml+ configuration file. At a minimum, you must enable API keys, and should set a limit on the number of unique API keys that APM Server allows per minute. 
-Here's an example `apm-server.api_key` config using 50 unique API keys: +Here's an example `apm-server.auth.api_key` config using 50 unique API keys: [source,yaml] ---- -apm-server.api_key.enabled: true <1> -apm-server.api_key.limit: 50 <2> +apm-server.auth.api_key.enabled: true <1> +apm-server.auth.api_key.limit: 50 <2> ---- <1> Enables API keys <2> Restricts the number of unique API keys that {es} allows each minute. @@ -74,35 +78,95 @@ All other configuration options are described in <>. [[create-api-key]] [float] -=== Create and validate an API key +=== Create an API key + +[role="screenshot"] +image::images/api-key-create.png[API key creation] + +In {kib}, navigate to **Stack Management** > **API keys** and click **Create API key**. + +Enter a name for your API key and select **Restrict privileges**. +In the role descriptors box, copy and paste the following JSON. +This example creates an API key with privileges for ingesting APM events, +reading agent central configuration, and uploading a sourcemap: + +[source,json] +---- +{ + "apm": { + "applications": [ + { + "application": "apm", + "privileges": ["sourcemap:write", "event:write", "config_agent:read"], <1> + "resources": ["*"] + } + ] + } +} +---- +<1> This example adds all three API privileges to the new API key. +Privileges are described <>. Remove any privileges that you do not need. + +To set an expiration date for the API key, select **Expire after time** +and input the lifetime of the API key in days. + +Click **Create API key** and then copy the Base64-encoded API key. +You will need this for the next step, and you will not be able to view it again. + +[role="screenshot"] +image::images/api-key-copy.png[API key copy base64] + +[[set-api-key]] +[float] +=== Set the API key in your APM agents + +You can now apply your newly created API keys in the configuration of each of your APM agents. +See the relevant agent documentation for additional information: + +// Not relevant for RUM and iOS +* *Go agent*: {apm-go-ref}/configuration.html#config-api-key[`ELASTIC_APM_API_KEY`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-api-key[`ApiKey`] +* *Java agent*: {apm-java-ref}/config-reporter.html#config-api-key[`api_key`] +* *Node.js agent*: {apm-node-ref}/configuration.html#api-key[`apiKey`] +* *PHP agent*: {apm-php-ref-v}/configuration-reference.html#config-api-key[`api_key`] +* *Python agent*: {apm-py-ref}/configuration.html#config-api-key[`api_key`] +* *Ruby agent*: {apm-ruby-ref}/configuration.html#config-api-key[`api_key`] + +[[configure-api-key-alternative]] +[float] +=== Alternate API key creation methods + +API keys can also be created and validated outside of Kibana: + +* <> +* <> + +[[create-api-key-workflow-apm-server]] +[float] +==== APM Server API key workflow APM Server provides a command line interface for creating, retrieving, invalidating, and verifying API keys. -Keys created using this method can only be used for Agent/Server communication. +Keys created using this method can only be used for communication with APM Server. [[create-api-key-subcommands]] [float] -==== `apikey` subcommands +===== `apikey` subcommands include::{libbeat-dir}/command-reference.asciidoc[tag=apikey-subcommands] [[create-api-key-privileges]] [float] -==== Privileges +===== Privileges -There are three unique privileges you can assign to each API keys. If privileges are not specified at creation time, the created key will have all privileges.
-* *Agent configuration*: Required for agents to read -{kibana-ref}/agent-configuration.html[Agent configuration remotely]. -`--agent-config` gives the `config_agent:read` privilege to the created key. -* *Ingest*: Required for ingesting Agent events. -`--ingest` gives the `event:write` privilege to the created key. -* *Sourcemap*: Required for <>. -`--sourcemap` gives the `sourcemap:write` privilege to the created key. +* `--agent-config` grants the `config_agent:read` privilege +* `--ingest` grants the `event:write` privilege +* `--sourcemap` grants the `sourcemap:write` privilege [[create-api-key-workflow]] [float] -==== API key workflow example +===== Create an API key Create an API key with the `create` subcommand. @@ -165,9 +229,9 @@ A full list of `apikey` subcommands and flags is available in the <>] + +In versions prior to 7.14.0, API Key authorization was known as `apm-server.api_key`. In 7.14.0 this was renamed `apm-server.auth.api_key`. +The old configuration will continue to work until 8.0.0, and the new configuration will take precedence. + [[secret-token]] === Secret token @@ -436,11 +499,11 @@ Both the agents and the APM servers have to be configured with the same secret t NOTE: Secret tokens are sent as plain-text, so they only provide security when used in combination with <>. -To secure the communication between APM Agents and the APM Server with a secret token: +To secure the communication between APM agents and the APM Server with a secret token: . Make sure <> is enabled -. <> -. <> +. <> +. <> NOTE: Secret tokens are not applicable for the RUM Agent, as there is no way to prevent them from being publicly exposed. @@ -451,52 +514,85 @@ as there is no way to prevent them from being publicly exposed. **APM Server configuration** +NOTE: {ess} and {ece} deployments provision a secret token when the deployment is created. +The secret token can be found and reset in the {ecloud} console under **Deployments** -- **APM & Fleet**. + Here's how you set the secret token in APM Server: [source,yaml] ---- -apm-server.secret_token: +apm-server.auth.secret_token: ---- We recommend saving the token in the APM Server <>. -IMPORTANT: Secret tokens are not applicable for the RUM Agent, +IMPORTANT: Secret tokens are not applicable for the RUM agent, as there is no way to prevent them from being publicly exposed. 
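+To confirm that APM Server accepts the token, you can send a request to the server information endpoint. A minimal sketch, assuming the server listens on the default `localhost:8200` and using a placeholder token:
+
+[source,console]
+----
+# An authenticated request should return the server information payload;
+# a wrong or missing token will not.
+curl --verbose -X GET http://localhost:8200 \
+  -H "Authorization: Bearer your_secret_token"
+----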
**Agent specific configuration** Each Agent has a configuration for setting the value of the secret token: -* *Go Agent*: {apm-go-ref}/configuration.html#config-secret-token[`ELASTIC_APM_SECRET_TOKEN`] -* *Java Agent*: {apm-java-ref}/config-reporter.html#config-secret-token[`secret_token`] -* *.NET Agent*: {apm-dotnet-ref}/config-reporter.html#config-secret-token[`ELASTIC_APM_SECRET_TOKEN`] -* *Node.js Agent*: {apm-node-ref}/configuration.html#secret-token[`Secret Token`] -* *Python Agent*: {apm-py-ref}/configuration.html#config-secret-token[`secret_token`] -* *Ruby Agent*: {apm-ruby-ref}/configuration.html#config-secret-token[`secret_token`] +* *Go agent*: {apm-go-ref}/configuration.html#config-secret-token[`ELASTIC_APM_SECRET_TOKEN`] +* *iOS agent*: {apm-ios-ref-v}/configuration.html#secretToken[`secretToken`] +* *Java agent*: {apm-java-ref}/config-reporter.html#config-secret-token[`secret_token`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-secret-token[`ELASTIC_APM_SECRET_TOKEN`] +* *Node.js agent*: {apm-node-ref}/configuration.html#secret-token[`Secret Token`] +* *PHP agent*: {apm-php-ref-v}/configuration-reference.html#config-secret-token[`secret_token`] +* *Python agent*: {apm-py-ref}/configuration.html#config-secret-token[`secret_token`] +* *Ruby agent*: {apm-ruby-ref}/configuration.html#config-secret-token[`secret_token`] [[https-in-agents]] [float] -=== HTTPS communication in APM Agents +=== HTTPS communication in APM agents -To enable secure communication in your Agents, you need to update the configured server URL to use `HTTPS` instead of `HTTP`. +To enable secure communication in your agents, you need to update the configured server URL to use `HTTPS` instead of `HTTP`. -* *Go Agent*: {apm-go-ref}/configuration.html#config-server-url[`ELASTIC_APM_SERVER_URL`] -* *Java Agent*: {apm-java-ref}/config-reporter.html#config-server-urls[`server_urls`] -* *.NET Agent*: {apm-dotnet-ref}/config-reporter.html#config-server-urls[`ServerUrls`] -* *Node.js Agent*: {apm-node-ref}/configuration.html#server-url[`serverUrl`] -* *Python Agent*: {apm-py-ref}/[`server_url`] -* *Ruby Agent*: {apm-ruby-ref}/configuration.html#config-server-url[`server_url`] +* *Go agent*: {apm-go-ref}/configuration.html#config-server-url[`ELASTIC_APM_SERVER_URL`] +* *Java agent*: {apm-java-ref}/config-reporter.html#config-server-urls[`server_urls`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-server-url[`ServerUrl`] +* *Node.js agent*: {apm-node-ref}/configuration.html#server-url[`serverUrl`] +* *PHP agent*: {apm-php-ref-v}/configuration-reference.html#config-server-url[`server_url`] +* *Python agent*: {apm-py-ref}/[`server_url`] +* *Ruby agent*: {apm-ruby-ref}/configuration.html#config-server-url[`server_url`] -Some Agents also allow you to specify a custom certificate authority for connecting to APM Server. +Some agents also allow you to specify a custom certificate authority for connecting to APM Server. 
-* *Go Agent*: {apm-go-ref}/configuration.html#config-server-cert[`ELASTIC_APM_SERVER_CERT`] -* *Python Agent*: {apm-py-ref}/configuration.html#config-server-cert[`ELASTIC_APM_SERVER_CERT`] -* *Ruby Agent*: {apm-ruby-ref}/configuration.html#config-ssl-ca-cert[`server_ca_certedit`] +* *Go agent*: certificate pinning through {apm-go-ref}/configuration.html#config-server-cert[`ELASTIC_APM_SERVER_CERT`] +* *Python agent*: certificate pinning through {apm-py-ref}/configuration.html#config-server-cert[`server_cert`] +* *Ruby agent*: certificate pinning through {apm-ruby-ref}/configuration.html#config-ssl-ca-cert[`server_ca_cert`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-server-cert[`ServerCert`] +* *Node.js agent*: custom CA setting through {apm-node-ref}/configuration.html#server-ca-cert-file[`serverCaCertFile`] +* *Java agent*: adding the certificate to the JVM `trustStore`. +See {apm-java-ref}/ssl-configuration.html#ssl-server-authentication[APM Server authentication] for more details. -Most Agents that don't allow you specify a custom certificate will allow you to +Agents that don't allow you to specify a custom certificate will allow you to disable verification of the SSL certificate. This ensures encryption, but does not verify that you are sending data to the correct APM Server. -* *Java Agent*: {apm-java-ref}/config-reporter.html#config-verify-server-cert[`verify_server_cert`] -* *Node.js Agent*: {apm-node-ref}/configuration.html#validate-server-cert[`verifyServerCert`] -* *.NET Agent*: {apm-dotnet-ref}/config-reporter.html#config-verify-server-cert[`VerifyServerCert`] +* *Go agent*: {apm-go-ref}/configuration.html#config-verify-server-cert[`ELASTIC_APM_VERIFY_SERVER_CERT`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-verify-server-cert[`VerifyServerCert`] +* *Java agent*: {apm-java-ref}/config-reporter.html#config-verify-server-cert[`verify_server_cert`] +* *PHP agent*: {apm-php-ref-v}/configuration-reference.html#config-verify-server-cert[`verify_server_cert`] +* *Python agent*: {apm-py-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] +* *Ruby agent*: {apm-ruby-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] +* *Node.js agent*: {apm-node-ref}/configuration.html#validate-server-cert[`verifyServerCert`] + +[[secure-communication-unauthenticated]] +=== Anonymous authentication + +Elastic APM agents can send unauthenticated (anonymous) events to the APM Server. +This is useful for agents that run on clients, like the Real User Monitoring (RUM) agent running in a browser, +or the iOS/Swift agent running in a user application. +Incoming requests are considered anonymous if no authentication token can be extracted from the request. +By default, these anonymous requests are rejected and an authentication error is returned. + +Anonymous authentication must be enabled to collect RUM data. +To enable anonymous access, set either <> or +<> to `true`. + +Because anyone can send anonymous events to the APM Server, +additional configuration variables are available to rate limit the number of anonymous events the APM Server processes; +throughput is equal to the `rate_limit.ip_limit` times the `rate_limit.event_limit`. + +See <> for a complete list of options and a sample configuration file.
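+For illustration, a minimal sketch of what enabling anonymous access for the RUM agent might look like in `apm-server.yml` (the limits shown are illustrative, not recommendations):
+
+[source,yaml]
+----
+apm-server.auth.anonymous.enabled: true
+# Only accept anonymous events from these agents.
+apm-server.auth.anonymous.allow_agent: [rum-js, js-base]
+# Track up to 100 client IPs, each allowed up to 300 events per second.
+apm-server.auth.anonymous.rate_limit.ip_limit: 100
+apm-server.auth.anonymous.rate_limit.event_limit: 300
+----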
diff --git a/docs/sourcemap-api.asciidoc b/docs/sourcemap-api.asciidoc index a9b805c99ce..c7cca257a54 100644 --- a/docs/sourcemap-api.asciidoc +++ b/docs/sourcemap-api.asciidoc @@ -10,6 +10,9 @@ IMPORTANT: You must <> in the APM Server f The APM Server exposes an API endpoint to upload source maps for real user monitoring (RUM). See the <> guide to get started. +If you're using the <>, +you must use the Kibana {kibana-ref}/rum-sourcemap-api.html[source map upload API] instead. + [[sourcemap-endpoint]] [float] === Upload endpoint diff --git a/docs/sourcemaps.asciidoc b/docs/sourcemaps.asciidoc index 92d229ee9a4..9b9eb962de1 100644 --- a/docs/sourcemaps.asciidoc +++ b/docs/sourcemaps.asciidoc @@ -5,6 +5,9 @@ Create and upload source maps (RUM) ++++ +NOTE: This guide is only for standalone APM Server users. Users running the <> +need to use the Kibana {kibana-ref}/rum-sourcemap-api.html[source map upload API] instead. + Minifying JavaScript bundles in production is a common practice; it can greatly improve the load time and network latency of your applications. The problem with minifying code is that it can be hard to debug. @@ -69,7 +72,7 @@ APM Server uses the `serviceVersion` to match the correct source map file to eac === Generate a source map To be compatible with Elastic APM, source maps must follow the -https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k[source map revision 3 proposal spec]. +https://sourcemaps.info/spec.html[source map revision 3 proposal spec]. Source maps can be generated and configured in many different ways. For example, if you're using parcel, they are generated by default. diff --git a/docs/span-api.asciidoc b/docs/span-api.asciidoc index 81a71e323fc..11c96b62621 100644 --- a/docs/span-api.asciidoc +++ b/docs/span-api.asciidoc @@ -1,15 +1,16 @@ [[span-api]] === Spans -Spans are events captured by an agent occurring in a monitored service. +Spans are events captured by an agent occurring in a monitored service. [[span-schema]] [float] ==== Span Schema -The APM Server uses JSON Schema for validating requests. The specification for spans is defined below: +APM Server uses JSON Schema to validate requests. The specification for spans is defined on +{github_repo_link}/docs/spec/v2/span.json[GitHub] and included below: [source,json] ---- -include::./spec/spans/span.json[] +include::./spec/v2/span.json[] ---- diff --git a/docs/spec/cloud.json b/docs/spec/cloud.json deleted file mode 100644 index 59fa1687a7c..00000000000 --- a/docs/spec/cloud.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "$id": "docs/spec/cloud.json", - "title": "Cloud", - "type": [ - "object", - "null" - ], - "properties": { - "account": { - "properties": { - "id": { - "description": "Cloud account ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud account name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "availability_zone": { - "description": "Cloud availability zone name. e.g. 
us-east-1a", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "instance": { - "properties": { - "id": { - "description": "Cloud instance/machine ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud instance/machine name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "machine": { - "properties": { - "type": { - "description": "Cloud instance/machine type", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "project": { - "properties": { - "id": { - "description": "Cloud project ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud project name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "provider": { - "description": "Cloud provider name. e.g. aws, azure, gcp, digitalocean.", - "type": [ - "string" - ], - "maxLength": 1024 - }, - "region": { - "description": "Cloud region name. e.g. us-east-1", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "provider" - ] -} \ No newline at end of file diff --git a/docs/spec/context.json b/docs/spec/context.json deleted file mode 100644 index e3d8e28d15f..00000000000 --- a/docs/spec/context.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "$id": "docs/spec/context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": ["object", "null"], - "properties": { - "custom": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": ["object", "null"], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "response": { - "type": ["object", "null"], - "allOf": [ - { "$ref": "./http_response.json" }, - { - "properties": { - "finished": { - "description": "A boolean indicating whether the response was finished or not", - "type": [ - "boolean", - "null" - ] - }, - "headers_sent": { - "type": [ - "boolean", - "null" - ] - } - } - } - ] - }, - "request": { - "$ref": "request.json" - }, - "tags": { - "$ref": "tags.json" - }, - "user": { - "description": "Describes the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", - "$ref": "user.json" - }, - "page": { - "description": "", - "type": ["object", "null"], - "properties": { - "referer": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": ["string", "null"] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": ["string", "null"] - } - } - }, - "service": { - "description": "Service related information can be sent per event. 
Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$ref": "service.json" - }, - "message": { - "$ref": "message.json" - } - } -} diff --git a/docs/spec/errors/error.json b/docs/spec/errors/error.json deleted file mode 100644 index 4f2ee27b4c4..00000000000 --- a/docs/spec/errors/error.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "$id": "docs/spec/errors/error.json", - "type": "object", - "description": "An error or a logged error message captured by an agent occurring in a monitored service", - "allOf": [ - { "$ref": "../timestamp_epoch.json" }, - { - "properties": { - "id": { - "type": ["string"], - "description": "Hex encoded 128 random bits ID of the error.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace. Must be present if transaction_id and parent_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "transaction_id": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction. Must be present if trace_id and parent_id are set.", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Must be present if trace_id and transaction_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "transaction": { - "type": ["object", "null"], - "description": "Data for correlating errors with transactions", - "properties": { - "sampled": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "type": { - "type": ["string", "null"], - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } - }, - "context": { - "$ref": "./../context.json" - }, - "culprit": { - "description": "Function call which was the primary perpetrator of this event.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "exception": { - "description": "Information about the originally thrown error.", - "type": ["object", "null"], - "properties": { - "code": { - "type": ["string", "integer", "null"], - "maxLength": 1024, - "description": "The error code set when the error happened, e.g. database error code." - }, - "message": { - "description": "The original error message.", - "type": ["string", "null"] - }, - "module": { - "description": "Describes the exception type's module namespace.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "attributes": { - "type": ["object", "null"] - }, - "stacktrace": { - "type": ["array", "null"], - "items": { - "$ref": "./../stacktrace_frame.json" - }, - "minItems": 0 - }, - "type": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "handled": { - "type": ["boolean", "null"], - "description": "Indicator whether the error was caught somewhere in the code or not." 
- }, - "cause": { - "type": ["array", "null"], - "items": { - "type": ["object", "null"], - "description": "Recursive exception object" - }, - "minItems": 0, - "description": "Exception tree" - } - }, - "anyOf": [ - {"required": ["message"], "properties": {"message": {"type": "string"}}}, - {"required": ["type"], "properties": {"type": {"type": "string"}}} - ] - }, - "log": { - "type": ["object", "null"], - "description": "Additional information added when logging the error.", - "properties": { - "level": { - "description": "The severity of the record.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "logger_name": { - "description": "The name of the logger instance used.", - "type": ["string", "null"], - "default": "default", - "maxLength": 1024 - }, - "message": { - "description": "The additionally logged error message.", - "type": "string" - }, - "param_message": { - "description": "A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. The string is not interpreted, so feel free to use whichever placeholders makes sense in the client languange.", - "type": ["string", "null"], - "maxLength": 1024 - - }, - "stacktrace": { - "type": ["array", "null"], - "items": { - "$ref": "./../stacktrace_frame.json" - }, - "minItems": 0 - } - }, - "required": ["message"] - } - }, - "allOf": [ - { "required": ["id"] }, - { "if": {"required": ["transaction_id"], "properties": {"transaction_id": { "type": "string" }}}, - "then": { "required": ["trace_id", "parent_id"], "properties": {"trace_id": { "type": "string" }, "parent_id": {"type": "string"}}}}, - { "if": {"required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}}, - "then": { "required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}} }, - { "if": {"required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}}, - "then": { "required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}} } - ], - "anyOf": [ - { "required": ["exception"], "properties": {"exception": { "type": "object" }} }, - { "required": ["log"], "properties": {"log": { "type": "object" }} } - ] - } - ] -} diff --git a/docs/spec/errors/rum_v3_error.json b/docs/spec/errors/rum_v3_error.json deleted file mode 100644 index bb15a397474..00000000000 --- a/docs/spec/errors/rum_v3_error.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "$id": "docs/spec/errors/rum_v3_error.json", - "type": "object", - "description": "An error or a logged error message captured by an agent occurring in a monitored service", - "allOf": [ - { "$ref": "../timestamp_epoch.json" }, - { - "properties": { - "id": { - "type": ["string"], - "description": "Hex encoded 128 random bits ID of the error.", - "maxLength": 1024 - }, - "tid": { - "description": "Hex encoded 128 random bits ID of the correlated trace. Must be present if transaction_id and parent_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "xid": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction. Must be present if trace_id and parent_id are set.", - "maxLength": 1024 - }, - "pid": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. 
Must be present if trace_id and transaction_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "x": { - "type": ["object", "null"], - "description": "Data for correlating errors with transactions", - "properties": { - "sm": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "t": { - "type": ["string", "null"], - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } - }, - "c": { - "$ref": "./../rum_v3_context.json" - }, - "cu": { - "description": "Function call which was the primary perpetrator of this event.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ex": { - "description": "Information about the originally thrown error.", - "type": ["object", "null"], - "properties": { - "cd": { - "type": ["string", "integer", "null"], - "maxLength": 1024, - "description": "The error code set when the error happened, e.g. database error code." - }, - "mg": { - "description": "The original error message.", - "type": ["string", "null"] - }, - "mo": { - "description": "Describes the exception type's module namespace.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "at": { - "type": ["object", "null"] - }, - "st": { - "type": ["array", "null"], - "items": { - "$ref": "./../rum_v3_stacktrace_frame.json" - }, - "minItems": 0 - }, - "t": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "hd": { - "type": ["boolean", "null"], - "description": "Indicator whether the error was caught somewhere in the code or not." - }, - "ca": { - "type": ["array", "null"], - "items": { - "type": ["object", "null"], - "description": "Recursive exception object" - }, - "minItems": 0, - "description": "Exception tree" - } - }, - "anyOf": [ - {"required": ["mg"], "properties": {"mg": {"type": "string"}}}, - {"required": ["t"], "properties": {"t": {"type": "string"}}} - ] - }, - "log": { - "type": ["object", "null"], - "description": "Additional information added when logging the error.", - "properties": { - "lv": { - "description": "The severity of the record.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ln": { - "description": "The name of the logger instance used.", - "type": ["string", "null"], - "default": "default", - "maxLength": 1024 - }, - "mg": { - "description": "The additionally logged error message.", - "type": "string" - }, - "pmg": { - "description": "A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. 
The string is not interpreted, so feel free to use whichever placeholders makes sense in the client languange.", - "type": ["string", "null"], - "maxLength": 1024 - - }, - "st": { - "type": ["array", "null"], - "items": { - "$ref": "./../rum_v3_stacktrace_frame.json" - }, - "minItems": 0 - } - }, - "required": ["mg"] - } - }, - "allOf": [ - { "required": ["id"] }, - { "if": {"required": ["xid"], "properties": {"xid": { "type": "string" }}}, - "then": { "required": ["tid", "pid"], "properties": {"tid": { "type": "string" }, "pid": {"type": "string"}}}}, - { "if": {"required": ["tid"], "properties": {"tid": { "type": "string" }}}, - "then": { "required": ["pid"], "properties": {"pid": { "type": "string" }}} }, - { "if": {"required": ["pid"], "properties": {"pid": { "type": "string" }}}, - "then": { "required": ["tid"], "properties": {"tid": { "type": "string" }}} } - ], - "anyOf": [ - { "required": ["ex"], "properties": {"ex": { "type": "object" }} }, - { "required": ["log"], "properties": {"log": { "type": "object" }} } - ] - } - ] -} diff --git a/docs/spec/http_response.json b/docs/spec/http_response.json deleted file mode 100644 index 1bc5f6970d8..00000000000 --- a/docs/spec/http_response.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "$id": "docs/spec/http_response.json", - "title": "HTTP response object", - "description": "HTTP response object, used by error, span and transction documents", - "type": ["object", "null"], - "properties": { - "status_code": { - "type": ["integer", "null"], - "description": "The status code of the http request." - }, - "transfer_size": { - "type": ["number", "null"], - "description": "Total size of the payload." - }, - "encoded_body_size": { - "type": ["number", "null"], - "description": "The encoded size of the payload." - }, - "decoded_body_size": { - "type": ["number", "null"], - "description": "The decoded size of the payload." - }, - "headers": { - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } -} diff --git a/docs/spec/message.json b/docs/spec/message.json deleted file mode 100644 index 23217e2f868..00000000000 --- a/docs/spec/message.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "$id": "docs/spec/message.json", - "title": "Message", - "description": "Details related to message receiving and publishing if the captured event integrates with a messaging system", - "type": ["object", "null"], - "properties": { - "queue": { - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the message queue where the message is received.", - "type": ["string","null"], - "maxLength": 1024 - } - } - }, - "age": { - "type": ["object", "null"], - "properties": { - "ms": { - "description": "The age of the message in milliseconds. If the instrumented messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. 
If a timestamp is not available, agents should omit this field.", - "type": ["integer", "null"] - } - } - }, - "body": { - "description": "messsage body, similar to an http request body", - "type": ["string", "null"] - }, - "headers": { - "description": "messsage headers, similar to http request headers", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } -} diff --git a/docs/spec/metadata.json b/docs/spec/metadata.json deleted file mode 100644 index 08bea947265..00000000000 --- a/docs/spec/metadata.json +++ /dev/null @@ -1,187 +0,0 @@ -{ - "$id": "docs/spec/metadata.json", - "title": "Metadata", - "description": "Metadata concerning the other objects in the stream.", - "type": "object", - "properties": { - "service": { - "type": [ - "object" - ], - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object" - ], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string" - ], - "maxLength": 1024, - "minLength": 1 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "name", - "version" - ] - }, - "framework": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "language": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "name" - ] - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024, - "minLength": 1 - }, - "environment": { - "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "runtime": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "name", - "version" - ] - }, - "version": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "node": { - "description": "Unique meaningful name of the service node.", - "type": [ - "object", - "null" - ], - "properties": { - "configured_name": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - } - }, - "required": [ - "name", - "agent" - ] - }, - "process": { - "$ref": "process.json" - }, - "system": { - "$ref": "system.json" - }, - "user": { - "description": "Describes the authenticated User for a request.", - "$ref": "user.json" - }, - "cloud": { - "$ref": "cloud.json" - }, - "labels": { - "$ref": "tags.json" - } - }, - "required": [ - "service" - ] -} \ No newline at end of file diff --git a/docs/spec/metricsets/metricset.json b/docs/spec/metricsets/metricset.json deleted file mode 100644 index 5216f301464..00000000000 --- a/docs/spec/metricsets/metricset.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "$id": "docs/spec/metricsets/metricset.json", - "type": "object", - "description": "Data captured by an agent representing an event occurring in a monitored service", - "allOf": [ - { "$ref": "../timestamp_epoch.json"}, - { "$ref": "../span_type.json" }, - { "$ref": "../span_subtype.json" }, - { "$ref": "../transaction_name.json" }, - { "$ref": "../transaction_type.json" }, - { - "properties": { - "samples": { - "type": [ - "object" - ], - "description": "Sampled application metrics collected from the agent.", - "patternProperties": { - "^[^*\"]*$": { - "$ref": "sample.json" - } - }, - "additionalProperties": false - }, - "tags": { - "$ref": "../tags.json" - } - }, - "required": ["samples"] - } - ] -} diff --git a/docs/spec/metricsets/rum_v3_metricset.json b/docs/spec/metricsets/rum_v3_metricset.json deleted file mode 100644 index ed6465bc189..00000000000 --- a/docs/spec/metricsets/rum_v3_metricset.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "$id": "docs/spec/metricsets/rum_v3_metricset.json", - "description": "Data captured by an agent representing an event occurring in a monitored service", - "properties": { - "y": { - "type": ["object", "null"], - "description": "span", - "properties": { - "t": { - "type": "string", - "description": "type", - "maxLength": 1024 - }, - "su": { - "type": ["string", "null"], - "description": "subtype", - "maxLength": 1024 - } - } - }, - "sa": { - "type": "object", - "description": "Sampled application metrics collected from the agent.", - "properties": { - "xdc": { - "description": "transaction.duration.count", - "$ref": "rum_v3_sample.json" - }, - "xds": { - "description": "transaction.duration.sum.us", - "$ref": "rum_v3_sample.json" - }, - "xbc": { - "description": "transaction.breakdown.count", - "$ref": "rum_v3_sample.json" - }, - "ysc": { - "description": "span.self_time.count", - "$ref": "rum_v3_sample.json" - }, - "yss": { - "description": "span.self_time.sum.us", - "$ref": "rum_v3_sample.json" - } - } - }, - "g": { - "$ref": "../tags.json" - } - }, - "required": ["sa"] -} diff --git a/docs/spec/metricsets/rum_v3_sample.json b/docs/spec/metricsets/rum_v3_sample.json 
deleted file mode 100644 index 8fa0a6b0963..00000000000 --- a/docs/spec/metricsets/rum_v3_sample.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] -} diff --git a/docs/spec/metricsets/sample.json b/docs/spec/metricsets/sample.json deleted file mode 100644 index 8902d3bf26b..00000000000 --- a/docs/spec/metricsets/sample.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "value": {"type": "number"} - }, - "required": ["value"] -} diff --git a/docs/spec/outcome.json b/docs/spec/outcome.json deleted file mode 100644 index 36231ee8824..00000000000 --- a/docs/spec/outcome.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates." -} diff --git a/docs/spec/process.json b/docs/spec/process.json deleted file mode 100644 index e5d5a1a8bc2..00000000000 --- a/docs/spec/process.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "$id": "docs/spec/process.json", - "title": "Process", - "type": ["object", "null"], - "properties": { - "pid": { - "description": "Process ID of the service", - "type": ["integer"] - }, - "ppid": { - "description": "Parent process ID of the service", - "type": ["integer", "null"] - }, - "title": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "argv": { - "description": "Command line arguments used to start this process", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": ["pid"] -} diff --git a/docs/spec/request.json b/docs/spec/request.json deleted file mode 100644 index e116e9ab336..00000000000 --- a/docs/spec/request.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "$id": "docs/spec/request.json", - "title": "Request", - "description": "If a log record was generated as a result of a http request, the http interface can be used to collect this information.", - "type": ["object", "null"], - "properties": { - "body": { - "description": "Data should only contain the request body (not the query string). It can either be a dictionary (for standard HTTP requests) or a raw request body.", - "type": ["object", "string", "null"] - }, - "env": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": ["object", "null"], - "properties": {} - }, - "headers": { - "description": "Should include any headers sent by the requester. 
Cookies will be taken by headers if supplied.", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - }, - "http_version": { - "description": "HTTP version.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "method": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - }, - "socket": { - "type": ["object", "null"], - "properties": { - "encrypted": { - "description": "Indicates whether request was sent as SSL/HTTPS request.", - "type": ["boolean", "null"] - }, - "remote_address": { - "description": "The network address sending the request. Should be obtained through standard APIs and not parsed from any headers like 'Forwarded'.", - "type": ["string", "null"] - } - } - }, - "url": { - "description": "A complete Url, with scheme, host and path.", - "type": "object", - "properties": { - "raw": { - "type": ["string", "null"], - "description": "The raw, unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", - "maxLength": 1024 - }, - "protocol": { - "type": ["string", "null"], - "description": "The protocol of the request, e.g. 'https:'.", - "maxLength": 1024 - }, - "full": { - "type": ["string", "null"], - "description": "The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top.", - "maxLength": 1024 - }, - "hostname": { - "type": ["string", "null"], - "description": "The hostname of the request, e.g. 'example.com'.", - "maxLength": 1024 - }, - "port": { - "type": ["string", "integer","null"], - "description": "The port of the request, e.g. '443'", - "maxLength": 1024 - }, - "pathname": { - "type": ["string", "null"], - "description": "The path of the request, e.g. '/search'", - "maxLength": 1024 - }, - "search": { - "description": "The search describes the query string of the request. It is expected to have values delimited by ampersands.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "hash": { - "type": ["string", "null"], - "description": "The hash of the request URL, e.g. 
'top'", - "maxLength": 1024 - } - } - }, - "cookies": { - "description": "A parsed key-value object of cookies", - "type": ["object", "null"] - } - }, - "required": ["url", "method"] -} diff --git a/docs/spec/rum_experience.json b/docs/spec/rum_experience.json deleted file mode 100644 index 2296ace7591..00000000000 --- a/docs/spec/rum_experience.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "$id": "docs/spec/rum_experience.json", - "title": "RUM Experience Metrics", - "description": "Metrics for measuring real user (browser) experience", - "type": ["object", "null"], - "properties": { - "cls": { - "type": ["number", "null"], - "description": "The Cumulative Layout Shift metric", - "minimum": 0 - }, - "tbt": { - "type": ["number", "null"], - "description": "The Total Blocking Time metric", - "minimum": 0 - }, - "fid": { - "type": ["number", "null"], - "description": "The First Input Delay metric", - "minimum": 0 - } - } -} diff --git a/docs/spec/rum_v3_context.json b/docs/spec/rum_v3_context.json deleted file mode 100644 index b522a23b841..00000000000 --- a/docs/spec/rum_v3_context.json +++ /dev/null @@ -1,164 +0,0 @@ -{ - "$id": "docs/spec/rum_v3_context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": [ - "object", - "null" - ], - "properties": { - "cu": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "r": { - "type": [ - "object", - "null" - ], - "allOf": [ - { - "properties": { - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "ts": { - "type": [ - "number", - "null" - ], - "description": "Total size of the payload." - }, - "ebs": { - "type": [ - "number", - "null" - ], - "description": "The encoded size of the payload." - }, - "dbs": { - "type": [ - "number", - "null" - ], - "description": "The decoded size of the payload." - }, - "he": { - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - } - } - } - ] - }, - "q": { - "properties": { - "en": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": [ - "object", - "null" - ], - "properties": {} - }, - "he": { - "description": "Should include any headers sent by the requester. 
Cookies will be taken by headers if supplied.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - }, - "hve": { - "description": "HTTP version.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "mt": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - } - }, - "required": [ - "mt" - ] - }, - "g": { - "$ref": "tags.json" - }, - "u": { - "$ref": "rum_v3_user.json" - }, - "p": { - "description": "", - "type": [ - "object", - "null" - ], - "properties": { - "rf": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": [ - "string", - "null" - ] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": [ - "string", - "null" - ] - } - } - }, - "se": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$ref": "rum_v3_service.json" - } - } -} diff --git a/docs/spec/rum_v3_metadata.json b/docs/spec/rum_v3_metadata.json deleted file mode 100644 index c6cdc358f13..00000000000 --- a/docs/spec/rum_v3_metadata.json +++ /dev/null @@ -1,151 +0,0 @@ -{ - "$id": "docs/spec/rum_v3_metadata.json", - "title": "Metadata", - "description": "Metadata concerning the other objects in the stream.", - "type": [ - "object" - ], - "properties": { - "se": { - "$id": "docs/spec/rum_v3_service.json", - "title": "Service", - "type": [ - "object" - ], - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string" - ], - "minLength": 1, - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "n", - "ve" - ] - }, - "fw": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "la": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "n" - ] - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "minLength": 1, - "maxLength": 1024 - }, - "en": { - "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ru": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "n", - "ve" - ] - }, - "ve": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "a", - "n" - ] - } - }, - "required": [ - "se" - ] -} \ No newline at end of file diff --git a/docs/spec/rum_v3_service.json b/docs/spec/rum_v3_service.json deleted file mode 100644 index 4febb870612..00000000000 --- a/docs/spec/rum_v3_service.json +++ /dev/null @@ -1,129 +0,0 @@ -{ - "$id": "docs/spec/rum_v3_service.json", - "title": "Service", - "type": [ - "object", - "null" - ], - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "fw": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "la": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "en": { - "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ru": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "ve": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } -} diff --git a/docs/spec/rum_v3_stacktrace_frame.json b/docs/spec/rum_v3_stacktrace_frame.json deleted file mode 100644 index e11f9cb868b..00000000000 --- a/docs/spec/rum_v3_stacktrace_frame.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "$id": "docs/spec/rum_v3_stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "ap": { - "description": "The absolute path of the file involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "co": { - "description": "Column number", - "type": [ - "integer", - "null" - ] - }, - "cli": { - "description": "The line of code part of the stack frame", - "type": [ - "string", - "null" - ] - }, - "f": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": [ - "string", - "null" - ] - }, - "cn": { - "description": "The classname of the code involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "fn": { - "description": "The function involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "li": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": [ - "integer", - "null" - ] - }, - "mo": { - "description": "The module to which frame belongs to", - "type": [ - "string", - "null" - ] - }, - "poc": { - "description": "The lines of code after the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - }, - "prc": { - "description": "The lines of code before the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": [ - "f" - ] -} \ No newline at end of file diff --git a/docs/spec/rum_v3_user.json b/docs/spec/rum_v3_user.json deleted file mode 100644 index cf71104f2d5..00000000000 --- a/docs/spec/rum_v3_user.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "$id": "docs/spec/rum_v3_user.json", - "title": "User", - "type": [ - "object", - "null" - ], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. 
the primary key of the user", - "type": [ - "string", - "integer", - "null" - ], - "maxLength": 1024 - }, - "em": { - "description": "Email of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "un": { - "description": "The username of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } -} \ No newline at end of file diff --git a/docs/spec/rumv3/error.json b/docs/spec/rumv3/error.json new file mode 100644 index 00000000000..7a6ab9fb3f8 --- /dev/null +++ b/docs/spec/rumv3/error.json @@ -0,0 +1,839 @@ +{ + "$id": "docs/spec/rumv3/error", + "type": "object", + "properties": { + "c": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "cu": { + "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.", + "type": [ + "null", + "object" + ] + }, + "g": { + "description": "Tags are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "p": { + "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "rf": { + "description": "Referer holds the URL of the page that 'linked' to the current page.", + "type": [ + "null", + "string" + ] + }, + "url": { + "description": "URL of the current page", + "type": [ + "null", + "string" + ] + } + } + }, + "q": { + "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "en": { + "description": "Env holds environment variable information passed to the monitored service.", + "type": [ + "null", + "object" + ] + }, + "he": { + "description": "Headers includes any HTTP headers sent by the requester. 
Cookies will be taken by headers if supplied.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "hve": { + "description": "HTTPVersion holds information about the used HTTP version.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "mt": { + "description": "Method holds information about the method of the HTTP request.", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "mt" + ] + }, + "r": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "dbs": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "ebs": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "he": { + "description": "Headers holds the http headers sent in the http response.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "sc": { + "description": "StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "ts": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "se": { + "description": "Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.", + "type": [ + "null", + "object" + ], + "properties": { + "a": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "en": { + "description": "Environment in which the monitored service is running, e.g. 
`production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "fw": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "la": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "n": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "ru": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "ve": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "u": { + "description": "User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", + "type": [ + "null", + "object" + ], + "properties": { + "em": { + "description": "Email of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "id": { + "description": "ID identifies the logged in user, e.g. can be the primary key of the user", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "ud": { + "description": "Domain of the user", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "un": { + "description": "Name of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "cl": { + "description": "Culprit identifies the function call which was the primary perpetrator of this event.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ex": { + "description": "Exception holds information about the original error. The information is language specific.", + "type": [ + "null", + "object" + ], + "properties": { + "at": { + "description": "Attributes of the exception.", + "type": [ + "null", + "object" + ] + }, + "ca": { + "description": "Cause can hold a collection of error exceptions representing chained exceptions. The chain starts with the outermost exception, followed by its cause, and so on.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object" + }, + "minItems": 0 + }, + "cd": { + "description": "Code that is set when the error happened, e.g. 
database error code.", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "hd": { + "description": "Handled indicates whether the error was caught in the code or not.", + "type": [ + "null", + "boolean" + ] + }, + "mg": { + "description": "Message contains the originally captured error message.", + "type": [ + "null", + "string" + ] + }, + "mo": { + "description": "Module describes the exception type's module namespace.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "st": { + "description": "Stacktrace information of the captured exception.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "ap": { + "description": "AbsPath is the absolute path of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "cli": { + "description": "ContextLine is the line from the frame's file.", + "type": [ + "null", + "string" + ] + }, + "cn": { + "description": "Classname of the frame.", + "type": [ + "null", + "string" + ] + }, + "co": { + "description": "ColumnNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "f": { + "description": "Filename is the relative name of the frame's file.", + "type": "string" + }, + "fn": { + "description": "Function represented by the frame.", + "type": [ + "null", + "string" + ] + }, + "li": { + "description": "LineNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "mo": { + "description": "Module to which the frame belongs to.", + "type": [ + "null", + "string" + ] + }, + "poc": { + "description": "PostContext is a slice of code lines immediately before the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "prc": { + "description": "PreContext is a slice of code lines immediately after the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + } + }, + "required": [ + "f" + ] + }, + "minItems": 0 + }, + "t": { + "description": "Type of the exception.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "anyOf": [ + { + "properties": { + "mg": { + "type": "string" + } + }, + "required": [ + "mg" + ] + }, + { + "properties": { + "t": { + "type": "string" + } + }, + "required": [ + "t" + ] + } + ] + }, + "id": { + "description": "ID holds the hex encoded 128 random bits ID of the event.", + "type": "string", + "maxLength": 1024 + }, + "log": { + "description": "Log holds additional information added when the error is logged.", + "type": [ + "null", + "object" + ], + "properties": { + "ln": { + "description": "LoggerName holds the name of the used logger instance.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "lv": { + "description": "Level represents the severity of the recorded log.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "mg": { + "description": "Message of the logged error. In case a parameterized message is captured, Message should contain the same information, but with any placeholders being replaced.", + "type": "string" + }, + "pmg": { + "description": "ParamMessage should contain the same information as Message, but with placeholders where parameters were logged, e.g. 'error connecting to %s'. The string is not interpreted, allowing differnt placeholders per client languange. 
The information might be used to group errors together.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "st": { + "description": "Stacktrace information of the captured error.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "ap": { + "description": "AbsPath is the absolute path of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "cli": { + "description": "ContextLine is the line from the frame's file.", + "type": [ + "null", + "string" + ] + }, + "cn": { + "description": "Classname of the frame.", + "type": [ + "null", + "string" + ] + }, + "co": { + "description": "ColumnNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "f": { + "description": "Filename is the relative name of the frame's file.", + "type": "string" + }, + "fn": { + "description": "Function represented by the frame.", + "type": [ + "null", + "string" + ] + }, + "li": { + "description": "LineNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "mo": { + "description": "Module to which the frame belongs to.", + "type": [ + "null", + "string" + ] + }, + "poc": { + "description": "PostContext is a slice of code lines immediately before the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "prc": { + "description": "PreContext is a slice of code lines immediately after the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + } + }, + "required": [ + "f" + ] + }, + "minItems": 0 + } + }, + "required": [ + "mg" + ] + }, + "pid": { + "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "tid": { + "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "timestamp": { + "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch.", + "type": [ + "null", + "integer" + ] + }, + "x": { + "description": "Transaction holds information about the correlated transaction.", + "type": [ + "null", + "object" + ], + "properties": { + "sm": { + "description": "Sampled indicates whether or not the full information for a transaction is captured. 
If a transaction is unsampled no spans and less context information will be reported.", + "type": [ + "null", + "boolean" + ] + }, + "t": { + "description": "Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "xid": { + "description": "TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "id" + ], + "allOf": [ + { + "if": { + "properties": { + "xid": { + "type": "string" + } + }, + "required": [ + "xid" + ] + }, + "then": { + "properties": { + "pid": { + "type": "string" + } + }, + "required": [ + "pid" + ] + } + }, + { + "if": { + "properties": { + "tid": { + "type": "string" + } + }, + "required": [ + "tid" + ] + }, + "then": { + "properties": { + "pid": { + "type": "string" + } + }, + "required": [ + "pid" + ] + } + }, + { + "if": { + "properties": { + "xid": { + "type": "string" + } + }, + "required": [ + "xid" + ] + }, + "then": { + "properties": { + "tid": { + "type": "string" + } + }, + "required": [ + "tid" + ] + } + }, + { + "if": { + "properties": { + "pid": { + "type": "string" + } + }, + "required": [ + "pid" + ] + }, + "then": { + "properties": { + "tid": { + "type": "string" + } + }, + "required": [ + "tid" + ] + } + } + ], + "anyOf": [ + { + "properties": { + "ex": { + "type": "object" + } + }, + "required": [ + "ex" + ] + }, + { + "properties": { + "log": { + "type": "object" + } + }, + "required": [ + "log" + ] + } + ] +} \ No newline at end of file diff --git a/docs/spec/rumv3/metadata.json b/docs/spec/rumv3/metadata.json new file mode 100644 index 00000000000..876ac03a6df --- /dev/null +++ b/docs/spec/rumv3/metadata.json @@ -0,0 +1,218 @@ +{ + "$id": "docs/spec/rumv3/metadata", + "type": "object", + "properties": { + "l": { + "description": "Labels are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Labels are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "n": { + "description": "Network holds information about the network over which the monitored service is communicating.", + "type": [ + "null", + "object" + ], + "properties": { + "c": { + "type": [ + "null", + "object" + ], + "properties": { + "t": { + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "se": { + "description": "Service metadata about the monitored service.", + "type": "object", + "properties": { + "a": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": "object", + "properties": { + "n": { + "description": "Name of the APM agent capturing information.", + "type": "string", + "maxLength": 1024, + "minLength": 1 + }, + "ve": { + "description": "Version of the APM agent capturing information.", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "n", + "ve" + ] + }, + "en": { + "description": "Environment in which the monitored service is running, e.g. 
`production` or `staging`.",
+          "type": [
+            "null",
+            "string"
+          ],
+          "maxLength": 1024
+        },
+        "fw": {
+          "description": "Framework holds information about the framework used in the monitored service.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "n": {
+              "description": "Name of the used framework",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024
+            },
+            "ve": {
+              "description": "Version of the used framework",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024
+            }
+          }
+        },
+        "la": {
+          "description": "Language holds information about the programming language of the monitored service.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "n": {
+              "description": "Name of the used programming language",
+              "type": "string",
+              "maxLength": 1024
+            },
+            "ve": {
+              "description": "Version of the used programming language",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024
+            }
+          },
+          "required": [
+            "n"
+          ]
+        },
+        "n": {
+          "description": "Name of the monitored service.",
+          "type": "string",
+          "maxLength": 1024,
+          "minLength": 1,
+          "pattern": "^[a-zA-Z0-9 _-]+$"
+        },
+        "ru": {
+          "description": "Runtime holds information about the language runtime running the monitored service",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "n": {
+              "description": "Name of the language runtime",
+              "type": "string",
+              "maxLength": 1024
+            },
+            "ve": {
+              "description": "Version of the language runtime",
+              "type": "string",
+              "maxLength": 1024
+            }
+          },
+          "required": [
+            "n",
+            "ve"
+          ]
+        },
+        "ve": {
+          "description": "Version of the monitored service.",
+          "type": [
+            "null",
+            "string"
+          ],
+          "maxLength": 1024
+        }
+      },
+      "required": [
+        "a",
+        "n"
+      ]
+    },
+    "u": {
+      "description": "User metadata, which can be overwritten on a per event basis.",
+      "type": [
+        "null",
+        "object"
+      ],
+      "properties": {
+        "em": {
+          "description": "Email of the user.",
+          "type": [
+            "null",
+            "string"
+          ],
+          "maxLength": 1024
+        },
+        "id": {
+          "description": "ID identifies the logged in user, e.g. can be the primary key of the user",
+          "type": [
+            "null",
+            "string",
+            "integer"
+          ],
+          "maxLength": 1024
+        },
+        "ud": {
+          "description": "Domain of the user",
+          "type": [
+            "null",
+            "string"
+          ],
+          "maxLength": 1024
+        },
+        "un": {
+          "description": "Name of the user.",
+          "type": [
+            "null",
+            "string"
+          ],
+          "maxLength": 1024
+        }
+      }
+    }
+  },
+  "required": [
+    "se"
+  ]
+}
\ No newline at end of file
diff --git a/docs/spec/rumv3/span.json b/docs/spec/rumv3/span.json
new file mode 100644
index 00000000000..a99ff889db4
--- /dev/null
+++ b/docs/spec/rumv3/span.json
@@ -0,0 +1,364 @@
+{
+  "$id": "docs/spec/rumv3/span",
+  "type": "object",
+  "properties": {
+    "ac": {
+      "description": "Action holds the specific kind of event within the sub-type represented by the span (e.g. query, connect)",
+      "type": [
+        "null",
+        "string"
+      ],
+      "maxLength": 1024
+    },
+    "c": {
+      "description": "Context holds arbitrary contextual information for the event.",
+      "type": [
+        "null",
+        "object"
+      ],
+      "properties": {
+        "dt": {
+          "description": "Destination contains contextual data about the destination of spans",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "ad": {
+              "description": "Address is the destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') IPv6 (e.g. '::1')",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024
+            },
+            "po": {
+              "description": "Port is the destination network port (e.g. 
443)", + "type": [ + "null", + "integer" + ] + }, + "se": { + "description": "Service describes the destination service", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name is the identifier for the destination service, e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq' DEPRECATED: this field will be removed in a future release", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "rc": { + "description": "Resource identifies the destination service resource being operated on e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'", + "type": "string", + "maxLength": 1024 + }, + "t": { + "description": "Type of the destination service, e.g. db, elasticsearch. Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "rc" + ] + } + } + }, + "g": { + "description": "Tags are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "h": { + "description": "HTTP contains contextual information when the span concerns an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "mt": { + "description": "Method holds information about the method of the HTTP request.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "r": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "dbs": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "ebs": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "ts": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "sc": { + "description": "Deprecated: Use Response.StatusCode instead. StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "url": { + "description": "URL is the raw url of the correlating HTTP request.", + "type": [ + "null", + "string" + ] + } + } + }, + "se": { + "description": "Service related information can be sent per span. 
Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "a": {
+              "description": "Agent holds information about the APM agent capturing the event.",
+              "type": [
+                "null",
+                "object"
+              ],
+              "properties": {
+                "n": {
+                  "description": "Name of the APM agent capturing information.",
+                  "type": [
+                    "null",
+                    "string"
+                  ],
+                  "maxLength": 1024
+                },
+                "ve": {
+                  "description": "Version of the APM agent capturing information.",
+                  "type": [
+                    "null",
+                    "string"
+                  ],
+                  "maxLength": 1024
+                }
+              }
+            },
+            "n": {
+              "description": "Name of the monitored service.",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024,
+              "pattern": "^[a-zA-Z0-9 _-]+$"
+            }
+          }
+        }
+      }
+    },
+    "d": {
+      "description": "Duration of the span in milliseconds",
+      "type": "number",
+      "minimum": 0
+    },
+    "id": {
+      "description": "ID holds the hex encoded 64 random bits ID of the event.",
+      "type": "string",
+      "maxLength": 1024
+    },
+    "n": {
+      "description": "Name is the generic designation of a span in the scope of a transaction.",
+      "type": "string",
+      "maxLength": 1024
+    },
+    "o": {
+      "description": "Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.",
+      "type": [
+        "null",
+        "string"
+      ],
+      "enum": [
+        "success",
+        "failure",
+        "unknown",
+        null
+      ]
+    },
+    "pi": {
+      "description": "ParentIndex is the index of the parent span in the list. Absent when the parent is a transaction.",
+      "type": [
+        "null",
+        "integer"
+      ]
+    },
+    "s": {
+      "description": "Start is the offset relative to the transaction's timestamp identifying the start of the span, in milliseconds.",
+      "type": "number"
+    },
+    "sr": {
+      "description": "SampleRate applied to the monitored service at the time where this span was recorded.",
+      "type": [
+        "null",
+        "number"
+      ]
+    },
+    "st": {
+      "description": "Stacktrace connected to this span event.",
+      "type": [
+        "null",
+        "array"
+      ],
+      "items": {
+        "type": "object",
+        "properties": {
+          "ap": {
+            "description": "AbsPath is the absolute path of the frame's file.",
+            "type": [
+              "null",
+              "string"
+            ]
+          },
+          "cli": {
+            "description": "ContextLine is the line from the frame's file.",
+            "type": [
+              "null",
+              "string"
+            ]
+          },
+          "cn": {
+            "description": "Classname of the frame.",
+            "type": [
+              "null",
+              "string"
+            ]
+          },
+          "co": {
+            "description": "ColumnNumber of the frame.",
+            "type": [
+              "null",
+              "integer"
+            ]
+          },
+          "f": {
+            "description": "Filename is the relative name of the frame's file.",
+            "type": "string"
+          },
+          "fn": {
+            "description": "Function represented by the frame.",
+            "type": [
+              "null",
+              "string"
+            ]
+          },
+          "li": {
+            "description": "LineNumber of the frame.",
+            "type": [
+              "null",
+              "integer"
+            ]
+          },
+          "mo": {
+            "description": "Module to which the frame belongs.",
+            "type": [
+              "null",
+              "string"
+            ]
+          },
+          "poc": {
+            "description": "PostContext is a slice of code lines immediately after the line from the frame's file.",
+            "type": [
+              "null",
+              "array"
+            ],
+            "items": {
+              "type": "string"
+            },
+            "minItems": 0
+          },
+          "prc": {
+            "description": "PreContext is a slice of code lines immediately before the line from the frame's file.",
+            "type": [
+              "null",
+              "array"
+            ],
+            "items": {
+              "type": "string"
+            },
+            "minItems": 0
+          }
+        },
+        "required": [
+          "f"
+        ]
+      },
+      
"minItems": 0 + }, + "su": { + "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "sy": { + "description": "Sync indicates whether the span was executed synchronously or asynchronously.", + "type": [ + "null", + "boolean" + ] + }, + "t": { + "description": "Type holds the span's type, and can have specific keywords within the service's domain (eg: 'request', 'backgroundjob', etc)", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "d", + "id", + "n", + "s", + "t" + ] +} \ No newline at end of file diff --git a/docs/spec/rumv3/transaction.json b/docs/spec/rumv3/transaction.json new file mode 100644 index 00000000000..83752a04b8c --- /dev/null +++ b/docs/spec/rumv3/transaction.json @@ -0,0 +1,1047 @@ +{ + "$id": "docs/spec/rumv3/transaction", + "type": "object", + "properties": { + "c": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "cu": { + "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.", + "type": [ + "null", + "object" + ] + }, + "g": { + "description": "Tags are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "p": { + "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "rf": { + "description": "Referer holds the URL of the page that 'linked' to the current page.", + "type": [ + "null", + "string" + ] + }, + "url": { + "description": "URL of the current page", + "type": [ + "null", + "string" + ] + } + } + }, + "q": { + "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "en": { + "description": "Env holds environment variable information passed to the monitored service.", + "type": [ + "null", + "object" + ] + }, + "he": { + "description": "Headers includes any HTTP headers sent by the requester. 
Cookies will be taken from the headers if supplied.",
+              "type": [
+                "null",
+                "object"
+              ],
+              "additionalProperties": false,
+              "patternProperties": {
+                "[.*]*$": {
+                  "type": [
+                    "null",
+                    "array",
+                    "string"
+                  ],
+                  "items": {
+                    "type": "string"
+                  }
+                }
+              }
+            },
+            "hve": {
+              "description": "HTTPVersion holds information about the used HTTP version.",
+              "type": [
+                "null",
+                "string"
+              ],
+              "maxLength": 1024
+            },
+            "mt": {
+              "description": "Method holds information about the method of the HTTP request.",
+              "type": "string",
+              "maxLength": 1024
+            }
+          },
+          "required": [
+            "mt"
+          ]
+        },
+        "r": {
+          "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "dbs": {
+              "description": "DecodedBodySize holds the size of the decoded payload.",
+              "type": [
+                "null",
+                "number"
+              ]
+            },
+            "ebs": {
+              "description": "EncodedBodySize holds the size of the encoded payload.",
+              "type": [
+                "null",
+                "number"
+              ]
+            },
+            "he": {
+              "description": "Headers holds the http headers sent in the http response.",
+              "type": [
+                "null",
+                "object"
+              ],
+              "additionalProperties": false,
+              "patternProperties": {
+                "[.*]*$": {
+                  "type": [
+                    "null",
+                    "array",
+                    "string"
+                  ],
+                  "items": {
+                    "type": "string"
+                  }
+                }
+              }
+            },
+            "sc": {
+              "description": "StatusCode sent in the http response.",
+              "type": [
+                "null",
+                "integer"
+              ]
+            },
+            "ts": {
+              "description": "TransferSize holds the total size of the payload.",
+              "type": [
+                "null",
+                "number"
+              ]
+            }
+          }
+        },
+        "se": {
+          "description": "Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "a": {
+              "description": "Agent holds information about the APM agent capturing the event.",
+              "type": [
+                "null",
+                "object"
+              ],
+              "properties": {
+                "n": {
+                  "description": "Name of the APM agent capturing information.",
+                  "type": [
+                    "null",
+                    "string"
+                  ],
+                  "maxLength": 1024
+                },
+                "ve": {
+                  "description": "Version of the APM agent capturing information.",
+                  "type": [
+                    "null",
+                    "string"
+                  ],
+                  "maxLength": 1024
+                }
+              }
+            },
+            "en": {
+              "description": "Environment in which the monitored service is running, e.g. 
`production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "fw": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "la": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "n": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "ru": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "ve": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "u": { + "description": "User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", + "type": [ + "null", + "object" + ], + "properties": { + "em": { + "description": "Email of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "id": { + "description": "ID identifies the logged in user, e.g. can be the primary key of the user", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "ud": { + "description": "Domain of the user", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "un": { + "description": "Name of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "d": { + "description": "Duration how long the transaction took to complete, in milliseconds with 3 decimal points.", + "type": "number", + "minimum": 0 + }, + "exp": { + "description": "UserExperience holds metrics for measuring real user experience. This information is only sent by RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "cls": { + "description": "CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value, or a negative value if CLS is unknown. See https://web.dev/cls/", + "type": [ + "null", + "number" + ], + "minimum": 0 + }, + "fid": { + "description": "FirstInputDelay holds the First Input Delay (FID) metric value, or a negative value if FID is unknown. 
See https://web.dev/fid/",
+          "type": [
+            "null",
+            "number"
+          ],
+          "minimum": 0
+        },
+        "lt": {
+          "description": "Longtask holds longtask duration/count metrics.",
+          "type": [
+            "null",
+            "object"
+          ],
+          "properties": {
+            "count": {
+              "description": "Count is the total number of longtasks.",
+              "type": "integer",
+              "minimum": 0
+            },
+            "max": {
+              "description": "Max longtask duration",
+              "type": "number",
+              "minimum": 0
+            },
+            "sum": {
+              "description": "Sum of longtask durations",
+              "type": "number",
+              "minimum": 0
+            }
+          },
+          "required": [
+            "count",
+            "max",
+            "sum"
+          ]
+        },
+        "tbt": {
+          "description": "TotalBlockingTime holds the Total Blocking Time (TBT) metric value, or a negative value if TBT is unknown. See https://web.dev/tbt/",
+          "type": [
+            "null",
+            "number"
+          ],
+          "minimum": 0
+        }
+      }
+    },
+    "id": {
+      "description": "ID holds the hex encoded 64 random bits ID of the event.",
+      "type": "string",
+      "maxLength": 1024
+    },
+    "k": {
+      "description": "Marks capture the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent. Marks are only reported by RUM agents.",
+      "type": [
+        "null",
+        "object"
+      ],
+      "additionalProperties": {
+        "type": [
+          "null",
+          "object"
+        ],
+        "additionalProperties": {
+          "type": [
+            "null",
+            "number"
+          ]
+        }
+      }
+    },
+    "me": {
+      "description": "Metricsets is a collection of metrics related to this transaction.",
+      "type": [
+        "null",
+        "array"
+      ],
+      "items": {
+        "type": "object",
+        "properties": {
+          "sa": {
+            "description": "Samples hold application metrics collected from the agent.",
+            "type": "object",
+            "properties": {
+              "xbc": {
+                "description": "TransactionBreakdownCount is the number of transactions for which breakdown metrics (span.self_time) have been created. As the Java agent tracks the breakdown for both sampled and non-sampled transactions, this metric is equivalent to transaction.duration.count",
+                "type": [
+                  "null",
+                  "object"
+                ],
+                "properties": {
+                  "v": {
+                    "description": "Value holds the value of a single metric sample.",
+                    "type": "number"
+                  }
+                },
+                "required": [
+                  "v"
+                ]
+              },
+              "xdc": {
+                "description": "TransactionDurationCount is the number of transactions since the last report (the delta). The duration of transactions is tracked, which allows for the creation of graphs displaying a weighted average.",
+                "type": [
+                  "null",
+                  "object"
+                ],
+                "properties": {
+                  "v": {
+                    "description": "Value holds the value of a single metric sample.",
+                    "type": "number"
+                  }
+                },
+                "required": [
+                  "v"
+                ]
+              },
+              "xds": {
+                "description": "TransactionDurationSum is the sum of all transaction durations in ms since the last report (the delta). 
The duration of transactions is tracked, which allows for the creation of graphs displaying a weighted average.", + "type": [ + "null", + "object" + ], + "properties": { + "v": { + "description": "Value holds the value of a single metric sample.", + "type": "number" + } + }, + "required": [ + "v" + ] + }, + "ysc": { + "description": "SpanSelfTimeCount holds the count of the related spans' self_time.", + "type": [ + "null", + "object" + ], + "properties": { + "v": { + "description": "Value holds the value of a single metric sample.", + "type": "number" + } + }, + "required": [ + "v" + ] + }, + "yss": { + "description": "SpanSelfTimeSum holds the sum of the related spans' self_time.", + "type": [ + "null", + "object" + ], + "properties": { + "v": { + "description": "Value holds the value of a single metric sample.", + "type": "number" + } + }, + "required": [ + "v" + ] + } + } + }, + "y": { + "description": "Span holds selected information about the correlated transaction.", + "type": [ + "null", + "object" + ], + "properties": { + "su": { + "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "t": { + "description": "Type expresses the correlated span's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "sa" + ] + }, + "minItems": 0 + }, + "n": { + "description": "Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "o": { + "description": "Outcome of the transaction with a limited set of permitted values, describing the success or failure of the transaction from the service's perspective. It is used for calculating error rates for incoming requests. Permitted values: success, failure, unknown.", + "type": [ + "null", + "string" + ], + "enum": [ + "success", + "failure", + "unknown", + null + ] + }, + "pid": { + "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "rt": { + "description": "Result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ses": { + "description": "Session holds optional transaction session information for RUM.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID holds a session ID for grouping a set of related transactions.", + "type": "string" + }, + "seq": { + "description": "Sequence holds an optional sequence number for a transaction within a session. It is not meaningful to compare sequences across two different sessions.", + "type": [ + "null", + "integer" + ], + "minimum": 1 + } + }, + "required": [ + "id" + ] + }, + "sm": { + "description": "Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.", + "type": [ + "null", + "boolean" + ] + }, + "sr": { + "description": "SampleRate applied to the monitored service at the time where this transaction was recorded. Allowed values are [0..1]. 
A SampleRate \u003c1 indicates that not all spans are recorded.", + "type": [ + "null", + "number" + ] + }, + "t": { + "description": "Type expresses the transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": "string", + "maxLength": 1024 + }, + "tid": { + "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.", + "type": "string", + "maxLength": 1024 + }, + "y": { + "description": "Spans is a collection of spans related to this transaction.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "ac": { + "description": "Action holds the specific kind of event within the sub-type represented by the span (e.g. query, connect)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "c": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "dt": { + "description": "Destination contains contextual data about the destination of spans", + "type": [ + "null", + "object" + ], + "properties": { + "ad": { + "description": "Address is the destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') IPv6 (e.g. '::1')", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "po": { + "description": "Port is the destination network port (e.g. 443)", + "type": [ + "null", + "integer" + ] + }, + "se": { + "description": "Service describes the destination service", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name is the identifier for the destination service, e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq' DEPRECATED: this field will be removed in a future release", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "rc": { + "description": "Resource identifies the destination service resource being operated on e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'", + "type": "string", + "maxLength": 1024 + }, + "t": { + "description": "Type of the destination service, e.g. db, elasticsearch. Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "rc" + ] + } + } + }, + "g": { + "description": "Tags are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. 
Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "h": { + "description": "HTTP contains contextual information when the span concerns an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "mt": { + "description": "Method holds information about the method of the HTTP request.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "r": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "dbs": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "ebs": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "ts": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "sc": { + "description": "Deprecated: Use Response.StatusCode instead. StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "url": { + "description": "URL is the raw url of the correlating HTTP request.", + "type": [ + "null", + "string" + ] + } + } + }, + "se": { + "description": "Service related information can be sent per span. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.", + "type": [ + "null", + "object" + ], + "properties": { + "a": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": [ + "null", + "object" + ], + "properties": { + "n": { + "description": "Name of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "ve": { + "description": "Version of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "n": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + } + } + } + } + }, + "d": { + "description": "Duration of the span in milliseconds", + "type": "number", + "minimum": 0 + }, + "id": { + "description": "ID holds the hex encoded 64 random bits ID of the event.", + "type": "string", + "maxLength": 1024 + }, + "n": { + "description": "Name is the generic designation of a span in the scope of a transaction.", + "type": "string", + "maxLength": 1024 + }, + "o": { + "description": "Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.", + "type": [ + "null", + "string" + ], + "enum": [ + "success", + "failure", + "unknown", + null + ] + }, + "pi": { + "description": "ParentIndex is the index of the parent span in the list. 
Absent when the parent is a transaction.",
+            "type": [
+              "null",
+              "integer"
+            ]
+          },
+          "s": {
+            "description": "Start is the offset relative to the transaction's timestamp identifying the start of the span, in milliseconds.",
+            "type": "number"
+          },
+          "sr": {
+            "description": "SampleRate applied to the monitored service at the time where this span was recorded.",
+            "type": [
+              "null",
+              "number"
+            ]
+          },
+          "st": {
+            "description": "Stacktrace connected to this span event.",
+            "type": [
+              "null",
+              "array"
+            ],
+            "items": {
+              "type": "object",
+              "properties": {
+                "ap": {
+                  "description": "AbsPath is the absolute path of the frame's file.",
+                  "type": [
+                    "null",
+                    "string"
+                  ]
+                },
+                "cli": {
+                  "description": "ContextLine is the line from the frame's file.",
+                  "type": [
+                    "null",
+                    "string"
+                  ]
+                },
+                "cn": {
+                  "description": "Classname of the frame.",
+                  "type": [
+                    "null",
+                    "string"
+                  ]
+                },
+                "co": {
+                  "description": "ColumnNumber of the frame.",
+                  "type": [
+                    "null",
+                    "integer"
+                  ]
+                },
+                "f": {
+                  "description": "Filename is the relative name of the frame's file.",
+                  "type": "string"
+                },
+                "fn": {
+                  "description": "Function represented by the frame.",
+                  "type": [
+                    "null",
+                    "string"
+                  ]
+                },
+                "li": {
+                  "description": "LineNumber of the frame.",
+                  "type": [
+                    "null",
+                    "integer"
+                  ]
+                },
+                "mo": {
+                  "description": "Module to which the frame belongs.",
+                  "type": [
+                    "null",
+                    "string"
+                  ]
+                },
+                "poc": {
+                  "description": "PostContext is a slice of code lines immediately after the line from the frame's file.",
+                  "type": [
+                    "null",
+                    "array"
+                  ],
+                  "items": {
+                    "type": "string"
+                  },
+                  "minItems": 0
+                },
+                "prc": {
+                  "description": "PreContext is a slice of code lines immediately before the line from the frame's file.",
+                  "type": [
+                    "null",
+                    "array"
+                  ],
+                  "items": {
+                    "type": "string"
+                  },
+                  "minItems": 0
+                }
+              },
+              "required": [
+                "f"
+              ]
+            },
+            "minItems": 0
+          },
+          "su": {
+            "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)",
+            "type": [
+              "null",
+              "string"
+            ],
+            "maxLength": 1024
+          },
+          "sy": {
+            "description": "Sync indicates whether the span was executed synchronously or asynchronously.",
+            "type": [
+              "null",
+              "boolean"
+            ]
+          },
+          "t": {
+            "description": "Type holds the span's type, and can have specific keywords within the service's domain (eg: 'request', 'backgroundjob', etc)",
+            "type": "string",
+            "maxLength": 1024
+          }
+        },
+        "required": [
+          "d",
+          "id",
+          "n",
+          "s",
+          "t"
+        ]
+      },
+      "minItems": 0
+    },
+    "yc": {
+      "description": "SpanCount counts correlated spans.",
+      "type": "object",
+      "properties": {
+        "dd": {
+          "description": "Dropped is the number of correlated spans that have been dropped by the APM agent recording the transaction.",
+          "type": [
+            "null",
+            "integer"
+          ]
+        },
+        "sd": {
+          "description": "Started is the number of correlated spans that are recorded.",
+          "type": "integer"
+        }
+      },
+      "required": [
+        "sd"
+      ]
+    }
+  },
+  "required": [
+    "d",
+    "id",
+    "yc",
+    "tid",
+    "t"
+  ]
+}
\ No newline at end of file
diff --git a/docs/spec/service.json b/docs/spec/service.json
deleted file mode 100644
index 163d3199350..00000000000
--- a/docs/spec/service.json
+++ /dev/null
@@ -1,96 +0,0 @@
-{
-  "$id": "docs/spec/service.json",
-  "title": "Service",
-  "type": ["object", "null"],
-  "properties": {
-    "agent": {
-      "description": "Name and version of the Elastic APM agent",
-      "type": ["object", "null"],
-      "properties": {
-        "name": {
-          "description": "Name of the Elastic APM agent, e.g. 
\"Python\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "framework": { - "description": "Name and version of the web framework used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "language": { - "description": "Name and version of the programming language used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": ["string", "null"], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "environment": { - "description": "Environment name of the service, e.g. \"production\" or \"staging\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "runtime": { - "description": "Name and version of the language runtime running this service", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "version": { - "description": "Version of the service emitting this event", - "type": ["string", "null"], - "maxLength": 1024 - }, - "node": { - "description": "Unique meaningful name of the service node.", - "type": ["object", "null"], - "properties": { - "configured_name": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - } - } -} diff --git a/docs/spec/sourcemaps/payload.json b/docs/spec/sourcemaps/payload.json deleted file mode 100644 index f1a57945b73..00000000000 --- a/docs/spec/sourcemaps/payload.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "$id": "docs/spec/sourcemaps/sourcemap-metadata.json", - "title": "Sourcemap Metadata", - "description": "Sourcemap Metadata", - "type": "object", - "properties": { - "bundle_filepath": { - "description": "relative path of the minified bundle file", - "type": "string", - "maxLength": 1024, - "minLength": 1 - }, - "service_version": { - "description": "Version of the service emitting this event", - "type": "string", - "maxLength": 1024, - "minLength": 1 - }, - "service_name": { - "description": "Immutable name of the service emitting this event", - "type": "string", - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024, - "minLength": 1 - } - }, - "required": ["bundle_filepath", "service_name", "service_version"] -} diff --git a/docs/spec/span_subtype.json b/docs/spec/span_subtype.json deleted file mode 100644 index 93fc4594748..00000000000 --- a/docs/spec/span_subtype.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "docs/spec/span_subtype.json", - "title": "Span Subtype", - "type": ["object"], - "properties": { - "subtype": { - "type": ["string", "null"], - "description": "A further sub-division of the type (e.g. 
postgresql, elasticsearch)", - "maxLength": 1024 - } - } -} diff --git a/docs/spec/span_type.json b/docs/spec/span_type.json deleted file mode 100644 index 772058add89..00000000000 --- a/docs/spec/span_type.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "docs/spec/span_type.json", - "title": "Span Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - } - } -} diff --git a/docs/spec/spans/rum_v3_span.json b/docs/spec/spans/rum_v3_span.json deleted file mode 100644 index 95da11b4c90..00000000000 --- a/docs/spec/spans/rum_v3_span.json +++ /dev/null @@ -1,244 +0,0 @@ -{ - "$id": "docs/spec/spans/rum_v3_span.json", - "description": "An event captured by an agent occurring in a monitored service", - "allOf": [ - { - "properties": { - "id": { - "description": "Hex encoded 64 random bits ID of the span.", - "type": "string", - "maxLength": 1024 - }, - "pi": { - "description": "Index of the parent span in the list. Absent when the parent is a transaction.", - "type": ["integer", "null"], - "maxLength": 1024 - }, - "s": { - "type": [ - "number", - "null" - ], - "description": "Offset relative to the transaction's timestamp identifying the start of the span, in milliseconds" - }, - "sr": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "t": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - }, - "su": { - "type": [ - "string", - "null" - ], - "description": "A further sub-division of the type (e.g. postgresql, elasticsearch)", - "maxLength": 1024 - }, - "ac": { - "type": [ - "string", - "null" - ], - "description": "The specific kind of event within the sub-type represented by the span (e.g. query, connect)", - "maxLength": 1024 - }, - "o": { - "$ref": "../outcome.json", - "description": "The outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. This field can be used for calculating error rates for outgoing requests." - }, - "c": { - "type": [ - "object", - "null" - ], - "description": "Any other arbitrary data captured by the agent, optionally provided by the user", - "properties": { - "dt": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data about the destination for spans", - "properties": { - "ad": { - "type": [ - "string", - "null" - ], - "description": "Destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') or IPv6 (e.g. '::1')", - "maxLength": 1024 - }, - "po": { - "type": [ - "integer", - "null" - ], - "description": "Destination network port (e.g. 443)" - }, - "se": { - "description": "Destination service context", - "type": [ - "object", - "null" - ], - "properties": { - "t": { - "description": "Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "n": { - "description": "Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "rc": { - "description": "Identifier for the destination service resource being operated on (e.g. 
'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "t", - "n", - "rc" - ] - } - } - }, - "h": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data of the related http request.", - "properties": { - "url": { - "type": [ - "string", - "null" - ], - "description": "The raw url of the correlating http request." - }, - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "mt": { - "type": [ - "string", - "null" - ], - "maxLength": 1024, - "description": "The method of the http request." - } - } - }, - "g": { - "$ref": "../tags.json" - }, - "se": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - } - } - } - } - }, - "d": { - "type": "number", - "description": "Duration of the span in milliseconds", - "minimum": 0 - }, - "n": { - "type": "string", - "description": "Generic designation of a span in the scope of a transaction", - "maxLength": 1024 - }, - "st": { - "type": [ - "array", - "null" - ], - "description": "List of stack frames with variable attributes (eg: lineno, filename, etc)", - "items": { - "$ref": "../rum_v3_stacktrace_frame.json" - }, - "minItems": 0 - }, - "sy": { - "type": [ - "boolean", - "null" - ], - "description": "Indicates whether the span was executed synchronously or asynchronously." 
- } - }, - "required": [ - "d", - "n", - "t", - "id" - ] - }, - { - "required": [ - "s" - ], - "properties": { - "s": { - "type": "number" - } - } - } - ] -} diff --git a/docs/spec/spans/span.json b/docs/spec/spans/span.json deleted file mode 100644 index 9db6bea9ce2..00000000000 --- a/docs/spec/spans/span.json +++ /dev/null @@ -1,235 +0,0 @@ -{ - "$id": "docs/spec/spans/span.json", - "type": "object", - "description": "An event captured by an agent occurring in a monitored service", - "allOf": [ - { "$ref": "../timestamp_epoch.json" }, - { "$ref": "../span_type.json" }, - { "$ref": "../span_subtype.json" }, - { - "properties": { - "id": { - "description": "Hex encoded 64 random bits ID of the span.", - "type": "string", - "maxLength": 1024 - }, - "transaction_id": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span.", - "type": "string", - "maxLength": 1024 - }, - "child_ids": { - "description": "List of successor transactions and/or spans.", - "type": ["array", "null"], - "minItems": 0, - "maxLength": 1024, - "items": { - "type": "string", - "maxLength": 1024 - } - }, - "start": { - "type": ["number", "null"], - "description": "Offset relative to the transaction's timestamp identifying the start of the span, in milliseconds" - }, - "sample_rate": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "action": { - "type": ["string", "null"], - "description": "The specific kind of event within the sub-type represented by the span (e.g. query, connect)", - "maxLength": 1024 - }, - "outcome": { - "$ref": "../outcome.json", - "description": "The outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. This field can be used for calculating error rates for outgoing requests." - }, - "context": { - "type": ["object", "null"], - "description": "Any other arbitrary data captured by the agent, optionally provided by the user", - "properties": { - "destination": { - "type": ["object", "null"], - "description": "An object containing contextual data about the destination for spans", - "properties": { - "address": { - "type": ["string", "null"], - "description": "Destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') or IPv6 (e.g. '::1')", - "maxLength": 1024 - }, - "port": { - "type": ["integer", "null"], - "description": "Destination network port (e.g. 443)" - }, - "service": { - "description": "Destination service context", - "type": ["object", "null"], - "properties": { - "type": { - "description": "Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "name": { - "description": "Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq')", - "type": ["string", "null"], - "maxLength": 1024 - }, - "resource": { - "description": "Identifier for the destination service resource being operated on (e.g. 
'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name')", - "type": ["string", "null"], - "maxLength": 1024 - } - }, - "required": ["type", "name", "resource"] - } - } - }, - "db": { - "type": ["object", "null"], - "description": "An object containing contextual data for database spans", - "properties": { - "instance": { - "type": ["string", "null"], - "description": "Database instance name" - }, - "link": { - "type": ["string", "null"], - "maxLength": 1024, - "description": "Database link" - }, - "statement": { - "type": ["string", "null"], - "description": "A database statement (e.g. query) for the given database type" - }, - "type": { - "type": ["string", "null"], - "description": "Database type. For any SQL database, \"sql\". For others, the lower-case database category, e.g. \"cassandra\", \"hbase\", or \"redis\"" - }, - "user": { - "type": ["string", "null"], - "description": "Username for accessing database" - }, - "rows_affected": { - "type": ["integer", "null"], - "description": "Number of rows affected by the SQL statement (if applicable)" - } - } - }, - "http": { - "type": ["object", "null"], - "description": "An object containing contextual data of the related http request.", - "properties": { - "url": { - "type": ["string", "null"], - "description": "The raw url of the correlating http request." - }, - "status_code": { - "type": ["integer", "null"], - "description": "Deprecated: Use span.context.http.response.status_code instead." - }, - "method": { - "type": ["string", "null"], - "maxLength": 1024, - "description": "The method of the http request." - }, - "response": { - "$ref": "../http_response.json" - } - } - }, - "tags": { - "$ref": "../tags.json" - }, - "service": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - } - } - }, - "message": { - "$ref": "../message.json" - } - } - }, - "duration": { - "type": "number", - "description": "Duration of the span in milliseconds", - "minimum": 0 - }, - "name": { - "type": "string", - "description": "Generic designation of a span in the scope of a transaction", - "maxLength": 1024 - }, - "stacktrace": { - "type": ["array", "null"], - "description": "List of stack frames with variable attributes (eg: lineno, filename, etc)", - "items": { - "$ref": "../stacktrace_frame.json" - }, - "minItems": 0 - }, - "sync": { - "type": ["boolean", "null"], - "description": "Indicates whether the span was executed synchronously or asynchronously." 
- } - }, - "required": ["duration", "name", "type", "id","trace_id", "parent_id"] - }, - { "anyOf":[ - {"required": ["timestamp"], "properties": {"timestamp": { "type": "integer" }}}, - {"required": ["start"], "properties": {"start": { "type": "number" }}} - ] - } - ] -} diff --git a/docs/spec/stacktrace_frame.json b/docs/spec/stacktrace_frame.json deleted file mode 100644 index 59772329b8b..00000000000 --- a/docs/spec/stacktrace_frame.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "$id": "docs/spec/stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "abs_path": { - "description": "The absolute path of the file involved in the stack frame", - "type": ["string", "null"] - }, - "colno": { - "description": "Column number", - "type": ["integer", "null"] - }, - "context_line": { - "description": "The line of code part of the stack frame", - "type": ["string", "null"] - }, - "filename": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": ["string", "null"] - }, - "classname": { - "description": "The classname of the code involved in the stack frame", - "type": ["string", "null"] - }, - "function": { - "description": "The function involved in the stack frame", - "type": ["string", "null"] - }, - "library_frame": { - "description": "A boolean, indicating if this frame is from a library or user code", - "type": ["boolean", "null"] - }, - "lineno": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": ["integer", "null"] - }, - "module": { - "description": "The module to which frame belongs to", - "type": ["string", "null"] - }, - "post_context": { - "description": "The lines of code after the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "pre_context": { - "description": "The lines of code before the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "vars": { - "description": "Local variables for this stack frame", - "type": ["object", "null"], - "properties": {} - } - }, - "anyOf": [ - { "required": ["filename"], "properties": {"filename": { "type": "string" }} }, - { "required": ["classname"], "properties": {"classname": { "type": "string" }} } - ] -} diff --git a/docs/spec/system.json b/docs/spec/system.json deleted file mode 100644 index 892cc9e9960..00000000000 --- a/docs/spec/system.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "$id": "docs/spec/system.json", - "title": "System", - "type": ["object", "null"], - "properties": { - "architecture": { - "description": "Architecture of the system the agent is running on.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "hostname": { - "description": "Deprecated. Hostname of the system the agent is running on. Will be ignored if kubernetes information is set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "detected_hostname": { - "description": "Hostname of the host the monitored service is running on. It normally contains what the hostname command returns on the host machine. Will be ignored if kubernetes information is set, otherwise should always be set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "configured_hostname": { - "description": "Name of the host the monitored service is running on. 
It should only be set when configured by the user. If empty, will be set to detected_hostname or derived from kubernetes information if provided.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "platform": { - "description": "Name of the system platform the agent is running on.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "container": { - "properties": { - "id" : { - "description": "Container ID", - "type": ["string"], - "maxLength": 1024 - } - }, - "required": ["id"] - }, - "kubernetes": { - "properties": { - "namespace": { - "description": "Kubernetes namespace", - "type": ["string", "null"], - "maxLength": 1024 - }, - "pod":{ - "properties": { - "name": { - "description": "Kubernetes pod name", - "type": ["string", "null"], - "maxLength": 1024 - }, - "uid": { - "description": "Kubernetes pod uid", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "node":{ - "properties": { - "name": { - "description": "Kubernetes node name", - "type": ["string", "null"], - "maxLength": 1024 - } - } - } - } - } - } -} diff --git a/docs/spec/tags.json b/docs/spec/tags.json deleted file mode 100644 index ead09da3f7b..00000000000 --- a/docs/spec/tags.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false -} diff --git a/docs/spec/timestamp_epoch.json b/docs/spec/timestamp_epoch.json deleted file mode 100644 index 7e6aea39dd6..00000000000 --- a/docs/spec/timestamp_epoch.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - } -} diff --git a/docs/spec/timestamp_rfc3339.json b/docs/spec/timestamp_rfc3339.json deleted file mode 100644 index f674c0d3397..00000000000 --- a/docs/spec/timestamp_rfc3339.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "$id": "docs/spec/timestamp_rfc3339.json", - "title": "Timestamp", - "description": "Used for '@timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "type": ["string", "null"], - "pattern": "Z$", - "format": "date-time", - "description": "Recorded time of the transaction, UTC based and formatted as YYYY-MM-DDTHH:mm:ss.sssZ" - } - } -} diff --git a/docs/spec/transaction_name.json b/docs/spec/transaction_name.json deleted file mode 100644 index 2ecb30e79ab..00000000000 --- a/docs/spec/transaction_name.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "docs/spec/transaction_name.json", - "title": "Transaction Name", - "type": ["object"], - "properties": { - "name": { - "type": ["string","null"], - "description": "Generic designation of a transaction in the scope of a single service (eg: 'GET /users/:id')", - "maxLength": 1024 - } - } -} diff --git a/docs/spec/transaction_type.json b/docs/spec/transaction_type.json deleted file mode 100644 index c1de2586b94..00000000000 --- a/docs/spec/transaction_type.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "docs/spec/transaction_type.json", - "title": "Transaction Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": 
"Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } -} diff --git a/docs/spec/transactions/mark.json b/docs/spec/transactions/mark.json deleted file mode 100644 index deee295beee..00000000000 --- a/docs/spec/transactions/mark.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "$id": "docs/spec/transactions/mark.json", - "type": ["object", "null"], - "description": "A mark captures the timing in milliseconds of a significant event during the lifetime of a transaction. Every mark is a simple key value pair, where the value has to be a number, and can be set by the user or the agent.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["number", "null"] - } - }, - "additionalProperties": false -} diff --git a/docs/spec/transactions/rum_v3_mark.json b/docs/spec/transactions/rum_v3_mark.json deleted file mode 100644 index 8237bdef61b..00000000000 --- a/docs/spec/transactions/rum_v3_mark.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "$id": "docs/spec/transactions/rum_v3_mark.json", - "type": ["object", "null"], - "description": "A mark captures the timing in milliseconds of a significant event during the lifetime of a transaction. Every mark is a simple key value pair, where the value has to be a number, and can be set by the user or the agent.", - "properties": { - "a": { - "type": ["object", "null"], - "description": "agent", - "properties": { - "dc": { - "type": ["number", "null"], - "description": "domComplete" - }, - "di": { - "type": ["number", "null"], - "description": "domInteractive" - }, - "ds": { - "type": ["number", "null"], - "description": "domContentLoadedEventStart" - }, - "de": { - "type": ["number", "null"], - "description": "domContentLoadedEventEnd" - }, - "fb": { - "type": ["number", "null"], - "description": "timeToFirstByte" - }, - "fp": { - "type": ["number", "null"], - "description": "firstContentfulPaint" - }, - "lp": { - "type": ["number", "null"], - "description": "largestContentfulPaint" - } - } - }, - "nt": { - "type": ["object", "null"], - "description": "navigation-timing", - "properties": { - "fs": { - "type": ["number", "null"], - "description": "fetchStart" - }, - "ls": { - "type": ["number", "null"], - "description": "domainLookupStart" - }, - "le": { - "type": ["number", "null"], - "description": "domainLookupEnd" - }, - "cs": { - "type": ["number", "null"], - "description": "connectStart" - }, - "ce": { - "type": ["number", "null"], - "description": "connectEnd" - }, - "qs": { - "type": ["number", "null"], - "description": "requestStart" - }, - "rs": { - "type": ["number", "null"], - "description": "responseStart" - }, - "re": { - "type": ["number", "null"], - "description": "responseEnd" - }, - "dl": { - "type": ["number", "null"], - "description": "domLoading" - }, - "di": { - "type": ["number", "null"], - "description": "domInteractive" - }, - "ds": { - "type": ["number", "null"], - "description": "domContentLoadedEventStart" - }, - "de": { - "type": ["number", "null"], - "description": "domContentLoadedEventEnd" - }, - "dc": { - "type": ["number", "null"], - "description": "domComplete" - }, - "es": { - "type": ["number", "null"], - "description": "loadEventStart" - }, - "ee": { - "type": ["number", "null"], - "description": "loadEventEnd" - } - } - } - } -} diff --git a/docs/spec/transactions/rum_v3_transaction.json b/docs/spec/transactions/rum_v3_transaction.json deleted file mode 100644 index 7dc2830f52e..00000000000 --- a/docs/spec/transactions/rum_v3_transaction.json +++ /dev/null @@ -1,118 
+0,0 @@ -{ - "$id": "docs/spec/transactions/rum_v3_transaction.json", - "type": "object", - "description": "An event corresponding to an incoming request or similar task occurring in a monitored service", - "allOf": [ - { - "properties": { - "id": { - "type": "string", - "description": "Hex encoded 64 random bits ID of the transaction.", - "maxLength": 1024 - }, - "tid": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "pid": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Only root transactions of a trace do not have a parent_id, otherwise it needs to be set.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "t": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - }, - "n": { - "type": [ - "string", - "null" - ], - "description": "Generic designation of a transaction in the scope of a single service (eg: 'GET /users/:id')", - "maxLength": 1024 - }, - "y": { - "type": ["array", "null"], - "$ref": "../spans/rum_v3_span.json" - }, - "me": { - "type": ["array", "null"], - "$ref": "../metricsets/rum_v3_metricset.json" - }, - "sr": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "yc": { - "type": "object", - "properties": { - "sd": { - "type": "integer", - "description": "Number of correlated spans that are recorded." - }, - "dd": { - "type": [ - "integer", - "null" - ], - "description": "Number of spans that have been dd by the a recording the x." - } - }, - "required": [ - "sd" - ] - }, - "c": { - "$ref": "../rum_v3_context.json" - }, - "d": { - "type": "number", - "description": "How long the transaction took to complete, in ms with 3 decimal points", - "minimum": 0 - }, - "rt": { - "type": [ - "string", - "null" - ], - "description": "The result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", - "maxLength": 1024 - }, - "o": { - "$ref": "../outcome.json", - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates for incoming requests." - }, - "k": { - "type": [ - "object", - "null" - ], - "description": "A mark captures the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent.", - "$ref": "rum_v3_mark.json" - }, - "sm": { - "type": [ - "boolean", - "null" - ], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." 
- }, - "exp": { - "$ref": "../rum_experience.json" - } - }, - "required": [ - "id", - "tid", - "yc", - "d", - "t" - ] - } - ] -} diff --git a/docs/spec/transactions/transaction.json b/docs/spec/transactions/transaction.json deleted file mode 100644 index c481133d682..00000000000 --- a/docs/spec/transactions/transaction.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "$id": "docs/spec/transactions/transaction.json", - "type": "object", - "description": "An event corresponding to an incoming request or similar task occurring in a monitored service", - "allOf": [ - { "$ref": "../timestamp_epoch.json" }, - { "$ref": "../transaction_name.json" }, - { "$ref": "../transaction_type.json" }, - { - "properties": { - "id": { - "type": "string", - "description": "Hex encoded 64 random bits ID of the transaction.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Only root transactions of a trace do not have a parent_id, otherwise it needs to be set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "sample_rate": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "span_count": { - "type": "object", - "properties": { - "started": { - "type": "integer", - "description": "Number of correlated spans that are recorded." - - }, - "dropped": { - "type": ["integer","null"], - "description": "Number of spans that have been dropped by the agent recording the transaction." - - } - }, - "required": ["started"] - }, - "context": { - "$ref": "../context.json" - }, - "duration": { - "type": "number", - "description": "How long the transaction took to complete, in ms with 3 decimal points", - "minimum": 0 - }, - "result": { - "type": ["string", "null"], - "description": "The result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", - "maxLength": 1024 - }, - "outcome": { - "$ref": "../outcome.json", - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates for incoming requests." - }, - "marks": { - "type": ["object", "null"], - "description": "A mark captures the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent.", - "patternProperties": { - "^[^.*\"]*$": { - "$ref": "mark.json" - } - }, - "additionalProperties": false - }, - "sampled": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "experience": { - "$ref": "../rum_experience.json" - } - }, - "required": ["id", "trace_id", "span_count", "duration", "type"] - } - ] -} diff --git a/docs/spec/user.json b/docs/spec/user.json deleted file mode 100644 index b122d7eee1c..00000000000 --- a/docs/spec/user.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$id": "docs/spec/user.json", - "title": "User", - "type": ["object", "null"], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. 
the primary key of the user", - "type": ["string", "integer", "null"], - "maxLength": 1024 - }, - "email": { - "description": "Email of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - }, - "username": { - "description": "The username of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - } - } -} diff --git a/docs/spec/v2/error.json b/docs/spec/v2/error.json new file mode 100644 index 00000000000..7de5702849d --- /dev/null +++ b/docs/spec/v2/error.json @@ -0,0 +1,1132 @@ +{ + "$id": "docs/spec/v2/error", + "description": "errorEvent represents an error or a logged error message, captured by an APM agent in a monitored service.", + "type": "object", + "properties": { + "context": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "custom": { + "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.", + "type": [ + "null", + "object" + ] + }, + "message": { + "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system", + "type": [ + "null", + "object" + ], + "properties": { + "age": { + "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.", + "type": [ + "null", + "object" + ], + "properties": { + "ms": { + "description": "Age of the message in milliseconds.", + "type": [ + "null", + "integer" + ] + } + } + }, + "body": { + "description": "Body of the received message, similar to an HTTP request body", + "type": [ + "null", + "string" + ] + }, + "headers": { + "description": "Headers received with the message, similar to HTTP request headers.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "queue": { + "description": "Queue holds information about the message queue where the message is received.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name holds the name of the message queue where the message is received.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "page": { + "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "referer": { + "description": "Referer holds the URL of the page that 'linked' to the current page.", + "type": [ + "null", + "string" + ] + }, + "url": { + "description": "URL of the current page", + "type": [ + "null", + "string" + ] + } + } + }, + "request": { + "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "body": { + "description": "Body only contains the request body, not the query string information. 
It can either be a dictionary (for standard HTTP requests) or a raw request body.", + "type": [ + "null", + "string", + "object" + ] + }, + "cookies": { + "description": "Cookies used by the request, parsed as key-value objects.", + "type": [ + "null", + "object" + ] + }, + "env": { + "description": "Env holds environment variable information passed to the monitored service.", + "type": [ + "null", + "object" + ] + }, + "headers": { + "description": "Headers includes any HTTP headers sent by the requester. Cookies will be taken from headers if supplied.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "http_version": { + "description": "HTTPVersion holds information about the used HTTP version.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "method": { + "description": "Method holds information about the method of the HTTP request.", + "type": "string", + "maxLength": 1024 + }, + "socket": { + "description": "Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.", + "type": [ + "null", + "object" + ], + "properties": { + "encrypted": { + "description": "Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.", + "type": [ + "null", + "boolean" + ] + }, + "remote_address": { + "description": "RemoteAddress holds the network address sending the request. It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.", + "type": [ + "null", + "string" + ] + } + } + }, + "url": { + "description": "URL holds information such as the raw URL, scheme, host and path.", + "type": [ + "null", + "object" + ], + "properties": { + "full": { + "description": "Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "hash": { + "description": "Hash of the request URL, e.g. 'top'", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "hostname": { + "description": "Hostname information of the request, e.g. 'example.com'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "pathname": { + "description": "Path of the request, e.g. '/search'", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "port": { + "description": "Port of the request, e.g. '443'. Can be sent as string or int.", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "protocol": { + "description": "Protocol information for the recorded request, e.g. 'https:'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "raw": { + "description": "Raw unparsed URL of the HTTP request line, e.g. https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "search": { + "description": "Search contains the query string information of the request. 
It is expected to have values delimited by ampersands.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "method" + ] + }, + "response": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "decoded_body_size": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "encoded_body_size": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "finished": { + "description": "Finished indicates whether the response was finished or not.", + "type": [ + "null", + "boolean" + ] + }, + "headers": { + "description": "Headers holds the http headers sent in the http response.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "headers_sent": { + "description": "HeadersSent indicates whether http headers were sent.", + "type": [ + "null", + "boolean" + ] + }, + "status_code": { + "description": "StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "transfer_size": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "service": { + "description": "Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.", + "type": [ + "null", + "object" + ], + "properties": { + "agent": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": [ + "null", + "object" + ], + "properties": { + "ephemeral_id": { + "description": "EphemeralID is a free format ID used for metrics correlation by agents", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "environment": { + "description": "Environment in which the monitored service is running, e.g. 
`production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "framework": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "language": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "name": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "node": { + "description": "Node must be a unique meaningful name of the service node.", + "type": [ + "null", + "object" + ], + "properties": { + "configured_name": { + "description": "Name of the service node", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "runtime": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "version": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "tags": { + "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "user": { + "description": "User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", + "type": [ + "null", + "object" + ], + "properties": { + "domain": { + "description": "Domain of the logged in user", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "email": { + "description": "Email of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "id": { + "description": "ID identifies the logged in user, e.g. can be the primary key of the user", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "username": { + "description": "Name of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "culprit": { + "description": "Culprit identifies the function call which was the primary perpetrator of this event.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "exception": { + "description": "Exception holds information about the original error. 
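The context.service description above states the fallback rule in both directions: values sent on the event override metadata, and anything missing falls back to the metadata values. A hypothetical sketch of that merge, not apm-server's actual implementation:

def effective_service(metadata_service, event_service):
    """Event-level service fields win; missing fields fall back to metadata."""
    merged = dict(metadata_service)
    for key, value in (event_service or {}).items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = {**merged[key], **value}  # shallow-merge nested objects like "agent"
        elif value is not None:
            merged[key] = value
    return merged

# effective_service({"name": "checkout", "version": "1.0"}, {"version": "1.1"})
# -> {"name": "checkout", "version": "1.1"}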
The information is language specific.", + "type": [ + "null", + "object" + ], + "properties": { + "attributes": { + "description": "Attributes of the exception.", + "type": [ + "null", + "object" + ] + }, + "cause": { + "description": "Cause can hold a collection of error exceptions representing chained exceptions. The chain starts with the outermost exception, followed by its cause, and so on.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object" + }, + "minItems": 0 + }, + "code": { + "description": "Code that is set when the error happened, e.g. database error code.", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "handled": { + "description": "Handled indicates whether the error was caught in the code or not.", + "type": [ + "null", + "boolean" + ] + }, + "message": { + "description": "Message contains the originally captured error message.", + "type": [ + "null", + "string" + ] + }, + "module": { + "description": "Module describes the exception type's module namespace.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "stacktrace": { + "description": "Stacktrace information of the captured exception.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "abs_path": { + "description": "AbsPath is the absolute path of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "classname": { + "description": "Classname of the frame.", + "type": [ + "null", + "string" + ] + }, + "colno": { + "description": "ColumnNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "context_line": { + "description": "ContextLine is the line from the frame's file.", + "type": [ + "null", + "string" + ] + }, + "filename": { + "description": "Filename is the relative name of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "function": { + "description": "Function represented by the frame.", + "type": [ + "null", + "string" + ] + }, + "library_frame": { + "description": "LibraryFrame indicates whether the frame is from a third party library.", + "type": [ + "null", + "boolean" + ] + }, + "lineno": { + "description": "LineNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "module": { + "description": "Module to which the frame belongs.", + "type": [ + "null", + "string" + ] + }, + "post_context": { + "description": "PostContext is a slice of code lines immediately after the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "pre_context": { + "description": "PreContext is a slice of code lines immediately before the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "vars": { + "description": "Vars is a flat mapping of local variables of the frame.", + "type": [ + "null", + "object" + ] + } + }, + "anyOf": [ + { + "properties": { + "classname": { + "type": "string" + } + }, + "required": [ + "classname" + ] + }, + { + "properties": { + "filename": { + "type": "string" + } + }, + "required": [ + "filename" + ] + } + ] + }, + "minItems": 0 + }, + "type": { + "description": "Type of the exception.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "anyOf": [ + { + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ] + }, + { + "properties": { + "type": { + "type": "string" + } + }, + "required": [ + "type" + ] + } + ] + }, + "id": { + "description": 
"ID holds the hex encoded 128 random bits ID of the event.", + "type": "string", + "maxLength": 1024 + }, + "log": { + "description": "Log holds additional information added when the error is logged.", + "type": [ + "null", + "object" + ], + "properties": { + "level": { + "description": "Level represents the severity of the recorded log.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "logger_name": { + "description": "LoggerName holds the name of the used logger instance.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "message": { + "description": "Message of the logged error. In case a parameterized message is captured, Message should contain the same information, but with any placeholders being replaced.", + "type": "string" + }, + "param_message": { + "description": "ParamMessage should contain the same information as Message, but with placeholders where parameters were logged, e.g. 'error connecting to %s'. The string is not interpreted, allowing differnt placeholders per client languange. The information might be used to group errors together.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "stacktrace": { + "description": "Stacktrace information of the captured error.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "abs_path": { + "description": "AbsPath is the absolute path of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "classname": { + "description": "Classname of the frame.", + "type": [ + "null", + "string" + ] + }, + "colno": { + "description": "ColumnNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "context_line": { + "description": "ContextLine is the line from the frame's file.", + "type": [ + "null", + "string" + ] + }, + "filename": { + "description": "Filename is the relative name of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "function": { + "description": "Function represented by the frame.", + "type": [ + "null", + "string" + ] + }, + "library_frame": { + "description": "LibraryFrame indicates whether the frame is from a third party library.", + "type": [ + "null", + "boolean" + ] + }, + "lineno": { + "description": "LineNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "module": { + "description": "Module to which the frame belongs to.", + "type": [ + "null", + "string" + ] + }, + "post_context": { + "description": "PostContext is a slice of code lines immediately before the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "pre_context": { + "description": "PreContext is a slice of code lines immediately after the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "vars": { + "description": "Vars is a flat mapping of local variables of the frame.", + "type": [ + "null", + "object" + ] + } + }, + "anyOf": [ + { + "properties": { + "classname": { + "type": "string" + } + }, + "required": [ + "classname" + ] + }, + { + "properties": { + "filename": { + "type": "string" + } + }, + "required": [ + "filename" + ] + } + ] + }, + "minItems": 0 + } + }, + "required": [ + "message" + ] + }, + "parent_id": { + "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "timestamp": { + "description": "Timestamp holds the recorded time of the 
event, UTC based and formatted as microseconds since Unix epoch.", + "type": [ + "null", + "integer" + ] + }, + "trace_id": { + "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "transaction": { + "description": "Transaction holds information about the correlated transaction.", + "type": [ + "null", + "object" + ], + "properties": { + "sampled": { + "description": "Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.", + "type": [ + "null", + "boolean" + ] + }, + "type": { + "description": "Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "transaction_id": { + "description": "TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "id" + ], + "allOf": [ + { + "if": { + "properties": { + "transaction_id": { + "type": "string" + } + }, + "required": [ + "transaction_id" + ] + }, + "then": { + "properties": { + "parent_id": { + "type": "string" + } + }, + "required": [ + "parent_id" + ] + } + }, + { + "if": { + "properties": { + "trace_id": { + "type": "string" + } + }, + "required": [ + "trace_id" + ] + }, + "then": { + "properties": { + "parent_id": { + "type": "string" + } + }, + "required": [ + "parent_id" + ] + } + }, + { + "if": { + "properties": { + "transaction_id": { + "type": "string" + } + }, + "required": [ + "transaction_id" + ] + }, + "then": { + "properties": { + "trace_id": { + "type": "string" + } + }, + "required": [ + "trace_id" + ] + } + }, + { + "if": { + "properties": { + "parent_id": { + "type": "string" + } + }, + "required": [ + "parent_id" + ] + }, + "then": { + "properties": { + "trace_id": { + "type": "string" + } + }, + "required": [ + "trace_id" + ] + } + } + ], + "anyOf": [ + { + "properties": { + "exception": { + "type": "object" + } + }, + "required": [ + "exception" + ] + }, + { + "properties": { + "log": { + "type": "object" + } + }, + "required": [ + "log" + ] + } + ] +} \ No newline at end of file diff --git a/docs/spec/v2/metadata.json b/docs/spec/v2/metadata.json new file mode 100644 index 00000000000..d0c7c1978e0 --- /dev/null +++ b/docs/spec/v2/metadata.json @@ -0,0 +1,552 @@ +{ + "$id": "docs/spec/v2/metadata", + "type": "object", + "properties": { + "cloud": { + "description": "Cloud metadata about where the monitored service is running.", + "type": [ + "null", + "object" + ], + "properties": { + "account": { + "description": "Account where the monitored service is running.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID of the cloud account.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the cloud account.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "availability_zone": { + "description": "AvailabilityZone where the monitored service is running, e.g. 
us-east-1a", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "instance": { + "description": "Instance on which the monitored service is running.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID of the cloud instance.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the cloud instance.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "machine": { + "description": "Machine on which the monitored service is running.", + "type": [ + "null", + "object" + ], + "properties": { + "type": { + "description": "ID of the cloud machine.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "project": { + "description": "Project in which the monitored service is running.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID of the cloud project.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the cloud project.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "provider": { + "description": "Provider that is used, e.g. aws, azure, gcp, digitalocean.", + "type": "string", + "maxLength": 1024 + }, + "region": { + "description": "Region where the monitored service is running, e.g. us-east-1", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "service": { + "description": "Service that is monitored on cloud", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the cloud service, intended to distinguish services running on different platforms within a provider, eg AWS EC2 vs Lambda, GCP GCE vs App Engine, Azure VM vs App Server.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "provider" + ] + }, + "labels": { + "description": "Labels are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Labels are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "network": { + "description": "Network holds information about the network over which the monitored service is communicating.", + "type": [ + "null", + "object" + ], + "properties": { + "connection": { + "type": [ + "null", + "object" + ], + "properties": { + "type": { + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "process": { + "description": "Process metadata about the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "argv": { + "description": "Argv holds the command line arguments used to start this process.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "pid": { + "description": "PID holds the process ID of the service.", + "type": "integer" + }, + "ppid": { + "description": "Ppid holds the parent process ID of the service.", + "type": [ + "null", + "integer" + ] + }, + "title": { + "description": "Title is the process title. 
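Returning briefly to the error schema completed above: its trailing allOf/if-then clauses force the correlation IDs to travel together, and the anyOf requires either an exception or a log. Two made-up payloads make the rules concrete (IDs and messages are invented examples):

# Smallest valid error event: "id" plus one of "exception"/"log"
# (log requires "message").
minimal_error = {
    "id": "9876543210abcdef",
    "log": {"message": "connection refused"},
}

# Once transaction_id appears, the if/then clauses also demand
# parent_id, which in turn demands trace_id:
correlated_error = {
    "id": "9876543210abcdef",
    "exception": {"message": "boom"},
    "transaction_id": "abcdef0123456789",
    "parent_id": "abcdef0123456789",
    "trace_id": "0123456789abcdef0123456789abcdef",
}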
It can be the same as process name.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "pid" + ] + }, + "service": { + "description": "Service metadata about the monitored service.", + "type": "object", + "properties": { + "agent": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": "object", + "properties": { + "ephemeral_id": { + "description": "EphemeralID is a free format ID used for metrics correlation by agents", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the APM agent capturing information.", + "type": "string", + "maxLength": 1024, + "minLength": 1 + }, + "version": { + "description": "Version of the APM agent capturing information.", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "name", + "version" + ] + }, + "environment": { + "description": "Environment in which the monitored service is running, e.g. `production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "framework": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "language": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used programming language", + "type": "string", + "maxLength": 1024 + }, + "version": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "name" + ] + }, + "name": { + "description": "Name of the monitored service.", + "type": "string", + "maxLength": 1024, + "minLength": 1, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "node": { + "description": "Node must be a unique meaningful name of the service node.", + "type": [ + "null", + "object" + ], + "properties": { + "configured_name": { + "description": "Name of the service node", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "runtime": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the language runtime", + "type": "string", + "maxLength": 1024 + }, + "version": { + "description": "Version of the language runtime", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "name", + "version" + ] + }, + "version": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "agent", + "name" + ] + }, + "system": { + "description": "System metadata", + "type": [ + "null", + "object" + ], + "properties": { + "architecture": { + "description": "Architecture of the system the monitored service is running on.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "configured_hostname": { + "description": "ConfiguredHostname is the configured name of the host the monitored service is running on. It should only be sent when configured by the user. 
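The service block just defined carries most of the hard requirements in this file: metadata requires service, service requires agent and name, and agent requires name and version. A minimal instance, with placeholder values, could look like this:

# Smallest metadata object satisfying docs/spec/v2/metadata as written
# above; the service name and agent name/version are placeholders.
minimal_metadata = {
    "service": {
        "name": "checkout service",  # must match ^[a-zA-Z0-9 _-]+$, minLength 1
        "agent": {"name": "python", "version": "1.0.0"},
    }
}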
If given, it is used as the event's hostname.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "container": { + "description": "Container holds the system's container ID if available.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID of the container the monitored service is running in.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "detected_hostname": { + "description": "DetectedHostname is the hostname detected by the APM agent. It usually contains what the hostname command returns on the host machine. It will be used as the event's hostname if ConfiguredHostname is not present.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "hostname": { + "description": "Deprecated: Use ConfiguredHostname and DetectedHostname instead. DeprecatedHostname is the host name of the system the service is running on. It does not distinguish between configured and detected hostname and therefore is deprecated and only used if no other hostname information is available.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "kubernetes": { + "description": "Kubernetes system information if the monitored service runs on Kubernetes.", + "type": [ + "null", + "object" + ], + "properties": { + "namespace": { + "description": "Namespace of the Kubernetes resource the monitored service is run on.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "node": { + "description": "Node related information", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the Kubernetes Node", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "pod": { + "description": "Pod related information", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the Kubernetes Pod", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "uid": { + "description": "UID is the system-generated string uniquely identifying the Pod.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "platform": { + "description": "Platform name of the system platform the monitored service is running on.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "user": { + "description": "User metadata, which can be overwritten on a per event basis.", + "type": [ + "null", + "object" + ], + "properties": { + "domain": { + "description": "Domain of the logged in user", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "email": { + "description": "Email of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "id": { + "description": "ID identifies the logged in user, e.g. 
can be the primary key of the user", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "username": { + "description": "Name of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "service" + ] +} \ No newline at end of file diff --git a/docs/spec/v2/metricset.json b/docs/spec/v2/metricset.json new file mode 100644 index 00000000000..391ae34809e --- /dev/null +++ b/docs/spec/v2/metricset.json @@ -0,0 +1,209 @@ +{ + "$id": "docs/spec/v2/metricset", + "type": "object", + "properties": { + "samples": { + "description": "Samples hold application metrics collected from the agent.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^[^*\"]*$": { + "type": [ + "null", + "object" + ], + "properties": { + "counts": { + "description": "Counts holds the bucket counts for histogram metrics. These numbers must be positive or zero. If Counts is specified, then Values is expected to be specified with the same number of elements, and with the same order.", + "type": [ + "null", + "array" + ], + "items": { + "type": "integer", + "minimum": 0 + }, + "minItems": 0 + }, + "type": { + "description": "Type holds an optional metric type: gauge, counter, or histogram. If Type is unknown, it will be ignored.", + "type": [ + "null", + "string" + ] + }, + "unit": { + "description": "Unit holds an optional unit for the metric. - \"percent\" (value is in the range [0,1]) - \"byte\" - a time unit: \"nanos\", \"micros\", \"ms\", \"s\", \"m\", \"h\", \"d\" If Unit is unknown, it will be ignored.", + "type": [ + "null", + "string" + ] + }, + "value": { + "description": "Value holds the value of a single metric sample.", + "type": [ + "null", + "number" + ] + }, + "values": { + "description": "Values holds the bucket values for histogram metrics. Values must be provided in ascending order; failure to do so will result in the metric being discarded.", + "type": [ + "null", + "array" + ], + "items": { + "type": "number" + }, + "minItems": 0 + } + }, + "allOf": [ + { + "if": { + "properties": { + "counts": { + "type": "array" + } + }, + "required": [ + "counts" + ] + }, + "then": { + "properties": { + "values": { + "type": "array" + } + }, + "required": [ + "values" + ] + } + }, + { + "if": { + "properties": { + "values": { + "type": "array" + } + }, + "required": [ + "values" + ] + }, + "then": { + "properties": { + "counts": { + "type": "array" + } + }, + "required": [ + "counts" + ] + } + } + ], + "anyOf": [ + { + "properties": { + "value": { + "type": "number" + } + }, + "required": [ + "value" + ] + }, + { + "properties": { + "values": { + "type": "array" + } + }, + "required": [ + "values" + ] + } + ] + } + } + }, + "span": { + "description": "Span holds selected information about the correlated transaction.", + "type": [ + "null", + "object" + ], + "properties": { + "subtype": { + "description": "Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "type": { + "description": "Type expresses the correlated span's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "tags": { + "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. 
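The samples schema above couples "counts" and "values" in both directions and accepts either a scalar "value" or a histogram. A sketch of both shapes, with invented metric names and numbers:

# A gauge carries "value"; a histogram carries ascending bucket "values"
# plus "counts" of the same length (the schema enforces co-presence; the
# description requires equal length and matching order).
samples = {
    "system.memory.actual.free": {"value": 2147483648.0, "type": "gauge", "unit": "byte"},
    "request.duration.histogram": {
        "type": "histogram",
        "values": [0.5, 1.0, 2.5, 5.0],  # bucket values, ascending
        "counts": [12, 30, 7, 1],        # non-negative, one per bucket
    },
}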
Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "timestamp": { + "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch", + "type": [ + "null", + "integer" + ] + }, + "transaction": { + "description": "Transaction holds selected information about the correlated transaction.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the correlated transaction.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "type": { + "description": "Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "samples" + ] +} \ No newline at end of file diff --git a/docs/spec/v2/span.json b/docs/spec/v2/span.json new file mode 100644 index 00000000000..f3b1b233c86 --- /dev/null +++ b/docs/spec/v2/span.json @@ -0,0 +1,748 @@ +{ + "$id": "docs/spec/v2/span", + "type": "object", + "properties": { + "action": { + "description": "Action holds the specific kind of event within the sub-type represented by the span (e.g. query, connect)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "child_ids": { + "description": "ChildIDs holds a list of successor transactions and/or spans.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string", + "maxLength": 1024 + }, + "minItems": 0 + }, + "composite": { + "description": "Composite holds details on a group of spans represented by a single one.", + "type": [ + "null", + "object" + ], + "properties": { + "compression_strategy": { + "description": "A string value indicating which compression strategy was used. The valid values are `exact_match` and `same_kind`.", + "type": "string" + }, + "count": { + "description": "Count is the number of compressed spans the composite span represents. The minimum count is 2, as a composite span represents at least two spans.", + "type": "integer", + "minimum": 2 + }, + "sum": { + "description": "Sum is the total duration of all compressed spans this composite span represents, in milliseconds.", + "type": "number", + "minimum": 0 + } + }, + "required": [ + "count", + "sum", + "compression_strategy" + ] + }, + "context": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "db": { + "description": "Database contains contextual data for database spans", + "type": [ + "null", + "object" + ], + "properties": { + "instance": { + "description": "Instance name of the database.", + "type": [ + "null", + "string" + ] + }, + "link": { + "description": "Link to the database server.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "rows_affected": { + "description": "RowsAffected shows the number of rows affected by the statement.", + "type": [ + "null", + "integer" + ] + }, + "statement": { + "description": "Statement of the recorded database event, e.g. query.", + "type": [ + "null", + "string" + ] + }, + "type": { + "description": "Type of the recorded database event, e.g. 
sql, cassandra, hbase, redis.", + "type": [ + "null", + "string" + ] + }, + "user": { + "description": "User is the username with which the database is accessed.", + "type": [ + "null", + "string" + ] + } + } + }, + "destination": { + "description": "Destination contains contextual data about the destination of spans", + "type": [ + "null", + "object" + ], + "properties": { + "address": { + "description": "Address is the destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') IPv6 (e.g. '::1')", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "port": { + "description": "Port is the destination network port (e.g. 443)", + "type": [ + "null", + "integer" + ] + }, + "service": { + "description": "Service describes the destination service", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name is the identifier for the destination service, e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq'. DEPRECATED: this field will be removed in a future release.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "resource": { + "description": "Resource identifies the destination service resource being operated on e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'", + "type": "string", + "maxLength": 1024 + }, + "type": { + "description": "Type of the destination service, e.g. db, elasticsearch. Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + }, + "required": [ + "resource" + ] + } + } + }, + "http": { + "description": "HTTP contains contextual information when the span concerns an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "method": { + "description": "Method holds information about the method of the HTTP request.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "response": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "decoded_body_size": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "encoded_body_size": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "headers": { + "description": "Headers holds the http headers sent in the http response.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "status_code": { + "description": "StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "transfer_size": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "status_code": { + "description": "Deprecated: Use Response.StatusCode instead. 
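Two constraints in the span context above are easy to miss: a composite block must always carry count (at least 2), sum, and compression_strategy, and a destination.service object is only valid if it names a resource. An invented fragment satisfying both:

# Illustrative span fragment: a composite standing in for 14 compressed
# spans, plus a destination whose required "resource" is set. Host and
# numbers are made up.
composite_span_fragment = {
    "composite": {"count": 14, "sum": 37.5, "compression_strategy": "same_kind"},
    "context": {
        "destination": {
            "address": "db.example.com",
            "port": 5432,
            "service": {"resource": "postgresql"},  # required when service is sent
        }
    },
}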
StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "url": { + "description": "URL is the raw url of the correlating HTTP request.", + "type": [ + "null", + "string" + ] + } + } + }, + "message": { + "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system", + "type": [ + "null", + "object" + ], + "properties": { + "age": { + "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.", + "type": [ + "null", + "object" + ], + "properties": { + "ms": { + "description": "Age of the message in milliseconds.", + "type": [ + "null", + "integer" + ] + } + } + }, + "body": { + "description": "Body of the received message, similar to an HTTP request body", + "type": [ + "null", + "string" + ] + }, + "headers": { + "description": "Headers received with the message, similar to HTTP request headers.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "queue": { + "description": "Queue holds information about the message queue where the message is received.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name holds the name of the message queue where the message is received.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "service": { + "description": "Service related information can be sent per span. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.", + "type": [ + "null", + "object" + ], + "properties": { + "agent": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": [ + "null", + "object" + ], + "properties": { + "ephemeral_id": { + "description": "EphemeralID is a free format ID used for metrics correlation by agents", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "environment": { + "description": "Environment in which the monitored service is running, e.g. 
`production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "framework": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "language": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "name": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "node": { + "description": "Node must be a unique meaningful name of the service node.", + "type": [ + "null", + "object" + ], + "properties": { + "configured_name": { + "description": "Name of the service node", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "runtime": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "version": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "tags": { + "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + } + } + }, + "duration": { + "description": "Duration of the span in milliseconds. When the span is a composite one, duration is the gross duration, including \"whitespace\" in between spans.", + "type": "number", + "minimum": 0 + }, + "id": { + "description": "ID holds the hex encoded 64 random bits ID of the event.", + "type": "string", + "maxLength": 1024 + }, + "name": { + "description": "Name is the generic designation of a span in the scope of a transaction.", + "type": "string", + "maxLength": 1024 + }, + "outcome": { + "description": "Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. 
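Because outcome is a closed set, the error-rate use case mentioned in its description reduces to counting. A toy aggregation over already-decoded events; the helper below is illustrative, not part of apm-server:

from collections import Counter

def failure_rate(events):
    """Failures divided by events with a known outcome; "unknown" and missing outcomes are excluded."""
    tally = Counter(e.get("outcome") for e in events)
    known = tally["success"] + tally["failure"]
    return tally["failure"] / known if known else 0.0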
It can be used for calculating error rates for outgoing requests.", + "type": [ + "null", + "string" + ], + "enum": [ + "success", + "failure", + "unknown", + null + ] + }, + "parent_id": { + "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.", + "type": "string", + "maxLength": 1024 + }, + "sample_rate": { + "description": "SampleRate applied to the monitored service at the time where this span was recorded.", + "type": [ + "null", + "number" + ] + }, + "stacktrace": { + "description": "Stacktrace connected to this span event.", + "type": [ + "null", + "array" + ], + "items": { + "type": "object", + "properties": { + "abs_path": { + "description": "AbsPath is the absolute path of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "classname": { + "description": "Classname of the frame.", + "type": [ + "null", + "string" + ] + }, + "colno": { + "description": "ColumnNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "context_line": { + "description": "ContextLine is the line from the frame's file.", + "type": [ + "null", + "string" + ] + }, + "filename": { + "description": "Filename is the relative name of the frame's file.", + "type": [ + "null", + "string" + ] + }, + "function": { + "description": "Function represented by the frame.", + "type": [ + "null", + "string" + ] + }, + "library_frame": { + "description": "LibraryFrame indicates whether the frame is from a third party library.", + "type": [ + "null", + "boolean" + ] + }, + "lineno": { + "description": "LineNumber of the frame.", + "type": [ + "null", + "integer" + ] + }, + "module": { + "description": "Module to which the frame belongs.", + "type": [ + "null", + "string" + ] + }, + "post_context": { + "description": "PostContext is a slice of code lines immediately after the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "pre_context": { + "description": "PreContext is a slice of code lines immediately before the line from the frame's file.", + "type": [ + "null", + "array" + ], + "items": { + "type": "string" + }, + "minItems": 0 + }, + "vars": { + "description": "Vars is a flat mapping of local variables of the frame.", + "type": [ + "null", + "object" + ] + } + }, + "anyOf": [ + { + "properties": { + "classname": { + "type": "string" + } + }, + "required": [ + "classname" + ] + }, + { + "properties": { + "filename": { + "type": "string" + } + }, + "required": [ + "filename" + ] + } + ] + }, + "minItems": 0 + }, + "start": { + "description": "Start is the offset relative to the transaction's timestamp identifying the start of the span, in milliseconds.", + "type": [ + "null", + "number" + ] + }, + "subtype": { + "description": "Subtype is a further sub-division of the type (e.g. 
postgresql, elasticsearch)", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "sync": { + "description": "Sync indicates whether the span was executed synchronously or asynchronously.", + "type": [ + "null", + "boolean" + ] + }, + "timestamp": { + "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch", + "type": [ + "null", + "integer" + ] + }, + "trace_id": { + "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.", + "type": "string", + "maxLength": 1024 + }, + "transaction_id": { + "description": "TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "type": { + "description": "Type holds the span's type, and can have specific keywords within the service's domain (eg: 'request', 'backgroundjob', etc)", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "duration", + "id", + "name", + "parent_id", + "trace_id", + "type" + ], + "anyOf": [ + { + "properties": { + "start": { + "type": "number" + } + }, + "required": [ + "start" + ] + }, + { + "properties": { + "timestamp": { + "type": "integer" + } + }, + "required": [ + "timestamp" + ] + } + ] +} \ No newline at end of file diff --git a/docs/spec/v2/transaction.json b/docs/spec/v2/transaction.json new file mode 100644 index 00000000000..dc3ec6c07cf --- /dev/null +++ b/docs/spec/v2/transaction.json @@ -0,0 +1,777 @@ +{ + "$id": "docs/spec/v2/transaction", + "type": "object", + "properties": { + "context": { + "description": "Context holds arbitrary contextual information for the event.", + "type": [ + "null", + "object" + ], + "properties": { + "custom": { + "description": "Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.", + "type": [ + "null", + "object" + ] + }, + "message": { + "description": "Message holds details related to message receiving and publishing if the captured event integrates with a messaging system", + "type": [ + "null", + "object" + ], + "properties": { + "age": { + "description": "Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. 
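For reference while reading the span requirements above: six fields are unconditionally required, and the trailing anyOf additionally demands either start (offset in ms from the transaction timestamp) or timestamp (microseconds since epoch). A minimal, invented example:

# Smallest span accepted by docs/spec/v2/span as defined above; all
# IDs and values are placeholders.
minimal_span = {
    "id": "0123456789abcdef",
    "trace_id": "0123456789abcdef0123456789abcdef",
    "parent_id": "fedcba9876543210",
    "name": "SELECT FROM users",
    "type": "db",
    "duration": 3.781,
    "start": 12.5,  # or "timestamp": <microseconds since epoch>
}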
If a timestamp is not available, agents should omit this field.", + "type": [ + "null", + "object" + ], + "properties": { + "ms": { + "description": "Age of the message in milliseconds.", + "type": [ + "null", + "integer" + ] + } + } + }, + "body": { + "description": "Body of the received message, similar to an HTTP request body", + "type": [ + "null", + "string" + ] + }, + "headers": { + "description": "Headers received with the message, similar to HTTP request headers.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "queue": { + "description": "Queue holds information about the message queue where the message is received.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name holds the name of the message queue where the message is received.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "page": { + "description": "Page holds information related to the current page and page referers. It is only sent from RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "referer": { + "description": "Referer holds the URL of the page that 'linked' to the current page.", + "type": [ + "null", + "string" + ] + }, + "url": { + "description": "URL of the current page", + "type": [ + "null", + "string" + ] + } + } + }, + "request": { + "description": "Request describes the HTTP request information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "body": { + "description": "Body only contains the request body, not the query string information. It can either be a dictionary (for standard HTTP requests) or a raw request body.", + "type": [ + "null", + "string", + "object" + ] + }, + "cookies": { + "description": "Cookies used by the request, parsed as key-value objects.", + "type": [ + "null", + "object" + ] + }, + "env": { + "description": "Env holds environment variable information passed to the monitored service.", + "type": [ + "null", + "object" + ] + }, + "headers": { + "description": "Headers includes any HTTP headers sent by the requester. Cookies will be taken by headers if supplied.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "http_version": { + "description": "HTTPVersion holds information about the used HTTP version.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "method": { + "description": "Method holds information about the method of the HTTP request.", + "type": "string", + "maxLength": 1024 + }, + "socket": { + "description": "Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.", + "type": [ + "null", + "object" + ], + "properties": { + "encrypted": { + "description": "Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.", + "type": [ + "null", + "boolean" + ] + }, + "remote_address": { + "description": "RemoteAddress holds the network address sending the request.
It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.", + "type": [ + "null", + "string" + ] + } + } + }, + "url": { + "description": "URL holds information such as the raw URL, scheme, host and path.", + "type": [ + "null", + "object" + ], + "properties": { + "full": { + "description": "Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "hash": { + "description": "Hash of the request URL, e.g. 'top'", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "hostname": { + "description": "Hostname information of the request, e.g. 'example.com'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "pathname": { + "description": "Path of the request, e.g. '/search'", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "port": { + "description": "Port of the request, e.g. '443'. Can be sent as string or int.", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "protocol": { + "description": "Protocol information for the recorded request, e.g. 'https:'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "raw": { + "description": "Raw unparsed URL of the HTTP request line, e.g. https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "search": { + "description": "Search contains the query string information of the request. It is expected to have values delimited by ampersands.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + }, + "required": [ + "method" + ] + }, + "response": { + "description": "Response describes the HTTP response information in case the event was created as a result of an HTTP request.", + "type": [ + "null", + "object" + ], + "properties": { + "decoded_body_size": { + "description": "DecodedBodySize holds the size of the decoded payload.", + "type": [ + "null", + "number" + ] + }, + "encoded_body_size": { + "description": "EncodedBodySize holds the size of the encoded payload.", + "type": [ + "null", + "number" + ] + }, + "finished": { + "description": "Finished indicates whether the response was finished or not.", + "type": [ + "null", + "boolean" + ] + }, + "headers": { + "description": "Headers holds the http headers sent in the http response.", + "type": [ + "null", + "object" + ], + "additionalProperties": false, + "patternProperties": { + "[.*]*$": { + "type": [ + "null", + "array", + "string" + ], + "items": { + "type": "string" + } + } + } + }, + "headers_sent": { + "description": "HeadersSent indicates whether http headers were sent.", + "type": [ + "null", + "boolean" + ] + }, + "status_code": { + "description": "StatusCode sent in the http response.", + "type": [ + "null", + "integer" + ] + }, + "transfer_size": { + "description": "TransferSize holds the total size of the payload.", + "type": [ + "null", + "number" + ] + } + } + }, + "service": { + "description": "Service related information can be sent per event.
Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.", + "type": [ + "null", + "object" + ], + "properties": { + "agent": { + "description": "Agent holds information about the APM agent capturing the event.", + "type": [ + "null", + "object" + ], + "properties": { + "ephemeral_id": { + "description": "EphemeralID is a free format ID used for metrics correlation by agents", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "name": { + "description": "Name of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the APM agent capturing information.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "environment": { + "description": "Environment in which the monitored service is running, e.g. `production` or `staging`.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "framework": { + "description": "Framework holds information about the framework used in the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used framework", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "language": { + "description": "Language holds information about the programming language of the monitored service.", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the used programming language", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "name": { + "description": "Name of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024, + "pattern": "^[a-zA-Z0-9 _-]+$" + }, + "node": { + "description": "Node must be a unique meaningful name of the service node.", + "type": [ + "null", + "object" + ], + "properties": { + "configured_name": { + "description": "Name of the service node", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "runtime": { + "description": "Runtime holds information about the language runtime running the monitored service", + "type": [ + "null", + "object" + ], + "properties": { + "name": { + "description": "Name of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "version": { + "description": "Version of the language runtime", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "version": { + "description": "Version of the monitored service.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + }, + "tags": { + "description": "Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "string", + "boolean", + "number" + ], + "maxLength": 1024 + } + }, + "user": { + "description": "User holds information about the correlated user for this event. 
If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", + "type": [ + "null", + "object" + ], + "properties": { + "domain": { + "description": "Domain of the logged in user", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "email": { + "description": "Email of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "id": { + "description": "ID identifies the logged in user, e.g. can be the primary key of the user", + "type": [ + "null", + "string", + "integer" + ], + "maxLength": 1024 + }, + "username": { + "description": "Name of the user.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + } + } + } + } + }, + "duration": { + "description": "Duration how long the transaction took to complete, in milliseconds with 3 decimal points.", + "type": "number", + "minimum": 0 + }, + "experience": { + "description": "UserExperience holds metrics for measuring real user experience. This information is only sent by RUM agents.", + "type": [ + "null", + "object" + ], + "properties": { + "cls": { + "description": "CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value, or a negative value if CLS is unknown. See https://web.dev/cls/", + "type": [ + "null", + "number" + ], + "minimum": 0 + }, + "fid": { + "description": "FirstInputDelay holds the First Input Delay (FID) metric value, or a negative value if FID is unknown. See https://web.dev/fid/", + "type": [ + "null", + "number" + ], + "minimum": 0 + }, + "longtask": { + "description": "Longtask holds longtask duration/count metrics.", + "type": [ + "null", + "object" + ], + "properties": { + "count": { + "description": "Count is the total number of longtasks.", + "type": "integer", + "minimum": 0 + }, + "max": { + "description": "Max longtask duration", + "type": "number", + "minimum": 0 + }, + "sum": { + "description": "Sum of longtask durations", + "type": "number", + "minimum": 0 + } + }, + "required": [ + "count", + "max", + "sum" + ] + }, + "tbt": { + "description": "TotalBlockingTime holds the Total Blocking Time (TBT) metric value, or a negative value if TBT is unknown. See https://web.dev/tbt/", + "type": [ + "null", + "number" + ], + "minimum": 0 + } + } + }, + "id": { + "description": "ID holds the hex encoded 64 random bits ID of the event.", + "type": "string", + "maxLength": 1024 + }, + "marks": { + "description": "Marks capture the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent. Marks are only reported by RUM agents.", + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "object" + ], + "additionalProperties": { + "type": [ + "null", + "number" + ] + } + } + }, + "name": { + "description": "Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "outcome": { + "description": "Outcome of the transaction with a limited set of permitted values, describing the success or failure of the transaction from the service's perspective. It is used for calculating error rates for incoming requests.
Permitted values: success, failure, unknown.", + "type": [ + "null", + "string" + ], + "enum": [ + "success", + "failure", + "unknown", + null + ] + }, + "parent_id": { + "description": "ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "result": { + "description": "Result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", + "type": [ + "null", + "string" + ], + "maxLength": 1024 + }, + "sample_rate": { + "description": "SampleRate applied to the monitored service at the time where this transaction was recorded. Allowed values are [0..1]. A SampleRate \u003c1 indicates that not all spans are recorded.", + "type": [ + "null", + "number" + ] + }, + "sampled": { + "description": "Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.", + "type": [ + "null", + "boolean" + ] + }, + "session": { + "description": "Session holds optional transaction session information for RUM.", + "type": [ + "null", + "object" + ], + "properties": { + "id": { + "description": "ID holds a session ID for grouping a set of related transactions.", + "type": "string", + "maxLength": 1024 + }, + "sequence": { + "description": "Sequence holds an optional sequence number for a transaction within a session. It is not meaningful to compare sequences across two different sessions.", + "type": [ + "null", + "integer" + ], + "minimum": 1 + } + }, + "required": [ + "id" + ] + }, + "span_count": { + "description": "SpanCount counts correlated spans.", + "type": "object", + "properties": { + "dropped": { + "description": "Dropped is the number of correlated spans that have been dropped by the APM agent recording the transaction.", + "type": [ + "null", + "integer" + ] + }, + "started": { + "description": "Started is the number of correlated spans that are recorded.", + "type": "integer" + } + }, + "required": [ + "started" + ] + }, + "timestamp": { + "description": "Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch", + "type": [ + "null", + "integer" + ] + }, + "trace_id": { + "description": "TraceID holds the hex encoded 128 random bits ID of the correlated trace.", + "type": "string", + "maxLength": 1024 + }, + "type": { + "description": "Type expresses the transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.", + "type": "string", + "maxLength": 1024 + } + }, + "required": [ + "duration", + "id", + "span_count", + "trace_id", + "type" + ] +} \ No newline at end of file diff --git a/docs/ssl-input.asciidoc b/docs/ssl-input.asciidoc index 1f2eb28f2ba..7074c9545eb 100644 --- a/docs/ssl-input.asciidoc +++ b/docs/ssl-input.asciidoc @@ -40,22 +40,24 @@ of the APM Server by authenticating its certificate. When the APM server uses a certificate that is not chained to a publicly-trusted certificate (e.g. 
self-signed), additional setting will be required on the agent side: -* *Go Agent*: certificate pinning through {apm-go-ref}/configuration.html#config-server-cert[`ELASTIC_APM_SERVER_CERT`] -* *Python Agent*: certificate pinning through {apm-py-ref}/configuration.html#config-server-cert[`server_cert`] -* *Ruby Agent*: certificate pinning through {apm-ruby-ref}/configuration.html#config-ssl-ca-cert[`server_ca_cert`] -* *NodeJS Agent*: custom CA setting through {apm-node-ref}/configuration.html#server-ca-cert-file[`serverCaCertFile`] -* *Java Agent*: adding the certificate to the JVM `trustStore`. +* *Go agent*: certificate pinning through {apm-go-ref}/configuration.html#config-server-cert[`ELASTIC_APM_SERVER_CERT`] +* *Python agent*: certificate pinning through {apm-py-ref}/configuration.html#config-server-cert[`server_cert`] +* *Ruby agent*: certificate pinning through {apm-ruby-ref}/configuration.html#config-ssl-ca-cert[`server_ca_cert`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-server-cert[`ServerCert`] +* *NodeJS agent*: custom CA setting through {apm-node-ref}/configuration.html#server-ca-cert-file[`serverCaCertFile`] +* *Java agent*: adding the certificate to the JVM `trustStore`. See {apm-java-ref}/ssl-configuration.html#ssl-server-authentication[APM Server authentication] for more details. It is not recommended to disable APM Server authentication, however it is possible through agents configuration: -* *Go Agent*: {apm-go-ref}/configuration.html#config-verify-server-cert[`ELASTIC_APM_VERIFY_SERVER_CERT`] -* *.NET Agent*: {apm-dotnet-ref}/config-reporter.html#config-verify-server-cert[`VerifyServerCert`] -* *Java Agent*: {apm-java-ref}/config-reporter.html#config-verify-server-cert[`verify_server_cert`] -* *Python Agent*: {apm-py-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] -* *Ruby Agent*: {apm-ruby-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] -* *NodeJS Agent*: {apm-node-ref}/configuration.html#validate-server-cert[`verifyServerCert`] +* *Go agent*: {apm-go-ref}/configuration.html#config-verify-server-cert[`ELASTIC_APM_VERIFY_SERVER_CERT`] +* *.NET agent*: {apm-dotnet-ref}/config-reporter.html#config-verify-server-cert[`VerifyServerCert`] +* *Java agent*: {apm-java-ref}/config-reporter.html#config-verify-server-cert[`verify_server_cert`] +* *PHP agent*: {apm-php-ref-v}/configuration-reference.html#config-verify-server-cert[`verify_server_cert`] +* *Python agent*: {apm-py-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] +* *Ruby agent*: {apm-ruby-ref}/configuration.html#config-verify-server-cert[`verify_server_cert`] +* *NodeJS agent*: {apm-node-ref}/configuration.html#validate-server-cert[`verifyServerCert`] [[ssl-client-authentication]] ==== Client certificate authentication diff --git a/docs/storage-management.asciidoc b/docs/storage-management.asciidoc index c261cc39cad..d594a607955 100644 --- a/docs/storage-management.asciidoc +++ b/docs/storage-management.asciidoc @@ -212,7 +212,7 @@ INFO "delete_indices" action completed. ===== Delete data matching a query You can delete documents matching a specific query. 
-For example, all documents with a given `c`ontext.service.name` use the following request: +For example, to delete all documents with a given `context.service.name`, use the following request: ["source","sh"] ------------------------------------------------------------ diff --git a/docs/tab-widgets/configure-agent-widget.asciidoc new file mode 100644 index 00000000000..0936b939643 --- /dev/null +++ b/docs/tab-widgets/configure-agent-widget.asciidoc @@ -0,0 +1,40 @@ +++++
+++++ + +include::configure-agent.asciidoc[tag=central-config] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/configure-agent.asciidoc b/docs/tab-widgets/configure-agent.asciidoc new file mode 100644 index 00000000000..998ce5c5900 --- /dev/null +++ b/docs/tab-widgets/configure-agent.asciidoc @@ -0,0 +1,21 @@ +// tag::central-config[] +Central configuration allows you to fine-tune your agent configuration from within the APM app. +Changes are automatically propagated to your APM agents, and there’s no need to redeploy. + +A select number of configuration options are supported. +See {apm-app-ref}/agent-configuration.html[Agent configuration in Kibana] +for more information and a configuration reference. +// end::central-config[] + +// tag::reg-config[] +For a full list of agent configuration options, see the relevant agent reference: + +* {apm-go-ref-v}/configuration.html[Go Agent configuration] +* {apm-ios-ref-v}/configuration.html[iOS Agent configuration] +* {apm-java-ref-v}/configuration.html[Java Agent configuration] +* {apm-dotnet-ref-v}/configuration.html[.NET Agent configuration] +* {apm-node-ref}/configuring-the-agent.html[Node.js Agent configuration] +* {apm-py-ref-v}/configuration.html[Python Agent configuration] +* {apm-ruby-ref-v}/configuration.html[Ruby Agent configuration] +* {apm-rum-ref-v}/configuration.html[RUM Agent configuration] +// end::reg-config[] diff --git a/docs/tab-widgets/configure-server-widget.asciidoc b/docs/tab-widgets/configure-server-widget.asciidoc new file mode 100644 index 00000000000..bcbfdf2d9c5 --- /dev/null +++ b/docs/tab-widgets/configure-server-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
+++++ + +include::configure-server.asciidoc[tag=ess] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/configure-server.asciidoc new file mode 100644 index 00000000000..2fa3488ae63 --- /dev/null +++ b/docs/tab-widgets/configure-server.asciidoc @@ -0,0 +1,19 @@ +// tag::ess[] + +If you're running APM Server in Elastic Cloud, you can configure your own user settings right in the Elasticsearch Service Console. +Any changes are automatically appended to the `apm-server.yml` configuration file for your instance. + +Full details are available in the {cloud}/ec-manage-apm-settings.html[APM user settings] documentation. + +// end::ess[] + +// tag::self-managed[] + +If you've installed APM Server yourself, you can edit the `apm-server.yml` configuration file to make changes. +More information is available in {apm-server-ref-v}/configuring-howto-apm-server.html[configuring APM Server]. + +Don't forget to also read about +{apm-server-ref-v}/securing-apm-server.html[securing APM Server], and +{apm-server-ref-v}/monitoring.html[monitoring APM Server]. + +// end::self-managed[] diff --git a/docs/tab-widgets/distributed-trace-receive-widget.asciidoc new file mode 100644 index 00000000000..bc0f1f18da8 --- /dev/null +++ b/docs/tab-widgets/distributed-trace-receive-widget.asciidoc @@ -0,0 +1,150 @@ +// The Java agent defaults to visible. +// Change with `aria-selected="false"` and `hidden=""` +++++
+++++ + +include::distributed-trace-receive.asciidoc[tag=java] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/distributed-trace-receive.asciidoc b/docs/tab-widgets/distributed-trace-receive.asciidoc new file mode 100644 index 00000000000..6487d9379ce --- /dev/null +++ b/docs/tab-widgets/distributed-trace-receive.asciidoc @@ -0,0 +1,208 @@ +// tag::go[] + +// Need help with this example + +1. Parse the incoming TraceContext with +https://godoc.org/go.elastic.co/apm/module/apmhttp#ParseTraceparentHeader[`ParseTraceparentHeader`] or +https://godoc.org/go.elastic.co/apm/module/apmhttp#ParseTracestateHeader[`ParseTracestateHeader`]. + +2. Start a new transaction or span as a child of the incoming transaction with +{apm-go-ref}/api.html#tracer-api-start-transaction-options[`StartTransactionOptions`] or +{apm-go-ref}/api.html#transaction-start-span-options[`StartSpanOptions`]. + +Example: + +[source,go] +---- +// Receive incoming TraceContext +traceContext, _ := apmhttp.ParseTraceparentHeader(r.Header.Get("Traceparent")) <1> +traceContext.State, _ = apmhttp.ParseTracestateHeader(r.Header["Tracestate"]...) <2> + +opts := apm.TransactionOptions{ + TraceContext: traceContext, <3> +} +transaction := apm.DefaultTracer.StartTransactionOptions("GET /", "request", opts) <4> +---- +<1> Parse the TraceParent header +<2> Parse the tracestate header +<3> Set the parent trace context +<4> Start a new transaction as a child of the received TraceContext + +// end::go[] + +// *************************************************** +// *************************************************** + +// tag::ios[] + +experimental::[] + +_Not applicable._ + +// end::ios[] + +// *************************************************** +// *************************************************** + +// tag::java[] + +1. Create a transaction as a child of the incoming transaction with +{apm-java-ref}/public-api.html#api-transaction-inject-trace-headers[`startTransactionWithRemoteParent()`]. + +2. Start and name the transaction with {apm-java-ref}/public-api.html#api-transaction-activate[`activate()`] +and {apm-java-ref}/public-api.html#api-set-name[`setName()`]. + +Example: + +[source,java] +---- +// Hook into a callback provided by the framework that is called on incoming requests +public Response onIncomingRequest(Request request) throws Exception { + // creates a transaction representing the server-side handling of the request + Transaction transaction = ElasticApm.startTransactionWithRemoteParent(request::getHeader, request::getHeaders); <1> + try (final Scope scope = transaction.activate()) { <2> + String name = "a useful name like ClassName#methodName where the request is handled"; + transaction.setName(name); <3> + transaction.setType(Transaction.TYPE_REQUEST); <4> + return request.handle(); + } catch (Exception e) { + transaction.captureException(e); + throw e; + } finally { + transaction.end(); <5> + } +} +---- +<1> Create a transaction as the child of a remote parent +<2> Activate the transaction +<3> Name the transaction +<4> Add a transaction type +<5> Eventually, end the transaction + +// end::java[] + +// *************************************************** +// *************************************************** + +// tag::net[] + +Deserialize the incoming distributed tracing context, and pass it to any of the +{apm-dotnet-ref}/public-api.html#api-start-transaction[`StartTransaction`] or +{apm-dotnet-ref}/public-api.html#convenient-capture-transaction[`CaptureTransaction`] APIs -- +all of which have an optional `DistributedTracingData` parameter. 
+This will create a new transaction or span as a child of the incoming trace context. + +Example starting a new transaction: + +[source,csharp] +---- +var transaction2 = Agent.Tracer.StartTransaction("Transaction2", "TestTransaction", + DistributedTracingData.TryDeserializeFromString(serializedDistributedTracingData)); +---- + +// end::net[] + +// *************************************************** +// *************************************************** + +// tag::node[] + +1. Decode and store the `traceparent` in the receiving service. + +2. Pass in the `traceparent` as the `childOf` option to manually start a new transaction +as a child of the received `traceparent` with +{apm-node-ref}/agent-api.html#apm-start-transaction[`apm.startTransaction()`]. + +Example receiving a `traceparent` over raw UDP: + +[source,js] +---- +const traceparent = readTraceparentFromUDPPacket() <1> +agent.startTransaction('my-service-b-transaction', { childOf: traceparent }) <2> +---- +<1> Read the `traceparent` from the incoming request. +<2> Use the `traceparent` to initialize a new transaction that is a child of the original `traceparent`. + +// end::node[] + +// *************************************************** +// *************************************************** + +// tag::php[] + +1. Receive the distributed tracing data on the server side. + +2. Begin a new transaction using the agent's public API. For example, use {apm-php-ref-v}/public-api.html#api-elasticapm-class-begin-current-transaction[`ElasticApm::beginCurrentTransaction`] +and pass the received distributed tracing data (serialized as string) as a parameter. +This will create a new transaction as a child of the incoming trace context. + +3. Don't forget to eventually end the transaction on the server side. + +Example: + +[source,php] +---- +$receiverTransaction = ElasticApm::beginCurrentTransaction( <1> + 'GET /data-api', + 'data-layer', + /* timestamp */ null, + $distDataAsString <2> +); +---- +<1> Start a new transaction +<2> Pass in the received distributed tracing data (serialized as string) + +Once this new transaction has been created in the receiving service, +you can create child spans, or use any other agent API methods as you typically would. + +// end::php[] + +// *************************************************** +// *************************************************** + +// tag::python[] + +1. Create a TraceParent object from a string or HTTP header. + +2. Start a new transaction as a child of the `TraceParent` by passing in a `TraceParent` object. + +Example using HTTP headers: + +[source,python] +---- +parent = elasticapm.trace_parent_from_headers(headers_dict) <1> +client.begin_transaction('processors', trace_parent=parent) <2> +---- +<1> Create a TraceParent object from HTTP headers formed as a dictionary +<2> Begin a new transaction as a child of the received `TraceParent` + +TIP: See the {apm-py-ref}/api.html#traceparent-api[`TraceParent` API] for additional examples. +// end::python[] + +// *************************************************** +// *************************************************** + +// tag::ruby[] + +Start a new transaction or span as a child of the incoming transaction or span with +{apm-ruby-ref}/api.html#api-agent-with_transaction[`with_transaction`] or +{apm-ruby-ref}/api.html#api-agent-with_span[`with_span`]. 
+ +Example: + +[source,ruby] +---- +# env being a Rack env +context = ElasticAPM::TraceContext.parse(env: env) <1> + +ElasticAPM.with_transaction("Do things", trace_context: context) do <2> + ElasticAPM.with_span("Do nested thing", trace_context: context) do <3> + end +end +---- +<1> Parse the incoming `TraceContext` +<2> Create a transaction as a child of the incoming `TraceContext` +<3> Create a span as a child of the newly created transaction. `trace_context` is optional here, +as spans are automatically created as a child of their parent's transaction's TraceContext when none is passed. + +// end::ruby[] diff --git a/docs/tab-widgets/distributed-trace-send-widget.asciidoc b/docs/tab-widgets/distributed-trace-send-widget.asciidoc new file mode 100644 index 00000000000..115cf6556ca --- /dev/null +++ b/docs/tab-widgets/distributed-trace-send-widget.asciidoc @@ -0,0 +1,150 @@ +// The Java agent defaults to visible. +// Change with `aria-selected="false"` and `hidden=""` +++++ +
+++++ + +include::distributed-trace-send.asciidoc[tag=java] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/distributed-trace-send.asciidoc new file mode 100644 index 00000000000..5d6c5580d8b --- /dev/null +++ b/docs/tab-widgets/distributed-trace-send.asciidoc @@ -0,0 +1,221 @@ +// tag::go[] + +1. Start a transaction with +{apm-go-ref}/api.html#tracer-api-start-transaction[`StartTransaction`] or a span with +{apm-go-ref}/api.html#transaction-start-span[`StartSpan`]. + +2. Get the active TraceContext. + +3. Send the TraceContext to the receiving service. + +Example: + +[source,go] +---- +transaction := apm.DefaultTracer.StartTransaction("GET /", "request") <1> +traceContext := transaction.TraceContext() <2> + +// Send TraceContext to receiving service +traceparent := apmhttp.FormatTraceparentHeader(traceContext) <3> +tracestate := traceContext.State.String() +---- +<1> Start a transaction +<2> Get TraceContext from current Transaction +<3> Format the TraceContext or tracestate as a traceparent header. +// end::go[] + +// *************************************************** +// *************************************************** + +// tag::ios[] + +experimental::[] + +The agent will automatically inject trace headers into network requests made with `URLSession`, but if you're using a non-standard network library you may need to manually inject them. This can be done using the OpenTelemetry APIs: + +1. Create a `Setter` + +2. Create a `Span` per https://github.com/open-telemetry/opentelemetry-swift/blob/main/Examples/Simple%20Exporter/main.swift#L35[Open Telemetry standards] + +3. Inject the trace context into the header dictionary + +4. Follow the procedure of your network library to complete the network request. Make sure to call `span.end()` when the request succeeds or fails. + +[source,swift] +---- +import OpenTelemetryApi +import OpenTelemetrySdk + +struct BasicSetter: Setter { <1> + func set(carrier: inout [String: String], key: String, value: String) { + carrier[key] = value + } +} + +let span : Span = ... <2> +let setter = BasicSetter() +let propagator = W3CTraceContextPropagator() +var headers = [String:String]() + +propagator.inject(spanContext: span.context, carrier: &headers, setter:setter) <3> + +var request = URLRequest(...) +request.allHTTPHeaderFields = headers +... // make network request +span.end() +---- +// end::ios[] + +// *************************************************** +// *************************************************** + +// tag::java[] + +1. Start a transaction with {apm-java-ref}/public-api.html#api-start-transaction[`startTransaction`], +or a span with {apm-java-ref}/public-api.html#api-span-start-span[`startSpan`]. + +2.
Inject the `traceparent` header into the request object with +{apm-java-ref}/public-api.html#api-transaction-inject-trace-headers[`injectTraceHeaders`]. + +Example of manually instrumenting an RPC framework: + +[source,java] +---- +// Hook into a callback provided by the RPC framework that is called on outgoing requests +public Response onOutgoingRequest(Request request) throws Exception { + Span span = ElasticApm.currentSpan() <1> + .startSpan("external", "http", null) + .setName(request.getMethod() + " " + request.getHost()); + try (final Scope scope = span.activate()) { + span.injectTraceHeaders((name, value) -> request.addHeader(name, value)); <2> + return request.execute(); + } catch (Exception e) { + span.captureException(e); + throw e; + } finally { + span.end(); <3> + } +} +---- +<1> Create a span representing an external call +<2> Inject the `traceparent` header into the request object +<3> End the span + +// end::java[] + +// *************************************************** +// *************************************************** + +// tag::net[] + +1. Serialize the distributed tracing context of the active transaction or span with +{apm-dotnet-ref}/public-api.html#api-current-transaction[`CurrentTransaction`] or +{apm-dotnet-ref}/public-api.html#api-current-span[`CurrentSpan`]. + +2. Send the serialized context to the receiving service. + +Example: + +[source,csharp] +---- +string outgoingDistributedTracingData = + (Agent.Tracer.CurrentSpan?.OutgoingDistributedTracingData + ?? Agent.Tracer.CurrentTransaction?.OutgoingDistributedTracingData)?.SerializeToString(); +// Now send `outgoingDistributedTracingData` to the receiving service +---- + +// end::net[] + +// *************************************************** +// *************************************************** + +// tag::node[] + +1. Start a transaction with {apm-node-ref}/agent-api.html#apm-start-transaction[`apm.startTransaction()`], +or a span with {apm-node-ref}/agent-api.html#apm-start-span[`apm.startSpan()`]. + +2. Get the serialized `traceparent` string of the started transaction/span with +{apm-node-ref}/agent-api.html#apm-current-traceparent[`currentTraceparent`]. + +3. Encode the `traceparent` and send it to the receiving service inside your regular request. + +Example using raw UDP to communicate between two services, A and B: + +[source,js] +---- +agent.startTransaction('my-service-a-transaction'); <1> +const traceparent = agent.currentTraceparent; <2> +sendMetadata(`traceparent: ${traceparent}\n`); <3> +---- +<1> Start a transaction +<2> Get the current `traceparent` +<3> Send the `traceparent` as a header to service B. + +// end::node[] + +// *************************************************** +// *************************************************** + +// tag::php[] + +1. On the client side (i.e., the side sending the request) get the current distributed tracing context. + +2. Serialize the current distributed tracing context to a format supported by the request's transport and send it to the server side (i.e., the side receiving the request). + +Example: + +[source,php] +---- +$distDataAsString = ElasticApm::getSerializedCurrentDistributedTracingData(); <1> +---- +<1> Get the current distributed tracing data serialized as string + +// end::php[] + +// *************************************************** +// *************************************************** + +// tag::python[] + +1. Start a transaction with {apm-py-ref}/api.html#client-api-begin-transaction[`begin_transaction()`]. + +2.
Get the `trace_parent` of the active transaction. + +3. Send the `trace_parent` to the receiving service. + +Example: + +[source,python] +---- +client.begin_transaction('new-transaction') <1> + +trace_parent_str = elasticapm.get_trace_parent_header() <2> + +# Send `trace_parent_str` to another service +---- +<1> Start a new transaction +<2> Return the string representation of the current transaction's TraceParent object +// end::python[] + +// *************************************************** +// *************************************************** + +// tag::ruby[] + +1. Start a span with {apm-ruby-ref}/api.html#api-agent-with_span[`with_span`]. + +2. Get the active `TraceContext`. + +3. Send the `TraceContext` to the receiving service. + +[source,ruby] +---- +ElasticAPM.with_span "Name" do |span| <1> + header = span.trace_context.traceparent.to_header <2> + # send the TraceContext Header to a receiving service... +end +---- +<1> Start a span +<2> Get the `TraceContext` + +// end::ruby[] diff --git a/docs/tab-widgets/install-agents-widget.asciidoc new file mode 100644 index 00000000000..9a165850c0e --- /dev/null +++ b/docs/tab-widgets/install-agents-widget.asciidoc @@ -0,0 +1,168 @@ +// The Java agent defaults to visible. +// Change with `aria-selected="false"` and `hidden=""` +++++
+++++ + +include::install-agents.asciidoc[tag=java] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/install-agents.asciidoc new file mode 100644 index 00000000000..08a789c8f36 --- /dev/null +++ b/docs/tab-widgets/install-agents.asciidoc @@ -0,0 +1,578 @@ +// tag::go[] +*Install the agent* + +Install the APM agent packages for Go. + +[source,go] +---- +go get go.elastic.co/apm +---- + +*Configure the agent* + +Agents are libraries that run inside of your application process. +APM services are created programmatically based on the executable file name, or the `ELASTIC_APM_SERVICE_NAME` environment variable. + +[source,go] +---- +# Initialize using environment variables: + +# Set the service name. Allowed characters: a-z, A-Z, 0-9, -, _, and space. +# If ELASTIC_APM_SERVICE_NAME is not specified, the executable name will be used. +export ELASTIC_APM_SERVICE_NAME= + +# Set custom APM Server URL. Default: http://localhost:8200. +export ELASTIC_APM_SERVER_URL= + +# Use if APM Server requires a token +export ELASTIC_APM_SECRET_TOKEN= +---- + +*Instrument your application* + +Instrument your Go application by using one of the provided instrumentation modules or by using the tracer API directly. + +[source,go] +---- +import ( + "net/http" + + "go.elastic.co/apm/module/apmhttp" +) + +func main() { + mux := http.NewServeMux() + ... + http.ListenAndServe(":8080", apmhttp.Wrap(mux)) +} +---- + +*Learn more in the agent reference* + +* {apm-go-ref-v}/supported-tech.html[Supported technologies] +* {apm-go-ref-v}/configuration.html[Advanced configuration] +* {apm-go-ref-v}/getting-started.html[Detailed guide to instrumenting Go source code] +// end::go[] + +// *************************************************** +// *************************************************** + +// tag::ios[] + +experimental::[] + +*Add the agent dependency to your project* + +Add the Elastic APM iOS Agent as a +https://developer.apple.com/documentation/swift_packages/adding_package_dependencies_to_your_app[package dependency] +to your Xcode project or your `Package.swift`: + +[source,swift,linenums,highlight=2;10] +---- +Package( + dependencies:[ + .package(name: "iOSAgent", url: "git@github.com:elastic/apm-agent-ios.git", .branch("main")), + ], + targets:[ + .target( + name: "MyApp", + dependencies: [ + .product(name: "iOSAgent", package: "iOSAgent") + ] + ), +]) +---- + +*Initialize the agent* + +If you're using `SwiftUI` to build your app, add the following to `App.swift`: + +[source,swift,linenums,highlight=2;7..12] +---- +import SwiftUI +import iOSAgent + +@main +struct MyApp: App { + init() { + var config = AgentConfiguration() + config.collectorAddress = "127.0.0.1" <1> + config.collectorPort = 8200 <2> + config.collectorTLS = false <3> + config.secretToken = "" <4> + Agent.start(with: config) + } + var body: some Scene { + WindowGroup { + ContentView() + } + } +} +---- +<1> APM Server URL or IP address +<2> APM Server port number +<3> Enable TLS for OpenTelemetry exporters +<4> Set secret token for APM Server connection + +If you're not using `SwiftUI`, you can add the same thing to your AppDelegate file: + +`AppDelegate.swift` +[source,swift,linenums,highlight=2;9..14] +---- +import UIKit +import iOSAgent +@main +class AppDelegate: UIResponder, UIApplicationDelegate { + func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?)
-> Bool { + var config = AgentConfiguration() + config.collectorAddress = "127.0.0.1" <1> + config.collectorPort = 8200 <2> + config.collectorTLS = false <3> + config.secretToken = "" <4> + Agent.start(with: config) + return true + } +} +---- +<1> APM Server url or ip address +<2> APM Server port number +<3> Enable TLS for Open telemetry exporters +<4> Set secret token for APM server connection + +// end::ios[] + +// *************************************************** +// *************************************************** + +// tag::java[] + +*Download the APM agent* + +Download the agent jar from http://search.maven.org/#search%7Cga%7C1%7Ca%3Aelastic-apm-agent[Maven Central]. +Do not add the agent as a dependency to your application. + +*Start your application with the javaagent flag* + +Add the `-javaagent` flag and configure the agent with system properties. + +* Set required service name +* Set custom APM Server URL (default: http://localhost:8200) +* Set the base package of your application + +[source,java] +---- +java -javaagent:/path/to/elastic-apm-agent-.jar \ + -Delastic.apm.service_name=my-application \ + -Delastic.apm.server_urls=http://localhost:8200 \ + -Delastic.apm.secret_token= \ + -Delastic.apm.application_packages=org.example \ + -jar my-application.jar +---- + +*Learn more in the agent reference* + +* {apm-java-ref-v}/supported-technologies-details.html[Supported technologies] +* {apm-java-ref-v}/configuration.html[Advanced configuration] +// end::java[] + +// *************************************************** +// *************************************************** + +// tag::net[] +*Download the APM agent* + +Add the agent packages from https://www.nuget.org/packages?q=Elastic.apm[NuGet] to your .NET application. +There are multiple NuGet packages available for different use cases. + +For an ASP.NET Core application with Entity Framework Core, download the +https://www.nuget.org/packages/Elastic.Apm.NetCoreAll[Elastic.Apm.NetCoreAll] package. +This package will automatically add every agent component to your application. + +To minimize the number of dependencies, you can use the +https://www.nuget.org/packages/Elastic.Apm.AspNetCore[Elastic.Apm.AspNetCore] package for just ASP.NET Core monitoring, or the +https://www.nuget.org/packages/Elastic.Apm.EntityFrameworkCore[Elastic.Apm.EfCore] package for just Entity Framework Core monitoring. + +If you only want to use the public agent API for manual instrumentation, use the +https://www.nuget.org/packages/Elastic.Apm[Elastic.Apm] package. + +*Add the agent to the application* + +For an ASP.NET Core application with the `Elastic.Apm.NetCoreAll` package, +call the `UseAllElasticApm` method in the `Configure` method within the `Startup.cs` file: + +[source,dotnet] +---- +public class Startup +{ + public void Configure(IApplicationBuilder app, IHostingEnvironment env) + { + app.UseAllElasticApm(Configuration); + //…rest of the method + } + //…rest of the class +} +---- + +Passing an `IConfiguration` instance is optional and by doing so, +the agent will read config settings through this `IConfiguration` instance, for example, +from the `appsettings.json` file: + +[source,json] +---- +{ + "ElasticApm": { + "SecretToken": "", + "ServerUrls": "http://localhost:8200", //Set custom APM Server URL (default: http://localhost:8200) + "ServiceName" : "MyApp", //allowed characters: a-z, A-Z, 0-9, -, _, and space. 
Default is the entry assembly of the application + } +} +---- + +If you don’t pass an `IConfiguration` instance to the agent, for example, in a non-ASP.NET Core application, +you can configure the agent with environment variables. +See the agent reference for more information. + +*Learn more in the agent reference* + +* {apm-dotnet-ref-v}/supported-technologies.html[Supported technologies] +* {apm-dotnet-ref-v}/configuration.html[Advanced configuration] +// end::net[] + +// *************************************************** +// *************************************************** + +// tag::node[] +*Install the APM agent* + +Install the APM agent for Node.js as a dependency to your application. + +[source,js] +---- +npm install elastic-apm-node --save +---- + +*Configure the agent* + +Agents are libraries that run inside of your application process. APM services are created programmatically based on the `serviceName`. +This agent supports a variety of frameworks but can also be used with your custom stack. + +[source,js] +---- +// Add this to the VERY top of the first file loaded in your app +var apm = require('elastic-apm-node').start({ + // Override service name from package.json + // Allowed characters: a-z, A-Z, 0-9, -, _, and space + serviceName: '', + + // Use if APM Server requires a token + secretToken: '', + + // Set custom APM Server URL (default: http://localhost:8200) + serverUrl: '' +}) +---- + +*Learn more in the agent reference* + +* {apm-node-ref-v}/supported-technologies.html[Supported technologies] +* {apm-node-ref-v}/advanced-setup.html[Babel/ES Modules] +* {apm-node-ref-v}/configuring-the-agent.html[Advanced configuration] + +// end::node[] + +// *************************************************** +// *************************************************** + +// tag::php[] + +*Install the agent* + +Install the PHP agent using one of the https://github.com/elastic/apm-agent-php/releases[published packages]. + +To use the RPM package (RHEL/CentOS and Fedora): + +[source,php] +---- +rpm -ivh .rpm +---- + +To use the DEB package (Debian and Ubuntu): + +[source,php] +---- +dpkg -i .deb +---- + +To use the APK package (Alpine): + +[source,php] +---- +apk add --allow-untrusted .apk +---- + +If you can’t find your distribution, +you can install the agent by {apm-php-ref-v}/setup.html[building it from the source]. + +*Configure the agent* + +Configure your agent inside of the `php.ini` file: + +[source,ini] +---- +elastic_apm.server_url=http://localhost:8200 +elastic_apm.secret_token=SECRET_TOKEN +elastic_apm.service_name="My-service" +---- + +*Learn more in the agent reference* + +* {apm-php-ref-v}/supported-technologies.html[Supported technologies] +* {apm-php-ref-v}/configuration.html[Configuration] + +// end::php[] + +// *************************************************** +// *************************************************** + +// tag::python[] +Django:: ++ +*Install the APM agent* ++ +Install the APM agent for Python as a dependency. ++ +[source,python] +---- +$ pip install elastic-apm +---- ++ +*Configure the agent* ++ +Agents are libraries that run inside of your application process. +APM services are created programmatically based on the `SERVICE_NAME`. ++ +[source,python] +---- +# Add the agent to the installed apps +INSTALLED_APPS = ( + 'elasticapm.contrib.django', + # ... +) + +ELASTIC_APM = { + # Set required service name.
Allowed characters: + # a-z, A-Z, 0-9, -, _, and space + 'SERVICE_NAME': '', + + # Use if APM Server requires a token + 'SECRET_TOKEN': '', + + # Set custom APM Server URL (default: http://localhost:8200) + 'SERVER_URL': '', +} + +# To send performance metrics, add our tracing middleware: +MIDDLEWARE = ( + 'elasticapm.contrib.django.middleware.TracingMiddleware', + #... +) +---- + +Flask:: ++ +*Install the APM agent* ++ +Install the APM agent for Python as a dependency. ++ +[source,python] +---- +$ pip install elastic-apm[flask] +---- ++ +*Configure the agent* ++ +Agents are libraries that run inside of your application process. +APM services are created programmatically based on the `SERVICE_NAME`. ++ +[source,python] +---- +# initialize using environment variables +from elasticapm.contrib.flask import ElasticAPM +app = Flask(__name__) +apm = ElasticAPM(app) + +# or configure to use ELASTIC_APM in your application settings +from elasticapm.contrib.flask import ElasticAPM +app.config['ELASTIC_APM'] = { + # Set required service name. Allowed characters: + # a-z, A-Z, 0-9, -, _, and space + 'SERVICE_NAME': '', + + # Use if APM Server requires a token + 'SECRET_TOKEN': '', + + # Set custom APM Server URL (default: http://localhost:8200) + 'SERVER_URL': '', +} + +apm = ElasticAPM(app) +---- + +*Learn more in the agent reference* + +* {apm-py-ref-v}/supported-technologies.html[Supported technologies] +* {apm-py-ref-v}/configuration.html[Advanced configuration] + +// end::python[] + +// *************************************************** +// *************************************************** + +// tag::ruby[] +*Install the APM agent* + +Add the agent to your Gemfile. + +[source,ruby] +---- +gem 'elastic-apm' +---- +*Configure the agent* + +Ruby on Rails:: ++ +APM is automatically started when your app boots. +Configure the agent by creating the config file `config/elastic_apm.yml`: ++ +[source,ruby] +---- +# config/elastic_apm.yml: + +# Set service name - allowed characters: a-z, A-Z, 0-9, -, _ and space +# Defaults to the name of your Rails app +service_name: 'my-service' + +# Use if APM Server requires a token +secret_token: '' + +# Set custom APM Server URL (default: http://localhost:8200) +server_url: 'http://localhost:8200' +---- + +Rack:: ++ +For Rack or a compatible framework, like Sinatra, include the middleware in your app and start the agent. ++ +[source,ruby] +---- +# config.ru + require 'sinatra/base' + + class MySinatraApp < Sinatra::Base + use ElasticAPM::Middleware + + # ... + end + + ElasticAPM.start( + app: MySinatraApp, # required + config_file: '' # optional, defaults to config/elastic_apm.yml + ) + + run MySinatraApp + + at_exit { ElasticAPM.stop } +---- ++ +*Create a config file* ++ +Create a config file config/elastic_apm.yml: ++ +[source,ruby] +---- +# config/elastic_apm.yml: + +# Set service name - allowed characters: a-z, A-Z, 0-9, -, _ and space +# Defaults to the name of your Rack app's class. 
+service_name: 'my-service' + +# Use if APM Server requires a token +secret_token: '' + +# Set custom APM Server URL (default: http://localhost:8200) +server_url: 'http://localhost:8200' +---- + +*Learn more in the agent reference* + +* {apm-ruby-ref-v}/supported-technologies.html[Supported technologies] +* {apm-ruby-ref-v}/configuration.html[Advanced configuration] + +// end::ruby[] + +// *************************************************** +// *************************************************** + +// tag::rum[] +*Enable Real User Monitoring support in APM Server* + +APM Server disables RUM support by default. +To enable it, set `apm-server.rum.enabled: true` in your APM Server configuration file. + +*Set up the agent* + +Once RUM support is enabled, you can set up the RUM agent. +There are two ways to do this: add the agent as a dependency, +or set it up with `<script>` tags. + +To use `<script>` tags, add a `<script>` tag to the HTML page and use the `elasticApm` global object to load and initialize the agent: + +[source,html] +---- +<script src="https://your-server/path/to/elastic-apm-rum.umd.min.js" crossorigin></script> +<script> + elasticApm.init({ + serviceName: '', + serverUrl: 'http://localhost:8200', + }) +</script> +---- + +*Learn more in the agent reference* + +* {apm-rum-ref-v}/supported-technologies.html[Supported technologies] +* {apm-rum-ref-v}/configuration.html[Advanced configuration] + +// end::rum[] diff --git a/docs/tab-widgets/jaeger-sampling-widget.asciidoc new file mode 100644 index 00000000000..cf41515e53d --- /dev/null +++ b/docs/tab-widgets/jaeger-sampling-widget.asciidoc @@ -0,0 +1,40 @@ +++++
+++++ + +include::jaeger-sampling.asciidoc[tag=ess] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/jaeger-sampling.asciidoc new file mode 100644 index 00000000000..cb9e52a0fdb --- /dev/null +++ b/docs/tab-widgets/jaeger-sampling.asciidoc @@ -0,0 +1,14 @@ +// tag::ess[] +Visit the {kibana-ref}/agent-configuration.html[Agent configuration] page in the APM app to add a new sampling rate. + +// end::ess[] + +// tag::self-managed[] +APM Agent central configuration requires the Kibana endpoint to be configured. +To enable the Kibana endpoint, set `kibana.enabled` to `true`, +and point `kibana.host` at the Kibana host that APM Server will communicate with. + +Once configured, +visit the {kibana-ref}/agent-configuration.html[Agent configuration] page in the APM app to add a new sampling rate. + +// end::self-managed[] diff --git a/docs/tab-widgets/jaeger-widget.asciidoc new file mode 100644 index 00000000000..5902738ca38 --- /dev/null +++ b/docs/tab-widgets/jaeger-widget.asciidoc @@ -0,0 +1,40 @@ +++++
+++++ + +include::jaeger.asciidoc[tag=ess] + +++++
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/jaeger.asciidoc new file mode 100644 index 00000000000..e20735edb3d --- /dev/null +++ b/docs/tab-widgets/jaeger.asciidoc @@ -0,0 +1,58 @@ +// tag::ess[] +. Log into {ess-console}[Elastic Cloud] and select your deployment. +Copy your APM endpoint and APM Server secret token; you'll need these in the next step. + +. Configure APM Server as a collector for your Jaeger agents. ++ +As of this writing, the Jaeger agent binary offers the following CLI flags, +which can be used to enable TLS, output to {ecloud}, and set the APM Server secret token: ++ +[source,terminal] +---- +--reporter.grpc.tls.enabled=true +--reporter.grpc.host-port= +--agent.tags="elastic-apm-auth=Bearer " +---- + +TIP: For the equivalent environment variables, +change all letters to upper-case and replace punctuation with underscores (`_`). +See the https://www.jaegertracing.io/docs/1.22/cli/[Jaeger CLI flags documentation] for more information. + +// end::ess[] + +// tag::self-managed[] +. Configure APM Server as a collector for your Jaeger agents. ++ +As of this writing, the Jaeger agent binary offers the `--reporter.grpc.host-port` CLI flag. +Use this to define the gRPC host and port that APM Server is listening on: ++ +[source,terminal] +---- +--reporter.grpc.host-port= +---- + +. (Optional) Enable encryption ++ +When TLS is enabled in APM Server, Jaeger agents must also enable TLS communication: ++ +[source,terminal] +---- +--reporter.grpc.tls.enabled=true +---- + +. (Optional) Enable token-based authorization ++ +A secret token or API key can be used to ensure only authorized +Jaeger agents can send data to the APM Server. +When enabled, use an agent-level tag to authorize Jaeger agent communication with the APM Server: ++ +[source,terminal] +---- +--agent.tags="elastic-apm-auth=Bearer " +---- + +TIP: For the equivalent environment variables, +change all letters to upper-case and replace punctuation with underscores (`_`). +See the https://www.jaegertracing.io/docs/1.22/cli/[Jaeger CLI flags documentation] for more information. + +// end::self-managed[] diff --git a/docs/tab-widgets/open-kibana-widget.asciidoc new file mode 100644 index 00000000000..1947f97b537 --- /dev/null +++ b/docs/tab-widgets/open-kibana-widget.asciidoc @@ -0,0 +1,40 @@ +++++
+<!-- tab-widget button markup (hosted / self-managed) not recovered -->
+++++
+
+include::open-kibana.asciidoc[tag=cloud]
+
+++++
+<!-- self-managed tab panel markup not recovered -->
+++++
+
+include::open-kibana.asciidoc[tag=self-managed]
+
+++++
+<!-- closing tab-widget markup not recovered -->
+++++ \ No newline at end of file diff --git a/docs/tab-widgets/open-kibana.asciidoc b/docs/tab-widgets/open-kibana.asciidoc new file mode 100644 index 00000000000..b1665ea5e9e --- /dev/null +++ b/docs/tab-widgets/open-kibana.asciidoc @@ -0,0 +1,10 @@ +// tag::cloud[] +. https://cloud.elastic.co/[Log in] to your {ecloud} account. + +. Navigate to the {kib} endpoint in your deployment. +// end::cloud[] + +// tag::self-managed[] +Point your browser to http://localhost:5601[http://localhost:5601], replacing +`localhost` with the name of the {kib} host. +// end::self-managed[] \ No newline at end of file diff --git a/docs/tab-widgets/spin-up-stack-widget.asciidoc b/docs/tab-widgets/spin-up-stack-widget.asciidoc new file mode 100644 index 00000000000..6e913212257 --- /dev/null +++ b/docs/tab-widgets/spin-up-stack-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
+<!-- tab-widget button markup (hosted / self-managed) not recovered -->
+++++
+
+include::spin-up-stack.asciidoc[tag=ess]
+
+++++
+<!-- self-managed tab panel markup not recovered -->
+++++
+
+include::spin-up-stack.asciidoc[tag=self-managed]
+
+++++
+<!-- closing tab-widget markup not recovered -->
++++
\ No newline at end of file
diff --git a/docs/tab-widgets/spin-up-stack.asciidoc b/docs/tab-widgets/spin-up-stack.asciidoc
new file mode 100644
index 00000000000..75f4e99f582
--- /dev/null
+++ b/docs/tab-widgets/spin-up-stack.asciidoc
@@ -0,0 +1,51 @@
+// tag::ess[]
+There's no faster way to get started with Elastic APM than with our hosted {ess} on {ecloud}.
+{ess} is available on AWS, GCP, and Azure,
+and automatically configures APM Server to work with {es} and {kib}:
+
+. {ess-trial}[Get a free trial].
+
+. Log into {ess-console}[Elastic Cloud].
+
+. Click *Create deployment*.
+
+. Select *Elastic Observability* and give your deployment a name.
+
+. Click *Create deployment* and copy the password for the `elastic` user.
+
+. Select *APM* from the menu on the left and make note of the APM endpoint and APM Server secret token.
+You'll need these in step two.
+
+// end::ess[]
+
+// tag::self-managed[]
+To install and run {es} and {kib}, see {stack-gs}/get-started-elastic-stack.html[getting started with the {stack}].
+
+Next, install, set up, and run APM Server:
+
+. {apm-server-ref-v}/installing.html[Install APM Server].
+. {apm-server-ref-v}/apm-server-configuration.html[Set up APM Server].
+. {apm-server-ref-v}/setting-up-and-running.html[Start APM Server].
+
+Use the config file if you need to change the default configuration that APM Server uses to connect to {es},
+or if you need to specify credentials:
+
+* {apm-server-ref-v}/configuring-howto-apm-server.html[Configuring APM Server]
+** {apm-server-ref-v}/configuration-process.html[General configuration options]
+** {apm-server-ref-v}/elasticsearch-output.html[Configure the {es} output]
+
+[[secure-api-access]]
+If you change the listen address from `localhost` to something that is accessible from outside of the machine,
+we recommend setting up firewall rules to ensure that only your own systems can access the API.
+Alternatively,
+you can use {apm-server-ref-v}/securing-apm-server.html[TLS and a secret token or API key].
+
+If you have APM Server running on the same host as your service,
+you can configure it to listen on a Unix domain socket.
+
+[[more-information]]
+TIP: For detailed instructions on how to install and secure APM Server in your server environment,
+including details on how to run APM Server in a highly available environment,
+please see the full {apm-server-ref-v}/index.html[APM Server documentation].
+
+// end::self-managed[]
diff --git a/docs/transaction-api.asciidoc b/docs/transaction-api.asciidoc
index 38bf2aa4af1..0aec3a321e9 100644
--- a/docs/transaction-api.asciidoc
+++ b/docs/transaction-api.asciidoc
@@ -1,15 +1,16 @@
 [[transaction-api]]
 === Transactions
 
-Transactions are events corresponding to an incoming request or similar task occurring in a monitored service.
+Transactions are events corresponding to an incoming request or similar task occurring in a monitored service.
 
 [[transaction-schema]]
 [float]
 ==== Transaction Schema
 
-The APM Server uses JSON Schema for validating requests. The specification for transactions is defined below:
+APM Server uses JSON Schema to validate requests. The specification for transactions is defined on
+{github_repo_link}/docs/spec/v2/transaction.json[GitHub] and included below:
 
 [source,json]
 ----
-include::./spec/transactions/transaction.json[]
+include::./spec/v2/transaction.json[]
 ----
diff --git a/docs/transaction-metrics.asciidoc b/docs/transaction-metrics.asciidoc
new file mode 100644
index 00000000000..04f297aec90
--- /dev/null
+++ b/docs/transaction-metrics.asciidoc
@@ -0,0 +1,86 @@
+[x-pack]
+[[transaction-metrics]]
+== Configure transaction metrics
+
+++++
+<titleabbrev>Transaction metrics</titleabbrev>
+++++
+
+When enabled, {beatname_uc} produces transaction histogram metrics that are used to power the APM app.
+Shifting this responsibility from the APM app to APM Server removes the need to store unsampled transactions, reducing storage costs.
+
+Example config file:
+
+["source","yaml"]
+----
+apm-server:
+  aggregation:
+    transactions:
+      enabled: true
+      interval: 1m
+  sampling:
+    keep_unsampled: false
+----
+
+[float]
+[[configuration-aggregation]]
+=== Configuration options: `apm-server.aggregation.transactions.*`
+
+[[transactions-enabled]]
+[float]
+==== `enabled`
+
+Enables the collection and publishing of transaction metrics.
+Enabling this setting removes the need to store unsampled transactions, reducing storage costs.
+Storing unsampled transactions is controlled independently with <<sampling-keep_unsampled,`sampling.keep_unsampled`>>.
+
+Default: `true`.
+
+IMPORTANT: To prevent inaccuracies in the APM app, transaction metrics must also be enabled in
+Kibana with `xpack.apm.searchAggregatedTransactions`.
+See {kibana-ref}/apm-settings-in-kibana.html[APM app settings] for more information.
+
+[[transactions-interval]]
+[float]
+==== `interval`
+
+Controls the frequency of metrics publication.
+
+Default: `1m`.
+
+[[transactions-max_groups]]
+[float]
+==== `max_groups`
+
+Maximum number of transaction groups to keep track of.
+Once this number is exceeded, APM Server records a separate metrics document for each transaction
+that does not fall into one of the tracked transaction groups.
+
+Default: `10000`.
+
+[[transactions-hdrhistogram_significant_figures]]
+[float]
+==== `hdrhistogram_significant_figures`
+
+The fixed, worst-case percentage error (specified as a number of significant digits)
+to maintain for recorded metrics.
+Supported values are `1` through `5`.
+See {ref}/search-aggregations-metrics-percentile-aggregation.html#_hdr_histogram_2[HDR histogram] for more information.
+
+Default: `2`.
+
+[float]
+[[configuration-sampling]]
+=== Configuration options: `apm-server.sampling.*`
+
+[[sampling-keep_unsampled]]
+[float]
+==== `keep_unsampled`
+
+Controls the recording of unsampled transaction documents.
+Dropping unsampled documents (`keep_unsampled: false`) reduces APM's storage consumption.
+
+Default: `true`.
+
+IMPORTANT: Unsampled transactions should only be dropped when `apm-server.aggregation.transactions.enabled` is `true`;
+otherwise, the APM app will report inaccurate metrics.
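+
+To make the `hdrhistogram_significant_figures` guarantee concrete, the following
+is a minimal Go sketch, assuming the
+https://github.com/HdrHistogram/hdrhistogram-go[hdrhistogram-go] library
+(APM Server depends on `elastic/go-hdrhistogram`, a fork of the same algorithm):
+with two significant figures, every recorded value is accurate to within 1%.
+
+[source,go]
+----
+package main
+
+import (
+	"fmt"
+
+	hdrhistogram "github.com/HdrHistogram/hdrhistogram-go"
+)
+
+func main() {
+	// Track durations from 1µs up to 30 minutes (in µs), keeping
+	// 2 significant figures: worst-case relative error is 1% (10^-2).
+	h := hdrhistogram.New(1, 1800000000, 2)
+
+	_ = h.RecordValue(12345) // bucketed with at most ~1% error
+
+	// Percentiles agree with recorded values to 2 significant figures.
+	fmt.Println(h.ValueAtQuantile(99.0))
+}
+----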
diff --git a/docs/troubleshooting.asciidoc b/docs/troubleshooting.asciidoc index 62bc85e7a84..c49ca324906 100644 --- a/docs/troubleshooting.asciidoc +++ b/docs/troubleshooting.asciidoc @@ -1,13 +1,9 @@ -////////////////////////////////////////////////////////////////////////// -//// This content is mainly copied from filebeat and adapted for apm-server -////////////////////////////////////////////////////////////////////////// - [[troubleshooting]] = Troubleshoot [partintro] -- -If you have issues installing or running APM Server, +If you have issues installing or running APM Server, read the following tips: * <> @@ -22,6 +18,20 @@ Other sections in the documentation may also be helpful: * <> * {apm-overview-ref-v}/agent-server-compatibility.html[Agent/Server compatibility matrix] +If your issue is potentially related to other components of the APM ecosystem, +don't forget to check the relevant troubleshooting guides: + +* {kibana-ref}/troubleshooting.html[APM app troubleshooting] +* {apm-dotnet-ref-v}/troubleshooting.html[.NET agent troubleshooting] +* {apm-go-ref-v}/troubleshooting.html[Go agent troubleshooting] +* {apm-ios-ref-v}/troubleshooting.html[iOS agent troubleshooting] +* {apm-java-ref-v}/trouble-shooting.html[Java agent troubleshooting] +* {apm-node-ref-v}/troubleshooting.html[Node.js agent troubleshooting] +* {apm-php-ref-v}/troubleshooting.html[PHP agent troubleshooting] +* {apm-py-ref-v}/troubleshooting.html[Python agent troubleshooting] +* {apm-ruby-ref-v}/debugging.html[Ruby agent troubleshooting] +* {apm-rum-ref-v}/troubleshooting.html[RUM troubleshooting] + -- include::common-problems.asciidoc[] diff --git a/docs/upgrading.asciidoc b/docs/upgrading.asciidoc index 370d89b509c..9bc61be4dec 100644 --- a/docs/upgrading.asciidoc +++ b/docs/upgrading.asciidoc @@ -14,7 +14,8 @@ Any exceptions will be explained in <>. * Review the APM Server <> and <> for changes between your current APM Server version and the one you are upgrading to. -* Review the APM stack {apm-overview-ref-v}/apm-release-notes.html[release highlights] and {apm-overview-ref-v}/apm-breaking-changes.html[breaking changes] for highlights and important changes to other APM components. +* Review the APM stack {apm-overview-ref-v}/apm-breaking-changes.html[breaking changes] and Observability +{observability-guide}/whats-new.html[What's new in {minor-version}] for important changes to other APM components. [discrete] [[upgrade-order]] diff --git a/docs/version.asciidoc b/docs/version.asciidoc index 75e42442eb1..33bff56605b 100644 --- a/docs/version.asciidoc +++ b/docs/version.asciidoc @@ -1,17 +1,20 @@ // doc-branch can be: master, 8.0, 8.1, etc. 
:doc-branch: master -:go-version: 1.14.7 +:go-version: 1.16.6 :python: 3.7 :docker: 1.12 :docker-compose: 1.11 include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] -// Agent links -:apm-py-ref-v: https://www.elastic.co/guide/en/apm/agent/python/{apm-py-branch} +// Agent link attributes +// Used in conjunction with the stack attributes found here: https://github.com/elastic/docs/tree/7d62a6b66d6e9c96e4dd9a96c3dc7c75ceba0288/shared/versions/stack +:apm-dotnet-ref-v: https://www.elastic.co/guide/en/apm/agent/dotnet/{apm-dotnet-branch} +:apm-go-ref-v: https://www.elastic.co/guide/en/apm/agent/go/{apm-go-branch} +:apm-ios-ref-v: https://www.elastic.co/guide/en/apm/agent/swift/{apm-ios-branch} +:apm-java-ref-v: https://www.elastic.co/guide/en/apm/agent/java/{apm-java-branch} :apm-node-ref-v: https://www.elastic.co/guide/en/apm/agent/nodejs/{apm-node-branch} -:apm-rum-ref-v: https://www.elastic.co/guide/en/apm/agent/rum-js/{apm-rum-branch} +:apm-php-ref-v: https://www.elastic.co/guide/en/apm/agent/php/{apm-php-branch} +:apm-py-ref-v: https://www.elastic.co/guide/en/apm/agent/python/{apm-py-branch} :apm-ruby-ref-v: https://www.elastic.co/guide/en/apm/agent/ruby/{apm-ruby-branch} -:apm-java-ref-v: https://www.elastic.co/guide/en/apm/agent/java/{apm-java-branch} -:apm-go-ref-v: https://www.elastic.co/guide/en/apm/agent/go/{apm-go-branch} -:apm-dotnet-ref-v: https://www.elastic.co/guide/en/apm/agent/dotnet/{apm-dotnet-branch} +:apm-rum-ref-v: https://www.elastic.co/guide/en/apm/agent/rum-js/{apm-rum-branch} diff --git a/elasticsearch/backoff.go b/elasticsearch/backoff.go new file mode 100644 index 00000000000..2fc74e0c176 --- /dev/null +++ b/elasticsearch/backoff.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearch + +import ( + "time" + + "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" +) + +type backoffFunc func(int) time.Duration + +var ( + // DefaultBackoffConfig is the default backoff configuration used for + // es clients. + DefaultBackoffConfig = elasticsearch.Backoff{ + Init: time.Second, + Max: time.Minute, + } + + // DefaultBackoff is an exponential backoff configured with default + // backoff settings. + DefaultBackoff = exponentialBackoff(DefaultBackoffConfig) +) + +func exponentialBackoff(b elasticsearch.Backoff) backoffFunc { + return func(attempts int) time.Duration { + // Attempts starts at 1, after there's already been a failure. 
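+		// With the default configuration (Init = 1s, Max = 1m) the resulting
+		// waits are 1s, 2s, 4s, 8s, 16s, 32s, then a flat 1m from the
+		// seventh attempt on.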
+ // https://github.com/elastic/go-elasticsearch/blob/de2391/estransport/estransport.go#L339 + next := b.Init * (1 << (attempts - 1)) + if next > b.Max { + next = b.Max + } + return next + } +} diff --git a/elasticsearch/backoff_test.go b/elasticsearch/backoff_test.go new file mode 100644 index 00000000000..3ac78f68bc7 --- /dev/null +++ b/elasticsearch/backoff_test.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearch + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" +) + +func TestExponentialBackoff(t *testing.T) { + init := 2 * time.Second + backoffCfg := elasticsearch.Backoff{ + Init: init, + Max: time.Minute, + } + backoffFn := exponentialBackoff(backoffCfg) + assert.Equal(t, init, backoffFn(1)) + assert.Equal(t, 4*time.Second, backoffFn(2)) + assert.Equal(t, 8*time.Second, backoffFn(3)) + assert.Equal(t, 16*time.Second, backoffFn(4)) + assert.Equal(t, 32*time.Second, backoffFn(5)) + assert.Equal(t, time.Minute, backoffFn(20)) +} diff --git a/elasticsearch/bulk.go b/elasticsearch/bulk.go new file mode 100644 index 00000000000..1fd4fc29bb9 --- /dev/null +++ b/elasticsearch/bulk.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearch + +import ( + "context" + "io" + "time" + + esutilv7 "github.com/elastic/go-elasticsearch/v7/esutil" + esutilv8 "github.com/elastic/go-elasticsearch/v8/esutil" +) + +// BulkIndexer represents a parallel, asynchronous, efficient indexer for Elasticsearch. +// +// This is a subset of the go-elasticsearch/esutil.BulkIndexer interface, suitable for +// use with either a v7 or v8 client. +type BulkIndexer interface { + // Add adds an item to the indexer. It returns an error when the item cannot be added. + // Use the OnSuccess and OnFailure callbacks to get the operation result for the item. + // + // You must call the Close() method after you're done adding items. 
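+	//
+	// A hypothetical sketch of the expected call pattern (ctx, client, and
+	// body here are illustrative, not part of this package):
+	//
+	//	indexer, err := client.NewBulkIndexer(BulkIndexerConfig{NumWorkers: 2})
+	//	if err != nil {
+	//		// handle error
+	//	}
+	//	_ = indexer.Add(ctx, BulkIndexerItem{Index: "apm-events", Action: "index", Body: body})
+	//	_ = indexer.Close(ctx)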
+ // + // It is safe for concurrent use. When it's called from goroutines, + // they must finish before the call to Close, eg. using sync.WaitGroup. + Add(context.Context, BulkIndexerItem) error + + // Close waits until all added items are flushed and closes the indexer. + Close(context.Context) error + + // Stats returns indexer statistics. + Stats() BulkIndexerStats +} + +// BulkIndexerConfig represents configuration of the indexer. +// +// This is a subset of the go-elasticsearch/esutil.BulkIndexerConfig type, suitable for use +// with either a v7 or v8 client. +type BulkIndexerConfig struct { + NumWorkers int // The number of workers. Defaults to runtime.NumCPU(). + FlushBytes int // The flush threshold in bytes. Defaults to 5MB. + FlushInterval time.Duration // The flush threshold as duration. Defaults to 30sec. + + OnError func(context.Context, error) // Called for indexer errors. + OnFlushStart func(context.Context) context.Context // Called when the flush starts. + OnFlushEnd func(context.Context) // Called when the flush ends. + + // Parameters of the Bulk API. + Index string + Pipeline string + Timeout time.Duration +} + +// BulkIndexerItem represents an indexer item. +// +// This is a clone of the go-elasticsearch/esutil.BulkIndexerItem type, suitable for use +// with either a v7 or v8 client. +type BulkIndexerItem struct { + Index string + Action string + DocumentID string + Body io.Reader + RetryOnConflict *int + + OnSuccess func(context.Context, BulkIndexerItem, BulkIndexerResponseItem) // Per item + OnFailure func(context.Context, BulkIndexerItem, BulkIndexerResponseItem, error) // Per item +} + +type ( + BulkIndexerStats esutilv7.BulkIndexerStats + BulkIndexerResponse esutilv7.BulkIndexerResponse + BulkIndexerResponseItem esutilv7.BulkIndexerResponseItem +) + +type v7BulkIndexer struct { + esutilv7.BulkIndexer +} + +func (b v7BulkIndexer) Add(ctx context.Context, item BulkIndexerItem) error { + itemv7 := esutilv7.BulkIndexerItem{ + Index: item.Index, + Action: item.Action, + DocumentID: item.DocumentID, + Body: item.Body, + RetryOnConflict: item.RetryOnConflict, + } + if item.OnSuccess != nil { + itemv7.OnSuccess = func(ctx context.Context, itemv7 esutilv7.BulkIndexerItem, resp esutilv7.BulkIndexerResponseItem) { + item.OnSuccess(ctx, item, BulkIndexerResponseItem(resp)) + } + } + if item.OnFailure != nil { + itemv7.OnFailure = func(ctx context.Context, itemv8 esutilv7.BulkIndexerItem, resp esutilv7.BulkIndexerResponseItem, err error) { + item.OnFailure(ctx, item, BulkIndexerResponseItem(resp), err) + } + } + return b.BulkIndexer.Add(ctx, itemv7) +} + +func (b v7BulkIndexer) Stats() BulkIndexerStats { + return BulkIndexerStats(b.BulkIndexer.Stats()) +} + +type v8BulkIndexer struct { + esutilv8.BulkIndexer +} + +func (b v8BulkIndexer) Add(ctx context.Context, item BulkIndexerItem) error { + itemv8 := esutilv8.BulkIndexerItem{ + Index: item.Index, + Action: item.Action, + DocumentID: item.DocumentID, + Body: item.Body, + RetryOnConflict: item.RetryOnConflict, + } + if item.OnSuccess != nil { + itemv8.OnSuccess = func(ctx context.Context, itemv8 esutilv8.BulkIndexerItem, resp esutilv8.BulkIndexerResponseItem) { + item.OnSuccess(ctx, item, BulkIndexerResponseItem(resp)) + } + } + if item.OnFailure != nil { + itemv8.OnFailure = func(ctx context.Context, itemv8 esutilv8.BulkIndexerItem, resp esutilv8.BulkIndexerResponseItem, err error) { + item.OnFailure(ctx, item, BulkIndexerResponseItem(resp), err) + } + } + return b.BulkIndexer.Add(ctx, itemv8) +} + +func (b v8BulkIndexer) 
Stats() BulkIndexerStats { + return BulkIndexerStats(b.BulkIndexer.Stats()) +} diff --git a/elasticsearch/client.go b/elasticsearch/client.go index 631c8f4fcad..1b536715b83 100644 --- a/elasticsearch/client.go +++ b/elasticsearch/client.go @@ -21,7 +21,6 @@ import ( "context" "encoding/base64" "encoding/json" - "io" "io/ioutil" "net/http" @@ -30,104 +29,194 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/version" esv7 "github.com/elastic/go-elasticsearch/v7" - "github.com/elastic/go-elasticsearch/v7/esapi" + esapiv7 "github.com/elastic/go-elasticsearch/v7/esapi" + esutilv7 "github.com/elastic/go-elasticsearch/v7/esutil" esv8 "github.com/elastic/go-elasticsearch/v8" + esutilv8 "github.com/elastic/go-elasticsearch/v8/esutil" ) +var retryableStatuses = []int{ + http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout, +} + // Client is an interface designed to abstract away version differences between elasticsearch clients type Client interface { + // NewBulkIndexer returns a new BulkIndexer using this client for making the requests. + NewBulkIndexer(BulkIndexerConfig) (BulkIndexer, error) + // Perform satisfies esapi.Transport Perform(*http.Request) (*http.Response, error) - // TODO: deprecate - SearchQuery(ctx context.Context, index string, body io.Reader) (int, io.ReadCloser, error) } type clientV8 struct { *esv8.Client } -func (c clientV8) SearchQuery(ctx context.Context, index string, body io.Reader) (int, io.ReadCloser, error) { - response, err := c.Search( - c.Search.WithContext(ctx), - c.Search.WithIndex(index), - c.Search.WithBody(body), - c.Search.WithTrackTotalHits(true), - c.Search.WithPretty(), - ) +func (c clientV8) NewBulkIndexer(config BulkIndexerConfig) (BulkIndexer, error) { + indexer, err := esutilv8.NewBulkIndexer(esutilv8.BulkIndexerConfig{ + Client: c.Client, + NumWorkers: config.NumWorkers, + FlushBytes: config.FlushBytes, + FlushInterval: config.FlushInterval, + OnError: config.OnError, + OnFlushStart: config.OnFlushStart, + OnFlushEnd: config.OnFlushEnd, + Index: config.Index, + Pipeline: config.Pipeline, + Timeout: config.Timeout, + }) if err != nil { - return 0, nil, err + return nil, err } - return response.StatusCode, response.Body, nil + return v8BulkIndexer{indexer}, nil } type clientV7 struct { *esv7.Client } -func (c clientV7) SearchQuery(ctx context.Context, index string, body io.Reader) (int, io.ReadCloser, error) { - response, err := c.Search( - c.Search.WithContext(ctx), - c.Search.WithIndex(index), - c.Search.WithBody(body), - c.Search.WithTrackTotalHits(true), - c.Search.WithPretty(), - ) +func (c clientV7) NewBulkIndexer(config BulkIndexerConfig) (BulkIndexer, error) { + indexer, err := esutilv7.NewBulkIndexer(esutilv7.BulkIndexerConfig{ + Client: c.Client, + NumWorkers: config.NumWorkers, + FlushBytes: config.FlushBytes, + FlushInterval: config.FlushInterval, + OnError: config.OnError, + OnFlushStart: config.OnFlushStart, + OnFlushEnd: config.OnFlushEnd, + Index: config.Index, + Pipeline: config.Pipeline, + Timeout: config.Timeout, + }) if err != nil { - return 0, nil, err + return nil, err } - return response.StatusCode, response.Body, nil + return v7BulkIndexer{indexer}, nil +} + +// ClientParams holds parameters for NewClientParams. +type ClientParams struct { + // Config holds the user-defined configuration: Elasticsearch hosts, + // max retries, etc. 
+ Config *Config + + // Transport holds a net/http.RoundTripper to use for sending requests + // to Elasticsearch. + // + // If Transport is nil, then a net/http.Transport will be constructed + // with NewHTTPTransport(Config). + Transport http.RoundTripper } -// NewClient parses the given config and returns a version-aware client as an interface +// NewClient returns a stack version-aware Elasticsearch client, +// equivalent to NewClientParams(ClientParams{Config: config}). func NewClient(config *Config) (Client, error) { - if config == nil { + return NewClientParams(ClientParams{Config: config}) +} + +// NewClientParams returns a stack version-aware Elasticsearch client. +func NewClientParams(args ClientParams) (Client, error) { + if args.Config == nil { return nil, errConfigMissing } - transport, addresses, headers, err := connectionConfig(config) + + transport := args.Transport + if transport == nil { + httpTransport, err := NewHTTPTransport(args.Config) + if err != nil { + return nil, err + } + transport = httpTransport + } + + addrs, err := addresses(args.Config) if err != nil { return nil, err } - return NewVersionedClient(config.APIKey, config.Username, config.Password, addresses, headers, transport) -} -// NewVersionedClient returns the right elasticsearch client for the current Stack version, as an interface -func NewVersionedClient(apikey, user, pwd string, addresses []string, headers http.Header, transport http.RoundTripper) (Client, error) { - if apikey != "" { - apikey = base64.StdEncoding.EncodeToString([]byte(apikey)) + var headers http.Header + if len(args.Config.Headers) > 0 { + headers = make(http.Header, len(args.Config.Headers)) + for k, v := range args.Config.Headers { + headers.Set(k, v) + } + } + + var apikey string + if args.Config.APIKey != "" { + apikey = base64.StdEncoding.EncodeToString([]byte(args.Config.APIKey)) } - transport = apmelasticsearch.WrapRoundTripper(transport) - version := common.MustNewVersion(version.GetDefaultVersion()) - if version.IsMajor(8) { - c, err := newV8Client(apikey, user, pwd, addresses, headers, transport) - return clientV8{c}, err + + newClient := newV7Client + if version := common.MustNewVersion(version.GetDefaultVersion()); version.IsMajor(8) { + newClient = newV8Client } - c, err := newV7Client(apikey, user, pwd, addresses, headers, transport) - return clientV7{c}, err + return newClient( + apikey, args.Config.Username, args.Config.Password, + addrs, + headers, + apmelasticsearch.WrapRoundTripper(transport), + args.Config.MaxRetries, + exponentialBackoff(args.Config.Backoff), + ) } -func newV7Client(apikey, user, pwd string, addresses []string, headers http.Header, transport http.RoundTripper) (*esv7.Client, error) { - return esv7.NewClient(esv7.Config{ - APIKey: apikey, - Username: user, - Password: pwd, - Addresses: addresses, - Transport: transport, - Header: headers, +func newV7Client( + apikey, user, pwd string, + addresses []string, + headers http.Header, + transport http.RoundTripper, + maxRetries int, + fn backoffFunc, +) (Client, error) { + c, err := esv7.NewClient(esv7.Config{ + APIKey: apikey, + Username: user, + Password: pwd, + Addresses: addresses, + Transport: transport, + Header: headers, + RetryOnStatus: retryableStatuses, + EnableRetryOnTimeout: true, + RetryBackoff: fn, + MaxRetries: maxRetries, }) + if err != nil { + return nil, err + } + return clientV7{c}, nil } -func newV8Client(apikey, user, pwd string, addresses []string, headers http.Header, transport http.RoundTripper) (*esv8.Client, error) { - return 
esv8.NewClient(esv8.Config{ - APIKey: apikey, - Username: user, - Password: pwd, - Addresses: addresses, - Transport: transport, - Header: headers, +func newV8Client( + apikey, user, pwd string, + addresses []string, + headers http.Header, + transport http.RoundTripper, + maxRetries int, + fn backoffFunc, +) (Client, error) { + c, err := esv8.NewClient(esv8.Config{ + APIKey: apikey, + Username: user, + Password: pwd, + Addresses: addresses, + Transport: transport, + Header: headers, + RetryOnStatus: retryableStatuses, + EnableRetryOnTimeout: true, + RetryBackoff: fn, + MaxRetries: maxRetries, }) + if err != nil { + return nil, err + } + return clientV8{c}, nil } -func doRequest(ctx context.Context, transport esapi.Transport, req esapi.Request, out interface{}) error { +func doRequest(ctx context.Context, transport esapiv7.Transport, req esapiv7.Request, out interface{}) error { resp, err := req.Do(ctx, transport) if err != nil { return err diff --git a/elasticsearch/config.go b/elasticsearch/config.go index e6b9f78fe2a..9f269f7b4ff 100644 --- a/elasticsearch/config.go +++ b/elasticsearch/config.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" + "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" ) const ( @@ -55,11 +56,20 @@ type Config struct { Password string `config:"password"` APIKey string `config:"api_key"` Headers map[string]string `config:"headers"` + MaxRetries int `config:"max_retries"` + + elasticsearch.Backoff `config:"backoff"` } // DefaultConfig returns a default config. func DefaultConfig() *Config { - return &Config{Hosts: []string{"localhost:9200"}, Protocol: "http", Timeout: esConnectionTimeout} + return &Config{ + Hosts: []string{"localhost:9200"}, + Protocol: "http", + Timeout: esConnectionTimeout, + MaxRetries: 3, + Backoff: DefaultBackoffConfig, + } } // Hosts is an array of host strings and needs to have at least one entry @@ -73,25 +83,6 @@ func (h Hosts) Validate() error { return nil } -func connectionConfig(config *Config) (http.RoundTripper, []string, http.Header, error) { - addrs, err := addresses(config) - if err != nil { - return nil, nil, nil, err - } - transp, err := httpTransport(config) - if err != nil { - return nil, nil, nil, err - } - var headers http.Header - if len(config.Headers) > 0 { - headers = make(http.Header) - for k, v := range config.Headers { - headers.Set(k, v) - } - } - return transp, addrs, headers, nil -} - func httpProxyURL(cfg *Config) (func(*http.Request) (*url.URL, error), error) { if cfg.ProxyDisable { return nil, nil @@ -124,7 +115,8 @@ func addresses(cfg *Config) ([]string, error) { return addresses, nil } -func httpTransport(cfg *Config) (*http.Transport, error) { +// NewHTTPTransport returns a new net/http.Transport for cfg. 
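+// The transport honors cfg's timeout, TLS, and proxy settings; NewClientParams
+// additionally wraps it with APM instrumentation before handing it to the
+// Elasticsearch client.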
+func NewHTTPTransport(cfg *Config) (*http.Transport, error) { proxy, err := httpProxyURL(cfg) if err != nil { return nil, err @@ -137,10 +129,7 @@ func httpTransport(cfg *Config) (*http.Transport, error) { } } dialer := transport.NetDialer(cfg.Timeout) - tlsDialer, err := transport.TLSDialer(dialer, tlsConfig, cfg.Timeout) - if err != nil { - return nil, err - } + tlsDialer := transport.TLSDialer(dialer, tlsConfig, cfg.Timeout) return &http.Transport{ Proxy: proxy, Dial: dialer.Dial, diff --git a/elasticsearch/config_test.go b/elasticsearch/config_test.go index 3aa4e53d885..2b5a0d996a9 100644 --- a/elasticsearch/config_test.go +++ b/elasticsearch/config_test.go @@ -141,7 +141,15 @@ func TestBeatsConfigSynced(t *testing.T) { // We expect the libbeat struct to be a superset of all other // fields defined in the local struct, with identical tags and // types. Struct field names do not need to match. + // + // TODO(simitt): take a closer look at ES ouput changes in libbeat + // introduced with https://github.com/elastic/beats/pull/25219 + localStructExceptions := map[string]interface{}{ + "ssl": nil, "timeout": nil, "proxy_disable": nil, "proxy_url": nil} for name, localStructField := range localStructFields { + if _, ok := localStructExceptions[name]; ok { + continue + } require.Contains(t, libbeatStructFields, name) libbeatStructField := libbeatStructFields[name] assert.Equal(t, localStructField.structTag, libbeatStructField.structTag) @@ -165,6 +173,8 @@ func TestBeatsConfigSynced(t *testing.T) { "loadbalance", "max_retries", "parameters", + "transport", + "non_indexable_policy", } for name := range libbeatStructFields { assert.Contains(t, knownUnhandled, name) diff --git a/elasticsearch/estest/client.go b/elasticsearch/estest/client.go deleted file mode 100644 index a5a9120d9ac..00000000000 --- a/elasticsearch/estest/client.go +++ /dev/null @@ -1,70 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package estest - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/elasticsearch" -) - -// Transport can be used to pass to test Elasticsearch Client for more control over client behavior -type Transport struct { - roundTripFn func(req *http.Request) (*http.Response, error) - executed int -} - -// RoundTrip implements http.RoundTripper interface -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - t.executed++ - return t.roundTripFn(req) -} - -// NewTransport creates test transport instance returning status code and body according to input parameters when called. 
-func NewTransport(t *testing.T, statusCode int, esBody map[string]interface{}) *Transport { - - return &Transport{ - roundTripFn: func(_ *http.Request) (*http.Response, error) { - if statusCode == -1 { - return nil, errors.New("client error") - } - var body io.ReadCloser - if esBody == nil { - body = ioutil.NopCloser(bytes.NewReader([]byte{})) - } else { - resp, err := json.Marshal(esBody) - require.NoError(t, err) - body = ioutil.NopCloser(bytes.NewReader(resp)) - } - return &http.Response{StatusCode: statusCode, Body: body}, nil - }, - } -} - -// NewElasticsearchClient creates ES client using the given transport instance -func NewElasticsearchClient(transport *Transport) (elasticsearch.Client, error) { - return elasticsearch.NewVersionedClient("", "", "", []string{}, nil, transport) -} diff --git a/elasticsearch/license.go b/elasticsearch/license.go new file mode 100644 index 00000000000..5a77eca515d --- /dev/null +++ b/elasticsearch/license.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearch + +import ( + "context" + + "github.com/elastic/beats/v7/libbeat/licenser" + "github.com/elastic/go-elasticsearch/v7/esapi" +) + +// GetLicense gets the Elasticsearch licensing information. 
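+// It performs the equivalent of a `GET /_license` request and decodes the
+// top-level "license" object of the response.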
+func GetLicense(ctx context.Context, client Client) (licenser.License, error) { + var result struct { + License licenser.License `json:"license"` + } + req := esapi.LicenseGetRequest{} + if err := doRequest(ctx, client, req, &result); err != nil { + return licenser.License{}, err + } + return result.License, nil +} diff --git a/elasticsearch/security_api.go b/elasticsearch/security_api.go index 459ffd4eab5..12fcf53e0ff 100644 --- a/elasticsearch/security_api.go +++ b/elasticsearch/security_api.go @@ -67,9 +67,10 @@ func HasPrivileges(ctx context.Context, client Client, privileges HasPrivilegesR } type CreateAPIKeyRequest struct { - Name string `json:"name"` - Expiration *string `json:"expiration,omitempty"` - RoleDescriptors RoleDescriptor `json:"role_descriptors"` + Name string `json:"name"` + Expiration *string `json:"expiration,omitempty"` + RoleDescriptors RoleDescriptor `json:"role_descriptors"` + Metadata map[string]interface{} `json:"metadata,omitempty"` } type CreateAPIKeyResponse struct { @@ -97,7 +98,9 @@ type HasPrivilegesResponse struct { } type InvalidateAPIKeyRequest struct { - APIKeyQuery + // normally the Elasticsearch API will require either Ids or Name, but not both + IDs []string `json:"ids,omitempty"` + Name *string `json:"name,omitempty"` } type InvalidateAPIKeyResponse struct { @@ -119,9 +122,10 @@ type Application struct { type APIKeyResponse struct { APIKey - Creation int64 `json:"creation"` - Invalidated bool `json:"invalidated"` - Username string `json:"username"` + Creation int64 `json:"creation"` + Invalidated bool `json:"invalidated"` + Username string `json:"username"` + Metadata map[string]interface{} `json:"metadata,omitempty"` } type APIKeyQuery struct { diff --git a/go.mod b/go.mod index a1015cfdf57..cefb422100e 100644 --- a/go.mod +++ b/go.mod @@ -3,87 +3,84 @@ module github.com/elastic/apm-server go 1.13 require ( - github.com/akavel/rsrc v0.9.0 // indirect - github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 - github.com/census-instrumentation/opencensus-proto v0.2.1 + github.com/DataDog/zstd v1.4.4 // indirect + github.com/apache/thrift v0.14.2 github.com/cespare/xxhash/v2 v2.1.1 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/client9/misspell v0.3.5-0.20180309020325-c0b55c823952 // indirect - github.com/dgraph-io/badger/v2 v2.0.3 - github.com/dgraph-io/ristretto v0.0.2 // indirect - github.com/dlclark/regexp2 v1.2.1 // indirect - github.com/dop251/goja v0.0.0-20200824171909-536f9d946569 // indirect - github.com/dop251/goja_nodejs v0.0.0-20200811150831-9bc458b4bbeb // indirect + github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dlclark/regexp2 v1.4.0 // indirect + github.com/dop251/goja v0.0.0-20210912140721-ac5354e9a820 // indirect + github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 // indirect github.com/dustin/go-humanize v1.0.0 github.com/elastic/apm-server/approvaltest v0.0.0-00010101000000-000000000000 - github.com/elastic/beats/v7 v7.0.0-alpha2.0.20200824113715-49e8024953a4 - github.com/elastic/go-elasticsearch/v7 v7.8.0 - github.com/elastic/go-elasticsearch/v8 v8.0.0-20200819071622-59b6a186f8dd + github.com/elastic/beats/v7 v7.0.0-alpha2.0.20210911003435-13e34660f62a + 
github.com/elastic/ecs v1.11.0 + github.com/elastic/gmux v0.1.0 + github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210728153421-6462d8b84e7d + github.com/elastic/go-elasticsearch/v8 v8.0.0-20210727161915-8cf93274b968 github.com/elastic/go-hdrhistogram v0.1.0 - github.com/elastic/go-licenser v0.3.1 - github.com/elastic/go-sysinfo v1.4.0 // indirect - github.com/elastic/go-ucfg v0.8.3 - github.com/fatih/color v1.9.0 // indirect + github.com/elastic/go-ucfg v0.8.4-0.20200415140258-1232bd4774a6 github.com/go-sourcemap/sourcemap v2.1.3+incompatible - github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/googleapis v1.3.1-0.20190914144012-b8d18e97a9a1 // indirect - github.com/golang/protobuf v1.4.2 - github.com/google/addlicense v0.0.0-20190907113143-be125746c2c4 // indirect - github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 - github.com/hashicorp/go-multierror v1.1.0 - github.com/hashicorp/golang-lru v0.5.3 - github.com/ianlancetaylor/demangle v0.0.0-20200715173712-053cf528c12f // indirect - github.com/jaegertracing/jaeger v1.16.0 - github.com/jcmturner/gofork v1.0.0 // indirect - github.com/josephspurrier/goversioninfo v1.2.0 // indirect - github.com/json-iterator/go v1.1.10 + github.com/gofrs/uuid v4.0.0+incompatible + github.com/gogo/protobuf v1.3.2 + github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/golang-lru v0.5.4 + github.com/jaegertracing/jaeger v1.25.0 + github.com/josephspurrier/goversioninfo v1.3.0 // indirect + github.com/json-iterator/go v1.1.11 github.com/jstemmer/go-junit-report v0.9.1 - github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd // indirect - github.com/kr/pretty v0.2.0 // indirect - github.com/magefile/mage v1.10.0 - github.com/mattn/go-colorable v0.1.7 // indirect - github.com/mitchellh/hashstructure v1.0.0 // indirect + github.com/libp2p/go-reuseport v0.0.2 + github.com/magefile/mage v1.11.0 + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/modern-go/reflect2 v1.0.1 - github.com/open-telemetry/opentelemetry-collector v0.2.1-0.20191218182225-c300f1341702 - github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.34.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/procfs v0.1.3 // indirect - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect - github.com/reviewdog/reviewdog v0.9.17 - github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 - github.com/santhosh-tekuri/jsonschema v1.2.4 - github.com/spf13/cobra v0.0.5 + github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/ryanuber/go-glob v1.0.0 + github.com/spf13/cobra v1.2.1 github.com/spf13/pflag 
v1.0.5 - github.com/stretchr/testify v1.6.1 - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c - github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe - github.com/uber/tchannel-go v1.16.0 // indirect + github.com/stretchr/testify v1.7.0 + github.com/tidwall/gjson v1.6.5 + github.com/tidwall/sjson v1.1.1 github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a // indirect - go.elastic.co/apm v1.8.0 - go.elastic.co/apm/module/apmelasticsearch v1.7.2 - go.elastic.co/apm/module/apmgrpc v1.7.0 - go.elastic.co/apm/module/apmhttp v1.7.2 - go.elastic.co/ecszap v0.2.0 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 + go.elastic.co/apm v1.13.1 + go.elastic.co/apm/module/apmelasticsearch v1.12.0 + go.elastic.co/apm/module/apmgrpc v1.12.0 + go.elastic.co/apm/module/apmhttp v1.12.0 + go.elastic.co/ecszap v1.0.0 // indirect go.elastic.co/fastjson v1.1.0 - go.uber.org/atomic v1.6.0 - go.uber.org/zap v1.15.0 - golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200822124328-c89045814202 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 // indirect - golang.org/x/text v0.3.3 // indirect - golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - golang.org/x/tools v0.0.0-20200823205832-c024452afbcd - google.golang.org/grpc v1.29.1 - gopkg.in/yaml.v2 v2.3.0 - howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5 // indirect + go.opentelemetry.io/collector v0.34.0 + go.opentelemetry.io/collector/model v0.34.0 + go.uber.org/atomic v1.9.0 + go.uber.org/multierr v1.7.0 // indirect + go.uber.org/zap v1.19.1 + golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect + golang.org/x/mod v0.5.0 // indirect + golang.org/x/net v0.0.0-20210908191846-a5e095526f91 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect + golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 + golang.org/x/tools v0.1.5 + google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af // indirect + google.golang.org/grpc v1.40.0 + gopkg.in/yaml.v2 v2.4.0 + gotest.tools/gotestsum v1.7.0 + howett.net/plist v0.0.0-20201203080718-1454fab16a06 // indirect ) replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible - github.com/Shopify/sarama => github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 + github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 + github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20210120173147-5c8cb347d877 + github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v0.9.0 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 @@ -92,6 +89,11 @@ replace ( github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d github.com/tonistiigi/fifo => github.com/containerd/fifo 
v0.0.0-20190816180239-bda0ff6ed73c - golang.org/x/tools => golang.org/x/tools v0.0.0-20200602230032-c00d67ef29d0 // release 1.14 - k8s.io/client-go => k8s.io/client-go v0.18.3 + golang.org/x/tools => golang.org/x/tools v0.1.2 ) + +// We replace golang/glog, which is used by ristretto, to avoid polluting the +// command line flags and conflicting with command line flags added by libbeat. +replace github.com/golang/glog => ./internal/glog + +replace go.opentelemetry.io/collector => ./internal/otel_collector diff --git a/go.sum b/go.sum index c82d234375c..8acaec7b858 100644 --- a/go.sum +++ b/go.sum @@ -1,20 +1,47 @@ -4d63.com/embedfiles v0.0.0-20190311033909-995e0740726f/go.mod h1:HxEsUxoVZyRxsZML/S6e2xAuieFMlGO0756ncWx1aXE= -4d63.com/tz v1.1.1-0.20191124060701-6d37baae851b/go.mod h1:SHGqVdL7hd2ZaX2T9uEiOZ/OFAUfCCLURdLPJsd8ZNs= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.63.0/go.mod h1:GmezbQc7T2snqkEXWfZ0sy0VfkB/ivI2DdtJL2DEmlg= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= @@ -23,18 +50,18 @@ code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTg code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a h1:8rqv2w8xEceNwckcF5ONeRt0qBHlh5bnNfFnYTrZbxs= code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= -contrib.go.opencensus.io/exporter/jaeger v0.1.1-0.20190430175949-e8b55949d948/go.mod h1:ukdzwIYYHgZ7QYtwVFQUjiT28BJHiMhTERo32s6qVgM= -contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= -contrib.go.opencensus.io/resource v0.1.2/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-event-hubs-go/v3 v3.1.2/go.mod h1:hR40byNJjKkS74+3RhloPQ8sJ8zFQeJ920Uk3oYY0+k= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= 
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
@@ -43,39 +70,49 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/Azure/go-autorest v12.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
-github.com/Azure/go-autorest/autorest v0.9.4/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/godog v0.7.13/go.mod h1:z2OZ6a3X0/YAKVqLfVzYBwFt3j6uSt3Xrqa7XTtcQE0=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.4.4 h1:+IawcoXhCBylN7ccwdwf8LOH2jKq7NavGpEPanrlTzE=
+github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
-github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -84,66 +121,95 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d h1:g0M6kedfjDpyAAuxqBvJzMNjFzlrQ7Av6LCDFqWierk=
github.com/adriansr/fsnotify v0.0.0-20180417234312-c9bbe1f46f1d/go.mod h1:VykaKG/ofkKje+MSvqjrDsz1wfyHIvEVFljhq2EOZ4g=
github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
-github.com/akavel/rsrc v0.9.0 h1:HwUDC0+tMFWqN4D5G+o5siGD4oVsC3jn6zM8ocjc3nY=
-github.com/akavel/rsrc v0.9.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
+github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw=
+github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI=
github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg=
+github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ=
github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43/go.mod h1:tJPYQG4mnMeUtQvQKNkbsFrnmZOg59Qnf8CcctFv5v4=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
-github.com/antlr/antlr4 v0.0.0-20200225173536-225249fdaef5/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
-github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 h1:Fv9bK1Q+ly/ROk4aJsVMeuIwPel4bEnD8EPiI91nZMg=
-github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
+github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
+github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
+github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY=
+github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM=
github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-lambda-go v1.6.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A=
-github.com/aws/aws-sdk-go v1.23.12/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
+github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go-v2 v0.9.0 h1:dWtJKGRFv3UZkMBQaIzMsF0/y4ge3iQPWTzeC4r/vl4=
github.com/aws/aws-sdk-go-v2 v0.9.0/go.mod h1:sa1GePZ/LfBGI4dSq30f6uR4Tthll8axxtEPvlpXZ8U=
github.com/awslabs/goformation/v3 v3.1.0/go.mod h1:hQ5RXo3GNm2laHWKizDzU5DsDy+yNcenSca2UxN0850=
github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI=
+github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w=
+github.com/bi-zone/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b h1:AP/Y7sqYicnjGDfD5VcY4CIfh1hRXBUavxrvELjTiOE=
-github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
-github.com/bombsimon/wsl v1.2.5 h1:9gTOkIwVtoDZywvX802SDHokeX4kW1cKnV8ZTVAPkRs=
-github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
-github.com/bradleyfalzon/ghinstallation v1.1.0 h1:mwazVinJU0mPyLxIcdtJzu4DhWXFO5lMsWhKyFRIwFk=
-github.com/bradleyfalzon/ghinstallation v1.1.0/go.mod h1:p7iD8KytOOKg2wCqbwvJlq4JGpYMjwjkiqdyUqOIHLI=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM=
github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM=
-github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5zPtfk+aa2gw255FIIoi93zMmuy19o0bc=
+github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw=
-github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8=
github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY=
-github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -154,11 +220,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/client9/misspell v0.3.5-0.20180309020325-c0b55c823952 h1:FHl0TT8LcvoqlozDaRqgFUTVlUec+G6/pB1N7MCHIK0=
-github.com/client9/misspell v0.3.5-0.20180309020325-c0b55c823952/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f h1:fK3ikA1s77arBhpDwFuyO0hUZ2Aa8O6o2Uzy8Q6iLbs=
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f/go.mod h1:RtIewdO+K/czvxvIFCMbPyx7jdxSLL1RZ+DA/Vk8Lwg=
github.com/cloudfoundry/noaa v2.1.0+incompatible h1:hr6VnM5VlYRN3YD+NmAedQLW8686sUMknOSe0mFS2vo=
@@ -166,7 +229,10 @@ github.com/cloudfoundry/noaa v2.1.0+incompatible/go.mod h1:5LmacnptvxzrTvMfL9+EJ
github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI=
github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
@@ -175,6 +241,7 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.3 h1:LoIzb5y9x5l8VKAlyrbusNPXqBY0+kviRloxFUMFwKc=
github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -184,18 +251,29 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ=
github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -203,23 +281,33 @@ github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVz
github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
-github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI=
-github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
-github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
+github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b h1:mUDs72Rlzv6A4YN8w3Ra3hU9x/plOQPcQjZYL/1f5SM=
+github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE=
+github.com/dgraph-io/badger/v3 v3.2103.1 h1:zaX53IRg7ycxVlkd5pYdCeFp1FynD6qBGQoQql3R3Hk=
+github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible h1:4jGdduO4ceTJFKf0IhgaB8NJapGqKHwC2b4xQ/cXujM=
-github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw=
+github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.2.1 h1:Ff/S0snjr1oZHUNOkvA/gP6KUaMg5vDDl3Qnhjnwgm8=
-github.com/dlclark/regexp2 v1.2.1/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
+github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
+github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 h1:j0zqmciWFnhB01BT/CyfoXNEONoxerGjkcxM8i6tlXI=
@@ -231,35 +319,41 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5/go.mod h1:cxc20xI7fOgsFHWgt+PenlDDnMcrvh7Ocuj5hEFIdEk=
github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnjXhL/t10BSP1mcr/0Ldea2uMyuBr2SWk=
github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elastic/beats/v7 v7.0.0-alpha2.0.20200824113715-49e8024953a4 h1:iB7cRQTxxyCZlQWbFxYvOtECxb4N4WibdQbj+FbRDjE=
-github.com/elastic/beats/v7 v7.0.0-alpha2.0.20200824113715-49e8024953a4/go.mod h1:zCZdh0qhJXUD/H7+w3oSS6VlMXgdhPDzSLy2fqNLdxc=
-github.com/elastic/ecs v1.5.0 h1:/VEIBsRU4ecq2+U3RPfKNc6bFyomP6qnthYEcQZu8GU=
-github.com/elastic/ecs v1.5.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4=
-github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a h1:2NHgf1RUw+f240lpTnLrCp1aBNvq2wDi0E1A423/S1k=
-github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc=
+github.com/elastic/beats/v7 v7.0.0-alpha2.0.20210911003435-13e34660f62a h1:YLCpIj/8hsbqnYkzRnyL09T/5TcYT+uGsfO7DcdEwC8=
+github.com/elastic/beats/v7 v7.0.0-alpha2.0.20210911003435-13e34660f62a/go.mod h1:LCcsmUpI0w3r2r/vslHEbO6ra8YfIwNuUPrqt5zrZdY=
+github.com/elastic/ecs v1.11.0 h1:eqcKejxlTzy+6TsCIkd0aBnKHEQOkSfeXnu+pmGYMUY=
+github.com/elastic/ecs v1.11.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4=
+github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo=
+github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc=
github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng=
-github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM=
-github.com/elastic/go-elasticsearch/v7 v7.8.0 h1:M9D55OK13IEgg51Jb57mZgseag1AsncwAUn4C6j1vlc=
-github.com/elastic/go-elasticsearch/v7 v7.8.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
-github.com/elastic/go-elasticsearch/v8 v8.0.0-20200819071622-59b6a186f8dd h1:kVw29H2M+wai0NQ/W8FzngD80DnHTUfz/8kbfZl10hU=
-github.com/elastic/go-elasticsearch/v8 v8.0.0-20200819071622-59b6a186f8dd/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4=
+github.com/elastic/gmux v0.1.0 h1:T9qSkdMG8ArioQpMWl6XSu/oI+J0y5mV5d0MmwZ+zNs=
+github.com/elastic/gmux v0.1.0/go.mod h1:6Z2xDDcGvqDYkr1ZzIDQOJmrlywwOhdauasNaXn3Xeg=
+github.com/elastic/go-concert v0.2.0 h1:GAQrhRVXprnNjtvTP9pWJ1d4ToEA4cU5ci7TwTa20xg=
+github.com/elastic/go-concert v0.2.0/go.mod h1:HWjpO3IAEJUxOeaJOWXWEp7imKd27foxz9V5vegC/38=
+github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210728153421-6462d8b84e7d h1:eW4xXKW2sVzXxlNLTxwDOfKY4ugqFENLCDHSDuK75iY=
+github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210728153421-6462d8b84e7d/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
+github.com/elastic/go-elasticsearch/v8 v8.0.0-20210727161915-8cf93274b968 h1:rDj5NfOj2IQocHyDLjbkFmn21XYljn+CaJeBwEbUL6E=
+github.com/elastic/go-elasticsearch/v8 v8.0.0-20210727161915-8cf93274b968/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4=
github.com/elastic/go-hdrhistogram v0.1.0 h1:7UVeQ9MsO5c9h8RJeH2S2lXCGi9hQB/94W6Pjjqprc4=
github.com/elastic/go-hdrhistogram v0.1.0/go.mod h1:NEl0wZTQXzwq7X2WBZGl5G3efcKbvv+r9mTZpXrIs78=
-github.com/elastic/go-libaudit/v2 v2.0.2/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg=
+github.com/elastic/go-libaudit/v2 v2.2.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg=
github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU=
github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ=
github.com/elastic/go-lookslike v0.3.0/go.mod h1:AhH+rdJux5RlVjs+6ej4jkvYyoNRkj2crxmqeHlj3hA=
@@ -269,25 +363,24 @@ github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P
github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f/go.mod h1:OPGqFNdTS34kMReS5hPFtBhD9J8itmSDurs1ix2wx7c=
github.com/elastic/go-seccomp-bpf v1.1.0 h1:jUzzDc6LyCtdolZdvL/26dad6rZ9vsc7xZ2eadKECAU=
github.com/elastic/go-seccomp-bpf v1.1.0/go.mod h1:l+89Vy5BzjVcaX8USZRMOwmwwDScE+vxCFzzvQwN7T8=
-github.com/elastic/go-structform v0.0.7 h1:ihszOJQryNuIIHE2ZgsbiDq+agKO6V4yK0JYAI3tjzc=
-github.com/elastic/go-structform v0.0.7/go.mod h1:QrMyP3oM9Sjk92EVGLgRaL2lKt0Qx7ZNDRWDxB6khVs=
+github.com/elastic/go-structform v0.0.9 h1:HpcS7xljL4kSyUfDJ8cXTJC6rU5ChL1wYb6cx3HLD+o=
+github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4=
github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
-github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
-github.com/elastic/go-sysinfo v1.4.0 h1:LUnK6TNOuy8JEByuDzTAQH3iQ6bIywy55+Z+QlKNSWk=
-github.com/elastic/go-sysinfo v1.4.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
+github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg=
+github.com/elastic/go-sysinfo v1.7.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
github.com/elastic/go-txfile v0.0.7 h1:Yn28gclW7X0Qy09nSMSsx0uOAvAGMsp6XHydbiLVe2s=
github.com/elastic/go-txfile v0.0.7/go.mod h1:H0nCoFae0a4ga57apgxFsgmRjevNCsEaT6g56JoeKAE=
github.com/elastic/go-ucfg v0.7.0/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo=
-github.com/elastic/go-ucfg v0.8.3 h1:leywnFjzr2QneZZWhE6uWd+QN/UpP0sdJRHYyuFvkeo=
github.com/elastic/go-ucfg v0.8.3/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo=
+github.com/elastic/go-ucfg v0.8.4-0.20200415140258-1232bd4774a6 h1:Ehbr7du4rSSEypR8zePr0XRbMhO4PJgcHC9f8fDbgAg=
+github.com/elastic/go-ucfg v0.8.4-0.20200415140258-1232bd4774a6/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo=
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/elastic/gosigar v0.10.6-0.20200715000138-f115143bb233 h1:PvAAw8rXlg0maTAhdJznCmlzVZIKPwD2BP1pljuncLA=
-github.com/elastic/gosigar v0.10.6-0.20200715000138-f115143bb233/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
-github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 h1:rSo6gsz4zOanqtJ5fmZYQJvEJnA5YsVOB25casIwqUw=
-github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elastic/gosigar v0.14.1 h1:T0aQ7n/n2ZA9W7DmAnj60v+qzqKERdBgJBO1CG2W6rc=
+github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/sarama v1.19.1-0.20210120173147-5c8cb347d877 h1:C9LsbipColsz04JKpKoLlp0pgMJRLq2uXVTeKRDcNcY=
+github.com/elastic/sarama v1.19.1-0.20210120173147-5c8cb347d877/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -295,489 +388,666 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
-github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
-github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
-github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24 h1:nREVDi4H8mwnNqfxFU9NMzZrDCg8TXbEatMvHozxKwU=
-github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
+github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db h1:GYXWx7Vr3+zv833u+8IoXbNnQY0AdXsxAgI0kX7xcwA=
-github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
+github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
+github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4=
+github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
+github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
+github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ=
+github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
+github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
+github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
+github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
+github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
-github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq/cUjA02lMZhijk5kR31SEydKyqah1OgBuo=
+github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godror/godror v0.10.4/go.mod h1:9MVLtu25FBJBMHkPs0m3Ngf/VmwGcLpM2HS8PlNGw9U=
-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b h1:3QNh5Xo2pmr2nZXENtnztfpjej8XY8EPmvYxF5SzY9M=
github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
-github.com/gogo/googleapis v1.3.1-0.20190914144012-b8d18e97a9a1 h1:wHBfjxqklJAH3XgODVzeKkZs3hx29Ujept0ST7iIfnE=
-github.com/gogo/googleapis v1.3.1-0.20190914144012-b8d18e97a9a1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
+github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf
v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt 
v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.21.0 h1:HxAxpR8Z0M8omihvQdsD3PF0qPjlqYqp2vMJzstoKeI= -github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= -github.com/google/addlicense v0.0.0-20190907113143-be125746c2c4 h1:Bptr91tgP3H4/tg/69DYMrievvj8AgXXr5ktPmm+p38= -github.com/google/addlicense v0.0.0-20190907113143-be125746c2c4/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc= +github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-github/v28 v28.1.1 h1:kORf5ekX5qwXO2mGzXXOjMe/g6ap8ahVe0sBEulhSxo=
-github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
-github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts=
-github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
 github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s=
-github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.3.1-0.20190624222214-25d8b0b66985 h1:bCrkbuMwrQnnTsa9Y0giRda4P3fvay8f98tvYmZXVhg=
-github.com/googleapis/gnostic v0.3.1-0.20190624222214-25d8b0b66985/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
 github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.13.0 h1:sBDQoHXrOlfPobnKw69FIKa1wg9qsLLvvQ/Y19WtFgI=
 github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
-github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/h2non/filetype v1.1.1 h1:xvOwnXKAckvtLWsN398qS9QhlxlnVXBjXBydK2/UFB4=
+github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.2.0/go.mod h1:1SIkFYi2ZTXUE5Kgt179+4hH33djo11+0Eo2XgTAtkw=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/cronexpr v1.1.0 h1:dnNsWtH0V2ReN7JccYe8m//Bj14+PjJDntR1dz0Cixk=
+github.com/hashicorp/cronexpr v1.1.0/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/nomad/api v0.0.0-20201203164818-6318a8ac7bf8 h1:Yrz9yGVJf5Ce2KS7x8hS/MUTIeBmGEhF8nhzolRpSqY=
+github.com/hashicorp/nomad/api v0.0.0-20201203164818-6318a8ac7bf8/go.mod h1:vYHP9jMXk4/T2qNUbWlQ1OHCA1hHLil3nvqSmz8mtgc=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
-github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01 h1:HiJF8Mek+I7PY0Bm+SuhkwaAZSZP83sw6rrTMrgZ0io=
-github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01/go.mod h1:1DWDZmeYf0LX30zscWb7K9rUMeirNeBMd5Dum+seUhc=
-github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1/go.mod h1:RsN5RGgVYeXpcXNtWyztD5VIe7VNSEqpJvF2iEH7QvI=
-github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5/go.mod h1:FGO/dXIFZnan7KvvUSFk1hYMnoVNzB6NTMPrmke8SSI=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
+github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
+github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200715173712-053cf528c12f h1:nTaA/z8mev5oJv1dNSbu8Pwvu5CJyBZKwDvHpr9FZ4I=
-github.com/ianlancetaylor/demangle v0.0.0-20200715173712-053cf528c12f/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
 github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2/go.mod h1:CfMdguCK66I5DAUJgGKyNz8aB6vO5dZzkm9Xep6WGvw=
-github.com/jaegertracing/jaeger v1.14.0/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI=
-github.com/jaegertracing/jaeger v1.16.0 h1:WPpd4nmpau+nuAE0gBHbigdzOUfQXvIm9pGMnbWHO5A=
-github.com/jaegertracing/jaeger v1.16.0/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw=
+github.com/jaegertracing/jaeger v1.25.0 h1:6mevWzUxgLl0SoNwfJEvmsZhJvkTP5GdHPfJq74SSug=
+github.com/jaegertracing/jaeger v1.25.0/go.mod h1:2OPl4X+hPgPPat+u6FfwdItUR8V0qfynfWfVPcsZ9c0=
+github.com/jarcoal/httpmock v1.0.4 h1:jp+dy/+nonJE4g4xbVtl9QdrUNbn6/3hDT5R4nDIZnA=
+github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
+github.com/jcchavezs/porto v0.1.0 h1:Xmxxn25zQMmgE7/yHYmh19KcItG81hIwfbEEFnd6w/Q=
+github.com/jcchavezs/porto v0.1.0/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A=
 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
 github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE=
-github.com/josephspurrier/goversioninfo v1.2.0 h1:tpLHXAxLHKHg/dCU2AAYx08A4m+v9/CWg6+WUvTF4uQ=
-github.com/josephspurrier/goversioninfo v1.2.0/go.mod h1:AGP2a+Y/OVJZ+s6XM4IwFUpkETwvn0orYurY8qpw1+0=
+github.com/josephspurrier/goversioninfo v1.3.0 h1:pmgDhWnG8I59p5kCR09J73s/gy9JqRPAtiaUK8jixtE=
+github.com/josephspurrier/goversioninfo v1.3.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/justinas/nosurf v1.1.0/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/kardianos/service v1.2.1-0.20210728001519-a323c3813bc7/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/karrick/godirwalk v1.15.6/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd h1:eTGTdO1ZbZ0HSC6TxDLtBl7W0fgFpGlbdPBK+IF0I0g=
-github.com/klauspost/compress v1.9.3-0.20191122130757-c099ac9f21dd/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/knadh/koanf v1.2.1 h1:tVR+BbAM5PA2YkB0OMyfSnEsmt3uygpn3R0WB6jKw7s=
+github.com/knadh/koanf v1.2.1/go.mod h1:xpPTwMhsA/aaQLAilyCCqfpEiY1gpa160AiCuWHJUjY=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU=
+github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
+github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
-github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g=
-github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls=
+github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
 github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
 github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=
 github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE=
-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/mattn/go-colorable v0.0.8/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
 github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
-github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
 github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWePHKOs=
-github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI=
-github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
 github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4=
 github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
-github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
-github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
+github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0=
+github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA=
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
+github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.2.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/open-telemetry/opentelemetry-collector v0.2.1-0.20191218182225-c300f1341702 h1:Vk9C/CWguq0bQqeOeSibEpVojUR7g6Z0JYapEBi1Wfo=
-github.com/open-telemetry/opentelemetry-collector v0.2.1-0.20191218182225-c300f1341702/go.mod h1:WxiK9mcisb/hM6M6+2BRV/VIU2c8VzlCRJED2S1MWns=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.34.0 h1:6dMZexq1Ra8vuOdglKoOdaQjBf9HsLTZ5eRPOQKRPvQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.34.0/go.mod h1:op7F4UAdSvEBRRzzPQMK1GmEIOB9oHEKDNM48mKVxE4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.34.0 h1:o1fWG4CPTwqmM8C1cDvtL+Vzg1YHNQvrl0DSOdXkQ20=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.34.0/go.mod h1:eKUNfTnvCzk8qZX35dq0cc2M79EFdXmwQqmGmUTlAD8=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483 h1:eFd3FsB01m/zNg/yBMYdm/XqiqCztcN9SVRPtGtzDHo=
github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -786,27 +1056,49 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e h1:fI6mGTyggeIYVmGhf80XFHxTupjOexbCppgTNDkv9AA=
-github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/orijtech/prometheus-go-metrics-exporter v0.0.3-0.20190313163149-b321c5297f60/go.mod h1:+Mu9w51Uc2RNKSUTA95d6Pvy8cxFiRX3ANRPlCcnGLA=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
+github.com/osquery/osquery-go v0.0.0-20210622151333-99b4efa62ec5/go.mod h1:JKR5QhjsYdnIPY7hakgas5sxf8qlA/9wQnLqaMfWdcg=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso=
github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b h1:yS0+/i6mwRZCdssUd+MkFJkCn/Evh1PlUKCYe3aCtQw=
-github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
-github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -815,180 +1107,227 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg=
github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE=
-github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a h1:AA9vgIBDjMHPC2McaGPojgV2dcI78ZC0TLNhYCXEKH8=
-github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a/go.mod h1:lzZQ3Noex5pfAy7mkAeCjcBDteYU85uWWnJ/y6gKU8k=
-github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE=
+github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
+github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc/go.mod h1:ikMPikHu8SMvBGWoKulvvOOZN227amf2E9eMYqyAwAY=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
+github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
-github.com/prometheus/prometheus v1.8.2-0.20190924101040-52e0504f83ea/go.mod h1:elNqjVbwD3sCZJqKzyN7uEuwGcCpeJvv67D6BrHsDbw=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec=
github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
+github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ=
+github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
-github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd h1:fvaEkjpr2NJbtnFRCft7D6y/mQ5/2OQU0pKJLW8dwFA=
-github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd/go.mod h1:giYAXnpegRDPsXUO7TRpDKXJo1lFGYxyWRfEt5iQ+OA=
-github.com/reviewdog/reviewdog v0.9.17 h1:MKb3rlQZgkEXr3d85iqtYNITXn7gDJr2kT0IhgX/X9A=
-github.com/reviewdog/reviewdog v0.9.17/go.mod h1:Y0yPFDTi9L5ohkoecJdgbvAhq+dUXp+zI7atqVibwKg=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
+github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
+github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE=
-github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I=
github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec=
-github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY=
github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ=
github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ=
+github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis=
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
-github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d h1:BzRvVq1EHuIjxpijCEKpAxzKUUMurOQ4sknehIATRh8=
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
+github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
-github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shirou/gopsutil v3.20.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.7+incompatible h1:g/wcPHcuCQvHSePVofjQljd2vX4ty0+J6VoMB+NPcdk=
+github.com/shirou/gopsutil v3.21.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs=
-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.4.1-0.20190911140308-99520c81d86e h1:D4jmJ9BqzeMv7BvAqjooNtXH2PXG7m+pcYRnj2Ojlrk=
-github.com/spf13/viper v1.4.1-0.20190911140308-99520c81d86e/go.mod h1:jUyf+v/KTOnRyUy2/AsjF537WfJWVv3AnlcKSNd+AIg=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A=
-github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
-github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
-github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc=
github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
-github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
+github.com/tidwall/gjson v1.6.5 h1:P/K9r+1pt9AK54uap7HcoIp6T3a7AoMg3v18tUis+Cg=
+github.com/tidwall/gjson v1.6.5/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
+github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8=
github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU=
+github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U=
github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs=
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q=
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
+github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
+github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
+github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b/go.mod h1:jAqhj/JBVC1PwcLTWd6rjQyGyItxxrhpiBl8LSuAGmw=
github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786 h1:B/IVHYiI0d04dudYw+CvCAGqSMq8d0yWy56eD6p85BQ=
github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786/go.mod h1:RIkfovP3Y7my19aXEjjbNd9E5TlHozzAyt7B8AaEcwg=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
-github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe/go.mod h1:OBcG9bn7sHtXgarhUEb3OfCnNsgtGnkVf41ilSZ3K3E=
-github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
-github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
-github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/uber/tchannel-go v1.10.0/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo=
-github.com/uber/tchannel-go v1.16.0 h1:B7dirDs15/vJJYDeoHpv3xaEUjuRZ38Rvt1qq9g7pSo=
-github.com/uber/tchannel-go v1.16.0/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go v1.1.8 h1:/D9x7IRpfMHDlizVOgxrag5Fh+/NY+LtI8bsr+AswRA=
+github.com/ugorji/go v1.1.8/go.mod h1:0lNM99SwWUIRhCXnigEMClngXBk/EmpTXa7mgiewYWA=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ultraware/funlen v0.0.2 h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo=
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=
-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ugorji/go/codec v1.1.8 h1:4dryPvxMP9OtkjIbuNeK2nb27M38XMHLGlfNSNph/5s=
+github.com/ugorji/go/codec v1.1.8/go.mod h1:X00B19HDtwvKbQY2DcYjvZxKQp8mzrJoQ6EgoIY/D2E=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs=
github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo=
github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e h1:NiofbjIUI5gR+ybDsGSVH1fWyjSeDYiYVJHT1+kcsak=
github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e/go.mod h1:6GfHrdWBQYjFRIznu7XuQH4lYB2w8nO4bnImVKkzPOM=
@@ -997,99 +1336,181 @@ github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a h1:jWAaRFnay3H2e6S0
github.com/urso/magetools v0.0.0-20200125210132-c2e338f92f3a/go.mod h1:DbaJnRzkGaWrMWm5Hz6QVnUj//x9/zjrfx8bF3J+GJY=
github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4 h1:hhA8EBThzz9PztawVTycKvfETVuBqxAQ5keFlAVtbAw=
github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4/go.mod h1:RspW+E2Yb7Fs7HclB2tiDaiu6Rp41BiIG4Wo1YaoXGc=
-github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU=
-github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517 h1:ChMKTho2hWKpks/nD/FL2KqM1wuVt62oJeiE8+eFpGs=
-github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/urso/sderr v0.0.0-20210525210834-52b04e8f5c71 h1:CehQeKbysHV8J2V7AD0w8NL2x1h04kmmo/Ft5su4lU0=
+github.com/urso/sderr v0.0.0-20210525210834-52b04e8f5c71/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ=
github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
-github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU=
-github.com/xanzy/go-gitlab v0.22.3/go.mod h1:t4Bmvnxj7k37S4Y17lfLx+nLqkf/oQwT2HagfWKv5Og=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to=
+github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
-go.elastic.co/apm v1.7.0/go.mod h1:IYfi/330rWC5Kfns1rM+kY+RPkIdgUziRF6Cbm9qlxQ=
go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0=
-go.elastic.co/apm v1.8.0 h1:AWEKpHwRal0yCMd4K8Oxy1HAa7xid+xq1yy+XjgoVU0=
-go.elastic.co/apm v1.8.0/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0=
-go.elastic.co/apm/module/apmelasticsearch v1.7.2 h1:5STGHLZLSeAzxordMc+dFVKiyVtMmxADOV+TgRaXXJg=
+go.elastic.co/apm v1.11.0/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0=
+go.elastic.co/apm v1.12.0/go.mod h1:v8Yf+VZ3NplRjQUWlvPG4EV/GGtDNCVUMaafrCnmGEM=
+go.elastic.co/apm v1.13.1 h1:ICIcUcQOImg/bve9mQVyLCvm1cSUZ1afdwK6ACnxczU=
+go.elastic.co/apm v1.13.1/go.mod h1:dylGv2HKR0tiCV+wliJz1KHtDyuD8SPe69oV7VyK6WY=
go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8=
-go.elastic.co/apm/module/apmgrpc v1.7.0 h1:CE6zLPSOdSOjcn7AEDJQAULOG2ZT7W+e0i4JzYLSgeY=
-go.elastic.co/apm/module/apmgrpc v1.7.0/go.mod h1:fCqPNInjmGPbjOLXMPrDsGP9Ks39rcg/RDILJtHPwEI=
-go.elastic.co/apm/module/apmhttp v1.7.0/go.mod h1:70/fYU6lgIII213g7As10lm2Ca/ZkGixeJBoyfrGKes=
-go.elastic.co/apm/module/apmhttp v1.7.2 h1:2mRh7SwBuEVLmJlX+hsMdcSg9xaielCLElaPn/+i34w=
+go.elastic.co/apm/module/apmelasticsearch v1.12.0 h1:DzDIrzuzpdLq6aaCo4yp4unH4Xly1AzsdYtdlgbrXE4=
+go.elastic.co/apm/module/apmelasticsearch v1.12.0/go.mod h1:9Xy9sEsFIuAuxpZWVipG7CT/aFVdbwlGhHlXSNbuG44=
+go.elastic.co/apm/module/apmgrpc v1.12.0 h1:HAPWVr+aQf6Hzpe5npr36pi/UE70sJpaUZxCP+qdHz8=
+go.elastic.co/apm/module/apmgrpc v1.12.0/go.mod h1:54OQi4zyBt6dhjNdgwfL3RmIZBL1l04L9TIpQDdyUmI=
go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY=
-go.elastic.co/ecszap v0.1.1-0.20200424093508-cdd95a104193/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ=
-go.elastic.co/ecszap v0.2.0 h1:BSZNJ2MOIsecJ7L4ezUA+JIarx14wclqZLJm/mBj044=
-go.elastic.co/ecszap v0.2.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ=
+go.elastic.co/apm/module/apmhttp v1.12.0 h1:iSXbited+ouqB58MftpSz61CvGzuarF4+sq+dSOYJJk=
+go.elastic.co/apm/module/apmhttp v1.12.0/go.mod h1:9ry+uRvkUYdur2TvEuHMmu1stOlYymnrYaGJwO3fCI4=
+go.elastic.co/ecszap v0.3.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ=
+go.elastic.co/ecszap v1.0.0 h1:PdQkRUeraR3XHJ14T7JMa+ncU0XXrVrcEN/BoRa2nMI=
+go.elastic.co/ecszap v1.0.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ=
go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs=
go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4=
go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI=
go.elastic.co/go-licence-detector v0.4.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
-go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs=
+go.opentelemetry.io/collector v0.34.0 h1:JvIIYAOU78YnU9EBfBOrQDuWgo82zq+p8rpyCKY1+y0=
+go.opentelemetry.io/collector v0.34.0/go.mod h1:dC/us+doKaoNPEFYK9rwVQKxE2thQbl46T0juCwVbuc=
+go.opentelemetry.io/collector/model v0.34.0 h1:JmuBhBvX0l0bYDBAG9GtJVQKcIQRspPLgHfZqgHLpJc=
+go.opentelemetry.io/collector/model v0.34.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc=
+go.opentelemetry.io/contrib v0.22.0 h1:0F7gDEjgb1WGn4ODIjaCAg75hmqF+UN0LiVgwxsCodc=
+go.opentelemetry.io/contrib v0.22.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.22.0 h1:TjqwX4uJNaj479ZjFpADOMJKOM4rBXq4kN7nbeuGKrY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.22.0/go.mod h1:KjqwX4uJNaj479ZjFpADOMJKOM4rBXq4kN7nbeuGKrY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0 h1:WHjZguqT+3UjTgFum33hWZYybDVnx8u9q5/kQDfaGTs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0/go.mod h1:o3MuU25bYroYnc2TOKe8mTk8f9X1oPFO6C5RCoPKtSU=
+go.opentelemetry.io/contrib/zpages v0.22.0/go.mod h1:pO7VUk5qoCiekzXk0XCuQcKQsKBHyjx9KFIW1Vlc8dw=
+go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I=
+go.opentelemetry.io/otel v1.0.0-RC2 h1:SHhxSjB+omnGZPgGlKe+QMp3MyazcOHdQ8qwo89oKbg=
+go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
+go.opentelemetry.io/otel/internal/metric v0.22.0 h1:Q9bS02XRykSRIbggaU4hVF9oWOP9PyILu26zJWoKmk0=
+go.opentelemetry.io/otel/internal/metric v0.22.0/go.mod h1:7qVuMihW/ktMonEfOvBXuh6tfMvvEyoIDgeJNRloYbQ=
+go.opentelemetry.io/otel/metric v0.22.0 h1:/qv10BzznqEifrXBwsTT370OCN1PRgt+mnjzMwxJKrQ=
+go.opentelemetry.io/otel/metric v0.22.0/go.mod h1:KcsUkBiYGW003DJ+ugd2aqIRIfjabD9jeOUXqsAtrq0=
+go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4=
+go.opentelemetry.io/otel/oteltest v1.0.0-RC2 h1:xNKqMhlZYkASSyvF4JwObZFMq0jhFN3c3SP+2rCzVPk=
+go.opentelemetry.io/otel/oteltest v1.0.0-RC2/go.mod h1:kiQ4tw5tAL4JLTbcOYwK1CWI1HkT5aiLzHovgOVnz/A=
+go.opentelemetry.io/otel/sdk v1.0.0-RC2/go.mod h1:fgwHyiDn4e5k40TD9VX243rOxXR+jzsWBZYA2P5jpEw=
+go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg=
+go.opentelemetry.io/otel/trace v1.0.0-RC2 h1:dunAP0qDULMIT82atj34m5RgvsIK6LcsXf1c/MsYg1w=
+go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1101,26 +1522,34 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1133,35 +1562,80 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210908191846-a5e095526f91 h1:E8wdt+zBjoxD3MA65wEc3pl25BsTi7tbkpwc4ANThjc= +golang.org/x/net v0.0.0-20210908191846-a5e095526f91/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201008141435-b3e1573b7520/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1169,89 +1643,222 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200102141924-c96a22e43c9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 h1:xrCZDmdtoloIiooiA9q0OQb9r8HejIHYoHGhGCe1pGg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20200602230032-c00d67ef29d0 h1:6txNFSnY+tteYoO+hf01EpdYcYZiurdC9MDIrcUzEu4= -golang.org/x/tools v0.0.0-20200602230032-c00d67ef29d0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb 
h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af h1:aLMMXFYqw01RA6XJim5uaN+afqNNjc9P8HPAbnpnc5s= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1261,109 +1868,153 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.0/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/examples v0.0.0-20201010204749-3c400e7fcc87 h1:JA56ipSuANY2Fwx4OITOAj+QXlHyCJEma6VVWTRBG+k= +google.golang.org/grpc/examples v0.0.0-20201010204749-3c400e7fcc87/go.mod h1:Lh55/1hxmVHEkOvSIQ2uj0P12QyOCUNyRwnUlSS13hw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= 
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= 
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v0.6.0/go.mod h1:LEX+ioCVdeWhZc8GYfiBRag360eBhwixWJ62R9eDQtI= +gotest.tools/gotestsum v1.7.0 h1:RwpqwwFKBAa2h+F6pMEGpE707Edld0etUD3GhqqhDNc= +gotest.tools/gotestsum v1.7.0/go.mod h1:V1m4Jw3eBerhI/A6qCxUE07RnCg7ACkKj9BYcAm09V8= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5 h1:AQkaJpH+/FmqRjmXZPELom5zIERYZfwTjnHpfoVMQEc= -howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= -k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= -k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k= -k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +howett.net/plist v0.0.0-20201203080718-1454fab16a06 h1:QDxUo/w2COstK1wIBYpzQlHX/NqaQTcf9jyz347nI58= +howett.net/plist v0.0.0-20201203080718-1454fab16a06/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= +k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4= -mvdan.cc/unparam 
v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/idxmgmt/datastreams.go b/idxmgmt/datastreams.go new file mode 100644 index 00000000000..15210ccb21a --- /dev/null +++ b/idxmgmt/datastreams.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package idxmgmt + +import ( + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/fmtstr" + "github.com/elastic/beats/v7/libbeat/idxmgmt" + "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/beats/v7/libbeat/outputs/outil" + + "github.com/elastic/apm-server/datastreams" +) + +type dataStreamsSupporter struct{} + +// BuildSelector returns an outputs.IndexSelector which routes events through +// to data streams based on well-defined data_stream.* fields in events. +func (dataStreamsSupporter) BuildSelector(*common.Config) (outputs.IndexSelector, error) { + fmtstr, err := fmtstr.CompileEvent(datastreams.IndexFormat) + if err != nil { + return nil, err + } + expr, err := outil.FmtSelectorExpr(fmtstr, "", outil.SelectorLowerCase) + if err != nil { + return nil, err + } + return outil.MakeSelector(expr), nil +} + +// Enabled always returns false, indicating that this idxmgmt.Supporter does +// not set up templates or ILM policies. +func (dataStreamsSupporter) Enabled() bool { + return false +} + +// Manager returns a no-op idxmgmt.Manager. +func (dataStreamsSupporter) Manager(client idxmgmt.ClientHandler, assets idxmgmt.Asseter) idxmgmt.Manager { + return dataStreamsManager{} +} + +type dataStreamsManager struct{} + +// VerifySetup always returns true and an empty string, to avoid logging +// duplicate warnings. +func (dataStreamsManager) VerifySetup(template, ilm idxmgmt.LoadMode) (bool, string) { + // Just return true to avoid logging warnings. We'll error out in Setup. + return true, "" +} + +// Setup will always return an error, in response to manual setup (i.e. `apm-server setup`). +func (dataStreamsManager) Setup(template, ilm idxmgmt.LoadMode) error { + return errors.New("index setup must be performed externally when using data streams, by installing the 'apm' integration package") +} diff --git a/idxmgmt/feature.go b/idxmgmt/feature.go index 519bf2bd596..611d81cb3fb 100644 --- a/idxmgmt/feature.go +++ b/idxmgmt/feature.go @@ -22,14 +22,10 @@ import ( ) type feature struct { - enabled, overwrite, load, supported bool - - warn string - info string - err error + enabled, overwrite, load bool } -func newFeature(enabled, overwrite, load, supported bool, mode libidxmgmt.LoadMode) feature { +func newFeature(enabled, overwrite, load bool, mode libidxmgmt.LoadMode) feature { if mode == libidxmgmt.LoadModeUnset { mode = libidxmgmt.LoadModeDisabled } @@ -39,25 +35,10 @@ func newFeature(enabled, overwrite, load, supported bool, mode libidxmgmt.LoadMo if mode == libidxmgmt.LoadModeForce { load = true } - if !supported { - enabled = false - } load = load && mode.Enabled() return feature{ enabled: enabled, overwrite: overwrite, load: load, - supported: supported} -} - -func (f *feature) warning() string { - return f.warn -} - -func (f *feature) information() string { - return f.info -} - -func (f *feature) error() error { - return f.err + } } diff --git a/idxmgmt/ilm/config.go b/idxmgmt/ilm/config.go index 9a3b76bd68f..4f80c69d30c 100644 --- a/idxmgmt/ilm/config.go +++ b/idxmgmt/ilm/config.go @@ -19,13 +19,13 @@ package ilm import ( "fmt" + "strings" "time" - libcommon "github.com/elastic/beats/v7/libbeat/common" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/beat" + libcommon "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/fmtstr" libilm
"github.com/elastic/beats/v7/libbeat/idxmgmt/ilm" @@ -68,22 +68,27 @@ type Policy struct { Body map[string]interface{} `config:"policy"` } -// NewConfig extracts given configuration and merges with default configuration -// https://github.com/elastic/go-ucfg/issues/167 describes a bug in go-ucfg -// that panics when trying to unpack an empty configuration for an attribute -// of type map[string]interface{} into a variable with existing values for the map. -// This requires some workaround in merging configured policies with default policies -// TODO(simitt): when the bug is fixed -// - move the validation part into a `Validate` method -// - remove the extra handling for `defaultPolicies` and add to defaultConfig instead. +// NewConfig extracts given configuration and merges with default configuration. func NewConfig(info beat.Info, cfg *libcommon.Config) (Config, error) { - config := Config{Mode: libilm.ModeAuto, - Setup: Setup{Enabled: true, RequirePolicy: true, Mappings: defaultMappings()}} + config := Config{ + Mode: libilm.ModeAuto, + Setup: Setup{Enabled: true, RequirePolicy: true, Mappings: defaultMappings()}, + } if cfg != nil { if err := cfg.Unpack(&config); err != nil { return Config{}, err } } + if len(config.Setup.Policies) == 0 { + // https://github.com/elastic/go-ucfg/issues/167 describes a bug in go-ucfg + // that panics when trying to unpack an empty configuration for an attribute + // of type map[string]interface{} into a variable with existing values for the map. + // This requires some workaround in merging configured policies with default policies + // TODO(simitt): when the bug is fixed + // - move the validation part into a `Validate` method + // - remove the extra handling for `defaultPolicies` and add to defaultConfig instead. + config.Setup.Policies = defaultPolicies() + } // replace variable rollover_alias parts with beat information if available // otherwise fail as the full alias needs to be known during setup. 
for et, m := range config.Setup.Mappings { @@ -91,25 +96,28 @@ func NewConfig(info beat.Info, cfg *libcommon.Config) (Config, error) { if err != nil { return Config{}, errors.Wrap(err, "variable part of index suffix cannot be resolved") } - m.Index = idx + m.Index = strings.ToLower(idx) config.Setup.Mappings[et] = m - } - if len(config.Setup.Policies) == 0 { - config.Setup.Policies = defaultPolicies() + if config.Setup.RequirePolicy { + continue + } + if _, ok := config.Setup.Policies[m.PolicyName]; !ok { + // if require_policy=false and policy does not exist, add it with an empty body + config.Setup.Policies[m.PolicyName] = Policy{Name: m.PolicyName} + } } return config, validate(&config) } func (c *Config) SelectorConfig() (*libcommon.Config, error) { + indicesCfg, err := libcommon.NewConfigFrom(c.conditionalIndices()) + if err != nil { + return nil, err + } var idcsCfg = libcommon.NewConfig() - // set fallback index for ingested events with unknown event type idcsCfg.SetString("index", -1, common.FallbackIndex) - - if indicesCfg, err := libcommon.NewConfigFrom(c.conditionalIndices()); err == nil { - idcsCfg.SetChild("indices", -1, indicesCfg) - } + idcsCfg.SetChild("indices", -1, indicesCfg) return idcsCfg, nil - } func (m *Mappings) Unpack(cfg *libcommon.Config) error { @@ -165,8 +173,11 @@ func validate(c *Config) error { return nil } if _, ok := c.Setup.Policies[m.PolicyName]; !ok { - return errors.Errorf("policy '%s' not configured for ILM setup, "+ - "set `apm-server.ilm.require_policy: false` to disable verification", m.PolicyName) + return errors.Errorf(""+ + "policy '%s' not configured for ILM setup, "+ + "set `apm-server.ilm.require_policy: false` to disable verification", + m.PolicyName, + ) } } return nil diff --git a/idxmgmt/ilm/config_test.go b/idxmgmt/ilm/config_test.go index f848f83466a..5256666fd5c 100644 --- a/idxmgmt/ilm/config_test.go +++ b/idxmgmt/ilm/config_test.go @@ -20,6 +20,7 @@ package ilm import ( "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -111,6 +112,7 @@ func TestConfig_RequirePolicy(t *testing.T) { } func TestConfig_Valid(t *testing.T) { + now := time.Now() for _, tc := range []struct { name string cfg string @@ -118,18 +120,18 @@ func TestConfig_Valid(t *testing.T) { expected Config }{ {name: "new policy and index suffix", - cfg: `{"setup":{"mapping":[{"event_type":"span","policy_name":"spanPolicy"},{"event_type":"metric","index_suffix":"production"},{"event_type":"error","index_suffix":"%{[observer.name]}"}],"policies":[{"name":"spanPolicy","policy":{"phases":{"foo":{}}}}]}}`, + cfg: `{"setup":{"mapping":[{"event_type":"span","policy_name":"spanPolicy"},{"event_type":"metric","index_suffix":"ProdUCtion"},{"event_type":"error","index_suffix":"%{[observer.name]}-%{+yyyy-MM-dd}"}],"policies":[{"name":"spanPolicy","policy":{"phases":{"foo":{}}}}]}}`, expected: Config{Mode: libilm.ModeAuto, Setup: Setup{Enabled: true, Overwrite: false, RequirePolicy: true, Mappings: map[string]Mapping{ "error": {EventType: "error", PolicyName: defaultPolicyName, - Index: "apm-9.9.9-error-mockapm", IndexSuffix: "%{[observer.name]}"}, + Index: fmt.Sprintf("apm-9.9.9-error-mockapm-%d-%02d-%02d", now.Year(), now.Month(), now.Day()), IndexSuffix: "%{[observer.name]}-%{+yyyy-MM-dd}"}, "span": {EventType: "span", PolicyName: "spanPolicy", Index: "apm-9.9.9-span"}, "transaction": {EventType: "transaction", PolicyName: defaultPolicyName, Index: "apm-9.9.9-transaction"}, "metric": {EventType: "metric", 
PolicyName: defaultPolicyName, - Index: "apm-9.9.9-metric-production", IndexSuffix: "production"}, + Index: "apm-9.9.9-metric-production", IndexSuffix: "ProdUCtion"}, "profile": {EventType: "profile", PolicyName: defaultPolicyName, Index: "apm-9.9.9-profile"}, }, @@ -163,7 +165,11 @@ func TestConfig_Valid(t *testing.T) { Index: "apm-9.9.9-error"} return m }(), - Policies: defaultPolicies(), + Policies: func() map[string]Policy { + p := defaultPolicies() + p["errorPolicy"] = Policy{Name: "errorPolicy"} + return p + }(), }}, }, } { diff --git a/idxmgmt/ilm/supporter_factory.go b/idxmgmt/ilm/supporter_factory.go index 7ac22282e42..29102f9a13b 100644 --- a/idxmgmt/ilm/supporter_factory.go +++ b/idxmgmt/ilm/supporter_factory.go @@ -27,11 +27,7 @@ import ( const pattern = "000001" // MakeDefaultSupporter creates the ILM supporter for APM that is passed to libbeat. -func MakeDefaultSupporter( - log *logp.Logger, - mode libilm.Mode, - ilmConfig Config) ([]libilm.Supporter, error) { - +func MakeDefaultSupporter(log *logp.Logger, ilmConfig Config) []libilm.Supporter { if log == nil { log = logp.NewLogger(logs.Ilm) } else { @@ -39,12 +35,17 @@ func MakeDefaultSupporter( } var supporters []libilm.Supporter - for _, m := range ilmConfig.Setup.Mappings { policy := ilmConfig.Setup.Policies[m.PolicyName] - supporter := libilm.NewStdSupport(log, mode, libilm.Alias{Name: m.Index, Pattern: pattern}, - libilm.Policy{Name: policy.Name, Body: policy.Body}, ilmConfig.Setup.Overwrite, true) + supporter := libilm.NewStdSupport( + log, + ilmConfig.Mode, + libilm.Alias{Name: m.Index, Pattern: pattern}, + libilm.Policy{Name: policy.Name, Body: policy.Body}, + ilmConfig.Setup.Overwrite, + true, // check exists + ) supporters = append(supporters, supporter) } - return supporters, nil + return supporters } diff --git a/idxmgmt/ilm/supporter_factory_test.go b/idxmgmt/ilm/supporter_factory_test.go index f842c536873..de7efc1e784 100644 --- a/idxmgmt/ilm/supporter_factory_test.go +++ b/idxmgmt/ilm/supporter_factory_test.go @@ -18,28 +18,34 @@ package ilm import ( + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" + libcommon "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/apm-server/idxmgmt/common" ) func TestMakeDefaultSupporter(t *testing.T) { info := beat.Info{Beat: "mockapm", Version: "9.9.9"} - cfg, err := NewConfig(info, nil) + input := `{"setup":{"require_policy":false,"mapping":[{"event_type":"span","policy_name":"rollover-10d"}]}}` + cfg, err := NewConfig(info, libcommon.MustNewConfigFrom(input)) require.NoError(t, err) - s, err := MakeDefaultSupporter(nil, 0, cfg) - require.NoError(t, err) + s := MakeDefaultSupporter(nil, cfg) assert.Equal(t, 5, len(s)) var aliases []string for _, sup := range s { aliases = append(aliases, sup.Alias().Name) - assert.Equal(t, defaultPolicyName, sup.Policy().Name) + expectedPolicyName := defaultPolicyName + if strings.Contains(sup.Alias().Name, "span") { + expectedPolicyName = "rollover-10d" + } + assert.Equal(t, expectedPolicyName, sup.Policy().Name) } var defaultAliases []string for _, et := range common.EventTypes { diff --git a/idxmgmt/manager.go b/idxmgmt/manager.go index de235d530da..1265f5ab1cf 100644 --- a/idxmgmt/manager.go +++ b/idxmgmt/manager.go @@ -19,6 +19,7 @@ package idxmgmt import ( "fmt" + "strings" "github.com/pkg/errors" @@ -27,17 +28,21 @@ import ( "github.com/elastic/apm-server/idxmgmt/common" 
"github.com/elastic/apm-server/idxmgmt/ilm" - "github.com/elastic/apm-server/utility" ) const ( - msgErrIlmDisabledES = "automatically disabled ILM as not supported by configured Elasticsearch" + // SetupDeprecatedWarning holds the warning message to display to users + // when setting up index management and/or pipelines. + SetupDeprecatedWarning = `WARNING: setting up Elasticsearch directly with apm-server is deprecated, and will be removed in 8.0. +New installations are encouraged to use the Elastic Agent integration. For more details, refer to +https://www.elastic.co/guide/en/apm/server/current/breaking-changes.html#_7_16` + msgIlmDisabledES = "Automatically disabled ILM as configured Elasticsearch not eligible for auto enabling." msgIlmDisabledCfg = "Automatically disabled ILM as custom index settings configured." msgIdxCfgIgnored = "Custom index configuration ignored when ILM is enabled." - msgIlmSetupDisabled = "Manage ILM setup is disabled. " - msgIlmSetupOverwriteDisabled = "Overwrite ILM setup is disabled. " - msgTemplateSetupDisabled = "Template loading is disabled. " + msgIlmSetupDisabled = "Manage ILM setup is disabled." + msgIlmSetupOverwriteDisabled = "Overwrite ILM setup is disabled." + msgTemplateSetupDisabled = "Template loading is disabled." ) type manager struct { @@ -46,24 +51,25 @@ type manager struct { assets libidxmgmt.Asseter } +// VerifySetup provides an opportunity to print a warning message to the console, +// for users who are running apm-server interactively. func (m *manager) VerifySetup(loadTemplate, loadILM libidxmgmt.LoadMode) (bool, string) { + warnings := []string{"\n" + SetupDeprecatedWarning + "\n"} templateFeature := m.templateFeature(loadTemplate) - ilmFeature := m.ilmFeature(loadILM) - - if err := ilmFeature.error(); err != nil { - return false, err.Error() - } - - var warn string - if !templateFeature.load { - warn += msgTemplateSetupDisabled - } - if ilmWarn := ilmFeature.warning(); ilmWarn != "" { - warn += ilmWarn + if _, _, ilmWarn, _, err := m.ilmFeature(loadILM); err != nil { + warnings = append(warnings, err.Error()) + } else { + if !templateFeature.load { + warnings = append(warnings, msgTemplateSetupDisabled) + } + if ilmWarn != "" { + warnings = append(warnings, ilmWarn) + } } - return warn == "", warn + return false, strings.Join(warnings, " ") } +// Setup is called for new Elasticsearch connections to ensure indices and templates are setup. 
func (m *manager) Setup(loadTemplate, loadILM libidxmgmt.LoadMode) error { log := m.supporter.log @@ -79,23 +85,22 @@ func (m *manager) Setup(loadTemplate, loadILM libidxmgmt.LoadMode) error { // as this step also automatically creates an index, it is important the matching templates are already there //(0) prepare template and ilm handlers, check if ILM is supported, fall back to ordinary index handling otherwise - - ilmFeature := m.ilmFeature(loadILM) - if info := ilmFeature.information(); info != "" { + ilmFeature, ilmSupporters, warn, info, err := m.ilmFeature(loadILM) + if err != nil { + return err + } + if info != "" { log.Info(info) } - if warn := ilmFeature.warning(); warn != "" { + if warn != "" { log.Warn(warn) } - if err := ilmFeature.error(); err != nil { - log.Error(err) - } + m.supporter.ilmEnabled.Store(ilmFeature.enabled) + //(1) load general apm template templateFeature := m.templateFeature(loadTemplate) m.supporter.templateConfig.Enabled = templateFeature.enabled m.supporter.templateConfig.Overwrite = templateFeature.overwrite - - //(1) load general apm template if err := m.loadTemplate(templateFeature, ilmFeature); err != nil { return err } @@ -103,23 +108,21 @@ func (m *manager) Setup(loadTemplate, loadILM libidxmgmt.LoadMode) error { if !ilmFeature.load { return nil } - - var policiesLoaded []string - var err error - for _, ilmSupporter := range m.supporter.ilmSupporters { + policiesLoaded := make(map[string]bool) + for _, ilmSupporter := range ilmSupporters { //(2) load event type policies, respecting ILM settings - if policiesLoaded, err = m.loadPolicy(ilmFeature, ilmSupporter, policiesLoaded); err != nil { + if err := m.loadPolicy(ilmFeature, ilmSupporter, policiesLoaded); err != nil { return err } // (3) load event type specific template respecting index lifecycle information - if err = m.loadEventTemplate(ilmFeature, ilmSupporter); err != nil { + if err := m.loadEventTemplate(ilmFeature, ilmSupporter); err != nil { return err } //(4) load ilm write aliases // ensure write aliases are created AFTER template creation - if err = m.loadAlias(ilmFeature, ilmSupporter); err != nil { + if err := m.loadAlias(ilmFeature, ilmSupporter); err != nil { return err } } @@ -129,67 +132,60 @@ func (m *manager) Setup(loadTemplate, loadILM libidxmgmt.LoadMode) error { } func (m *manager) templateFeature(loadMode libidxmgmt.LoadMode) feature { - return newFeature(m.supporter.templateConfig.Enabled, m.supporter.templateConfig.Overwrite, - m.supporter.templateConfig.Enabled, true, loadMode) + return newFeature( + m.supporter.templateConfig.Enabled, + m.supporter.templateConfig.Overwrite, + m.supporter.templateConfig.Enabled, + loadMode, + ) } -func (m *manager) ilmFeature(loadMode libidxmgmt.LoadMode) feature { - // Do not use configured `m.supporter.ilmConfig.Mode` to check if ilm is enabled. - // The configuration might be set to `true` or `auto` but preconditions are not met, - // e.g. ilm support by Elasticsearch - // In these cases the supporter holds an internal state `m.supporter.st.ilmEnabled` that is set to false. - // The originally configured value is preserved allowing to collect warnings and errors to be - // returned to the user. 
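The replacement ilmFeature in the hunk that continues below collapses the internal state tracking described in the removed comment into a direct decision at setup time. Condensed into one hypothetical helper (an illustrative distillation only: the real method additionally calls CheckEnabled against Elasticsearch, returns the ILM supporters, and can fail with an error), the decision logic is roughly:

package main

import "fmt"

type ilmMode int

const (
	modeAuto ilmMode = iota
	modeEnabled
	modeDisabled
)

const (
	msgIlmDisabledES  = "Automatically disabled ILM as configured Elasticsearch not eligible for auto enabling."
	msgIlmDisabledCfg = "Automatically disabled ILM as custom index settings configured."
	msgIdxCfgIgnored  = "Custom index configuration ignored when ILM is enabled."
)

// resolveILM condenses the branching of the rewritten ilmFeature.
func resolveILM(mode ilmMode, outputIsES, indicesCustomized, esSupportsILM bool) (bool, string) {
	if mode == modeDisabled {
		return false, ""
	}
	if !outputIsES {
		return false, msgIlmDisabledES // non-Elasticsearch output: ILM off
	}
	if indicesCustomized && mode == modeAuto {
		return false, msgIlmDisabledCfg // auto + custom indices: ILM off
	}
	var warn string
	if indicesCustomized {
		warn = msgIdxCfgIgnored // enabled + custom indices: indices ignored
	}
	if !esSupportsILM {
		return false, warn + msgIlmDisabledES
	}
	return true, warn
}

func main() {
	enabled, warn := resolveILM(modeAuto, true, true, true)
	fmt.Println(enabled, warn) // false: custom index settings disable ILM in auto mode
}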
- - warning := func(f feature) string { - if !f.load { - return msgIlmSetupDisabled - } - return "" - } - information := func(f feature) string { - if !f.overwrite { - return msgIlmSetupOverwriteDisabled +func (m *manager) ilmFeature(loadMode libidxmgmt.LoadMode) (_ feature, _ []libilm.Supporter, warn, info string, _ error) { + ilmEnabled := false + ilmSupporters := ilm.MakeDefaultSupporter(m.supporter.log, m.supporter.ilmConfig) + if m.supporter.ilmConfig.Mode != libilm.ModeDisabled { + checkSupported := true + if m.supporter.outputConfig.Name() != esKey { + // Output is not Elasticsearch: ILM is disabled. + warn += msgIlmDisabledES + checkSupported = false + } else if m.supporter.unmanagedIdxConfig.Customized() { + // Indices have been customised: "auto" becomes "disabled". + switch m.supporter.ilmConfig.Mode { + case libilm.ModeAuto: + warn += msgIlmDisabledCfg + checkSupported = false + case libilm.ModeEnabled: + warn += msgIdxCfgIgnored + } } - return "" - } - // m.supporter.st.ilmEnabled.Load() only returns true for cases where - // ilm mode is configured `auto` or `true` and preconditions to enable ilm are true - if enabled := m.supporter.st.ilmEnabled.Load(); enabled { - f := newFeature(enabled, m.supporter.ilmConfig.Setup.Overwrite, - m.supporter.ilmConfig.Setup.Enabled, true, loadMode) - f.warn = warning(f) - if m.supporter.unmanagedIdxConfig.Customized() { - f.warn += msgIdxCfgIgnored + if checkSupported && len(ilmSupporters) > 0 { + // Check if ILM is supported by Elasticsearch. + supporter := ilmSupporters[0] + enabled, err := supporter.Manager(m.clientHandler).CheckEnabled() + if err != nil { + return feature{}, nil, "", "", err + } + ilmEnabled = enabled + if !ilmEnabled { + warn += msgIlmDisabledES + } } - f.info = information(f) - return f } - var ( - err error - supported = true + f := newFeature( + ilmEnabled, + m.supporter.ilmConfig.Setup.Overwrite, + m.supporter.ilmConfig.Setup.Enabled, + loadMode, ) - // collect warnings when ilm is configured `auto` but it cannot be enabled - // collect error when ilm is configured `true` but it cannot be enabled as preconditions are not met - var warn string - if m.supporter.ilmConfig.Mode == libilm.ModeAuto { - if m.supporter.unmanagedIdxConfig.Customized() { - warn = msgIlmDisabledCfg - } else { - warn = msgIlmDisabledES - supported = false - } - } else if m.supporter.ilmConfig.Mode == libilm.ModeEnabled { - err = errors.New(msgErrIlmDisabledES) - supported = false + if !f.load { + warn += msgIlmSetupDisabled } - f := newFeature(false, m.supporter.ilmConfig.Setup.Overwrite, m.supporter.ilmConfig.Setup.Enabled, supported, loadMode) - f.warn = warning(f) - f.warn += warn - f.info = information(f) - f.err = err - return f + if !f.overwrite { + info = msgIlmSetupOverwriteDisabled + } + return f, ilmSupporters, warn, info, nil } func (m *manager) loadTemplate(templateFeature, ilmFeature feature) error { @@ -205,8 +201,12 @@ func (m *manager) loadTemplate(templateFeature, ilmFeature feature) error { m.supporter.templateConfig.Pattern = m.supporter.templateConfig.Name + "*" m.supporter.log.Infof("Set setup.template.pattern to '%s'.", m.supporter.templateConfig.Pattern) } - if err := m.clientHandler.Load(m.supporter.templateConfig, m.supporter.info, - m.assets.Fields(m.supporter.info.Beat), m.supporter.migration); err != nil { + if err := m.clientHandler.Load( + m.supporter.templateConfig, + m.supporter.info, + m.assets.Fields(m.supporter.info.Beat), + false, // migration + ); err != nil { return fmt.Errorf("error loading Elasticsearch 
template: %+v", err) } m.supporter.log.Infof("Finished loading index template.") @@ -218,29 +218,38 @@ func (m *manager) loadEventTemplate(feature feature, ilmSupporter libilm.Support ilmSupporter.Alias().Name, ilmSupporter.Policy().Name) - if err := m.clientHandler.Load(templateCfg, m.supporter.info, nil, m.supporter.migration); err != nil { + if err := m.clientHandler.Load( + templateCfg, + m.supporter.info, + nil, // fields + false, // migration + ); err != nil { return errors.Wrapf(err, "error loading template %+v", templateCfg.Name) } m.supporter.log.Infof("Finished template setup for %s.", templateCfg.Name) return nil } -func (m *manager) loadPolicy(ilmFeature feature, ilmSupporter libilm.Supporter, policiesLoaded []string) ([]string, error) { +func (m *manager) loadPolicy(ilmFeature feature, ilmSupporter libilm.Supporter, policiesLoaded map[string]bool) error { + if !ilmFeature.enabled { + return nil + } policy := ilmSupporter.Policy().Name - if !ilmFeature.enabled || utility.Contains(policy, policiesLoaded) { - return policiesLoaded, nil + if policiesLoaded[policy] { + return nil } if ilmSupporter.Policy().Body == nil { m.supporter.log.Infof("ILM policy %s not loaded.", policy) - return policiesLoaded, nil + return nil } - _, err := ilmSupporter.Manager(m.clientHandler).EnsurePolicy(ilmFeature.overwrite) - if err != nil { - return policiesLoaded, err + if _, err := ilmSupporter.Manager(m.clientHandler).EnsurePolicy(ilmFeature.overwrite); err != nil { + return err } m.supporter.log.Infof("ILM policy %s successfully loaded.", policy) - return append(policiesLoaded, policy), nil + policiesLoaded[policy] = true + return nil } + func (m *manager) loadAlias(ilmFeature feature, ilmSupporter libilm.Supporter) error { if !ilmFeature.enabled { return nil diff --git a/idxmgmt/manager_test.go b/idxmgmt/manager_test.go index 99f090ed62b..989156f0f53 100644 --- a/idxmgmt/manager_test.go +++ b/idxmgmt/manager_test.go @@ -43,7 +43,6 @@ func TestManager_VerifySetup(t *testing.T) { version string esCfg common.MapStr - ok bool warn string }{ "SetupTemplateDisabled": { @@ -67,7 +66,6 @@ func TestManager_VerifySetup(t *testing.T) { "ILMEnabledButUnsupported": { version: "6.2.0", ilmEnabled: "true", loadILM: libidxmgmt.LoadModeEnabled, - warn: msgErrIlmDisabledES, }, "ILMAutoButUnsupported": { version: "6.2.0", @@ -97,12 +95,11 @@ func TestManager_VerifySetup(t *testing.T) { esCfg: common.MapStr{ "output.elasticsearch.enabled": false, "output.logstash.enabled": true}, - warn: "automatically disabled ILM", + warn: "Automatically disabled ILM", }, "EverythingEnabled": { templateEnabled: true, loadTemplate: libidxmgmt.LoadModeEnabled, ilmSetupEnabled: true, ilmSetupOverwrite: true, loadILM: libidxmgmt.LoadModeEnabled, - ok: true, }, } { t.Run(name, func(t *testing.T) { @@ -124,7 +121,8 @@ func TestManager_VerifySetup(t *testing.T) { } manager := support.Manager(newMockClientHandler(version), nil) ok, warn := manager.VerifySetup(tc.loadTemplate, tc.loadILM) - require.Equal(t, tc.ok, ok, warn) + require.False(t, ok, warn) + assert.Contains(t, warn, SetupDeprecatedWarning) assert.Contains(t, warn, tc.warn) }) } @@ -200,6 +198,7 @@ func TestManager_SetupTemplate(t *testing.T) { } } } + func TestManager_SetupILM(t *testing.T) { fields := []byte("apm-server fields") @@ -207,6 +206,7 @@ func TestManager_SetupILM(t *testing.T) { cfg common.MapStr loadMode libidxmgmt.LoadMode + err string templatesILMEnabled, templatesILMDisabled int policiesLoaded, aliasesLoaded int version string @@ -253,7 +253,7 @@ func 
TestManager_SetupILM(t *testing.T) { "apm-server.ilm.setup.policies": []common.MapStr{policyRollover1Day}, }, loadMode: libidxmgmt.LoadModeEnabled, - templatesILMEnabled: 5, policiesLoaded: 2, aliasesLoaded: 4, + templatesILMEnabled: 5, policiesLoaded: 2, aliasesLoaded: 5, }, "LoadModeOverwrite": { loadMode: libidxmgmt.LoadModeOverwrite, @@ -323,6 +323,7 @@ func TestManager_SetupILM(t *testing.T) { loadMode: libidxmgmt.LoadModeEnabled, version: "6.2.0", templatesILMDisabled: 4, + err: "ILM not supported", }, "Default ES Unsupported ILM setup disabled": { cfg: common.MapStr{"apm-server.ilm.setup.enabled": false}, @@ -333,6 +334,7 @@ func TestManager_SetupILM(t *testing.T) { cfg: common.MapStr{"apm-server.ilm.setup.enabled": false, "apm-server.ilm.enabled": true}, loadMode: libidxmgmt.LoadModeEnabled, version: "6.2.0", + err: "ILM not supported", }, } var testCasesILMNotSupportedByIndexSettings = map[string]testCase{ @@ -415,11 +417,16 @@ func TestManager_SetupILM(t *testing.T) { clientHandler := newMockClientHandler(version) m := defaultSupporter(t, tc.cfg).Manager(clientHandler, libidxmgmt.BeatsAssets(fields)) indexManager := m.(*manager) - require.NoError(t, indexManager.Setup(libidxmgmt.LoadModeDisabled, tc.loadMode)) - assert.Equal(t, tc.policiesLoaded, len(clientHandler.policies), "policies") - assert.Equal(t, tc.aliasesLoaded, len(clientHandler.aliases), "aliases") - require.Equal(t, tc.templatesILMEnabled, clientHandler.templatesILMEnabled, "ILM enabled templates") - require.Equal(t, tc.templatesILMDisabled, clientHandler.templates, "ILM disabled templates") + err := indexManager.Setup(libidxmgmt.LoadModeDisabled, tc.loadMode) + if tc.err != "" { + require.EqualError(t, err, tc.err) + } else { + require.NoError(t, err) + assert.Len(t, clientHandler.policies, tc.policiesLoaded) + assert.Len(t, clientHandler.aliases, tc.aliasesLoaded) + require.Equal(t, tc.templatesILMEnabled, clientHandler.templatesILMEnabled, "ILM enabled templates") + require.Equal(t, tc.templatesILMDisabled, clientHandler.templates, "ILM disabled templates") + } }) } } @@ -495,6 +502,12 @@ func (h *mockClientHandler) HasAlias(name string) (bool, error) { func (h *mockClientHandler) CreateAlias(alias libilm.Alias) error { h.aliases = append(h.aliases, alias.Name) + if alias.Name == existingILMAlias { + return reasonedError{ + error: errors.New("CreateAlias failed"), + reason: libilm.ErrAliasAlreadyExists, + } + } return nil } @@ -506,3 +519,12 @@ func (h *mockClientHandler) CreateILMPolicy(policy libilm.Policy) error { h.policies = append(h.policies, policy.Name) return nil } + +type reasonedError struct { + error + reason error +} + +func (e reasonedError) Reason() error { + return e.reason +} diff --git a/idxmgmt/supporter.go b/idxmgmt/supporter.go index e4d3f32c596..abbd22fa647 100644 --- a/idxmgmt/supporter.go +++ b/idxmgmt/supporter.go @@ -19,8 +19,8 @@ package idxmgmt import ( "fmt" + "strings" - "github.com/pkg/errors" "go.uber.org/atomic" "github.com/elastic/beats/v7/libbeat/beat" @@ -50,72 +50,29 @@ type supporter struct { info beat.Info templateConfig template.TemplateConfig ilmConfig ilm.Config - unmanagedIdxConfig *unmanaged.Config - migration bool - ilmSupporters []libilm.Supporter - - st indexState -} - -type indexState struct { - ilmEnabled atomic.Bool - isSet atomic.Bool + outputConfig common.ConfigNamespace + unmanagedIdxConfig unmanaged.Config + ilmEnabled atomic.Bool } -type unmanagedIndexSelector outil.Selector +type indexSelector outil.Selector -type ilmIndexSelector struct { - 
unmanagedSel unmanagedIndexSelector - ilmSel outil.Selector - st *indexState +// autoSelector is an outputs.IndexSelector that delegates to either an +// unmanaged or ILM index selector depending on whether ILM is enabled. +type autoSelector struct { + ilmEnabled *atomic.Bool + unmanaged outil.Selector + ilm outil.Selector } -func newSupporter( - log *logp.Logger, - info beat.Info, - templateConfig template.TemplateConfig, - ilmConfig ilm.Config, - outConfig common.ConfigNamespace, -) (*supporter, error) { - - var ( - unmanagedIdxCfg unmanaged.Config - mode = ilmConfig.Mode - st = indexState{} - ) - - if outConfig.Name() == esKey { - if err := outConfig.Config().Unpack(&unmanagedIdxCfg); err != nil { - return nil, fmt.Errorf("unpacking output elasticsearch index config fails: %+v", err) - } - - if err := checkTemplateESSettings(templateConfig, &unmanagedIdxCfg); err != nil { - return nil, err - } - } - - if outConfig.Name() != esKey || - ilmConfig.Mode == libilm.ModeDisabled || - ilmConfig.Mode == libilm.ModeAuto && unmanagedIdxCfg.Customized() { - - mode = libilm.ModeDisabled - st.isSet.CAS(false, true) - } - - ilmSupporters, err := ilm.MakeDefaultSupporter(log, mode, ilmConfig) - if err != nil { - return nil, err - } - +func newSupporter(log *logp.Logger, info beat.Info, cfg *IndexManagementConfig) (*supporter, error) { return &supporter{ log: log, info: info, - templateConfig: templateConfig, - ilmConfig: ilmConfig, - unmanagedIdxConfig: &unmanagedIdxCfg, - migration: false, - st: st, - ilmSupporters: ilmSupporters, + templateConfig: cfg.Template, + ilmConfig: cfg.ILM, + outputConfig: cfg.Output, + unmanagedIdxConfig: cfg.unmanagedIdxCfg, }, nil } @@ -128,11 +85,7 @@ func (s *supporter) Enabled() bool { // Manager instance takes only care of the setup. // A clientHandler is passed in, which is required for figuring out the ILM state if set to `auto`. -func (s *supporter) Manager( - clientHandler libidxmgmt.ClientHandler, - assets libidxmgmt.Asseter, -) libidxmgmt.Manager { - s.setIlmState(clientHandler) +func (s *supporter) Manager(clientHandler libidxmgmt.ClientHandler, assets libidxmgmt.Asseter) libidxmgmt.Manager { return &manager{ supporter: s, clientHandler: clientHandler, @@ -144,26 +97,22 @@ func (s *supporter) Manager( // depending on the supporter's config an ILM instance or an unmanaged index selector instance is returned. // The ILM instance decides on every Select call whether or not to return ILM indices or regular ones. 
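The autoSelector type introduced above is the heart of this refactoring: instead of an eagerly resolved "setup finished" state, the ILM-or-unmanaged decision is deferred to every Select call through an atomic flag that Setup stores once ILM support is known. A distilled, runnable sketch of that pattern (plain functions stand in for libbeat's outil.Selector, and the standard library's sync/atomic stands in for go.uber.org/atomic; index names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

type selectorFunc func() string

// autoSelector defers the ILM-vs-unmanaged choice to Select time, because
// whether ILM can be used is only known after Setup has queried Elasticsearch.
type autoSelector struct {
	ilmEnabled atomic.Bool
	ilm        selectorFunc
	unmanaged  selectorFunc
}

func (s *autoSelector) Select() string {
	if s.ilmEnabled.Load() {
		return s.ilm()
	}
	return s.unmanaged()
}

func main() {
	s := &autoSelector{
		ilm:       func() string { return "apm-7.0.0-span" },       // ILM write alias
		unmanaged: func() string { return "apm-7.0.0-2021.01.01" }, // plain daily index
	}
	fmt.Println(s.Select()) // unmanaged until Setup flips the flag
	s.ilmEnabled.Store(true)
	fmt.Println(s.Select()) // ILM write alias afterwards
}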
func (s *supporter) BuildSelector(_ *common.Config) (outputs.IndexSelector, error) { - sel, err := s.buildSelector(s.unmanagedIdxConfig.SelectorConfig()) + unmanagedSelector, err := s.buildSelector(s.unmanagedIdxConfig.SelectorConfig()) if err != nil { return nil, err } - unmanagedSel := unmanagedIndexSelector(sel) - - if s.st.isSet.Load() && !s.st.ilmEnabled.Load() { - return unmanagedSel, nil - } - - ilmSel, err := s.buildSelector(s.ilmConfig.SelectorConfig()) + ilmSelector, err := s.buildSelector(s.ilmConfig.SelectorConfig()) if err != nil { return nil, err } - return &ilmIndexSelector{ - unmanagedSel: unmanagedSel, - ilmSel: ilmSel, - st: &s.st, - }, nil + switch s.ilmConfig.Mode { + case libilm.ModeDisabled: + return indexSelector(unmanagedSelector), nil + case libilm.ModeEnabled: + return indexSelector(ilmSelector), nil + } + return &autoSelector{ilmEnabled: &s.ilmEnabled, unmanaged: unmanagedSelector, ilm: ilmSelector}, nil } func (s *supporter) buildSelector(cfg *common.Config, err error) (outil.Selector, error) { @@ -176,51 +125,25 @@ func (s *supporter) buildSelector(cfg *common.Config, err error) (outil.Selector MultiKey: "indices", EnableSingleOnly: true, FailEmpty: true, + Case: outil.SelectorLowerCase, } return outil.BuildSelectorFromConfig(cfg, buildSettings) } -func (s *supporter) setIlmState(handler libidxmgmt.ClientHandler) { - stSet := func() { s.st.isSet.CAS(false, true) } - - if s.st.isSet.Load() { - return - } - if s.st.ilmEnabled.Load() { - stSet() - return - } - - for _, ilmSupporter := range s.ilmSupporters { - if enabled, err := ilmSupporter.Manager(handler).CheckEnabled(); !enabled || err != nil { - stSet() - return - } - } - - s.st.ilmEnabled.CAS(false, true) - stSet() -} - -// Select either returns the index from the event's metadata or -// decides based on the supporter's ILM state whether or not an ILM index is returned -func (s *ilmIndexSelector) Select(evt *beat.Event) (string, error) { +// Select returns the index from the event's metadata if specified, +// otherwise delegating to the ILM or unmanaged indices selector. +func (s *autoSelector) Select(evt *beat.Event) (string, error) { if idx := getEventCustomIndex(evt); idx != "" { return idx, nil } - if !s.st.isSet.Load() { - return "", errors.New("setup not finished") + if s.ilmEnabled.Load() { + return s.ilm.Select(evt) } - - if s.st.ilmEnabled.Load() { - return s.ilmSel.Select(evt) - } - return s.unmanagedSel.Select(evt) + return s.unmanaged.Select(evt) } -// Select either returns the index from the event's metadata or -// the regular index. -func (s unmanagedIndexSelector) Select(evt *beat.Event) (string, error) { +// Select either returns the index from the event's metadata or the regular index. 
+func (s indexSelector) Select(evt *beat.Event) (string, error) { if idx := getEventCustomIndex(evt); idx != "" { return idx, nil } @@ -236,7 +159,7 @@ func getEventCustomIndex(evt *beat.Event) string { // returns index from alias if tmp := evt.Meta["alias"]; tmp != nil { if alias, ok := tmp.(string); ok { - return alias + return strings.ToLower(alias) } } @@ -245,20 +168,9 @@ func getEventCustomIndex(evt *beat.Event) string { if idx, ok := tmp.(string); ok { ts := evt.Timestamp.UTC() return fmt.Sprintf("%s-%d.%02d.%02d", - idx, ts.Year(), ts.Month(), ts.Day()) + strings.ToLower(idx), ts.Year(), ts.Month(), ts.Day()) } } return "" } - -func checkTemplateESSettings(tmplCfg template.TemplateConfig, indexCfg *unmanaged.Config) error { - if !tmplCfg.Enabled || indexCfg == nil { - return nil - } - - if indexCfg.Index != "" && (tmplCfg.Name == "" || tmplCfg.Pattern == "") { - return errors.New("`setup.template.name` and `setup.template.pattern` have to be set if `output.elasticsearch` index name is modified") - } - return nil -} diff --git a/idxmgmt/supporter_factory.go b/idxmgmt/supporter_factory.go index bb22010140d..138f8888cd6 100644 --- a/idxmgmt/supporter_factory.go +++ b/idxmgmt/supporter_factory.go @@ -18,7 +18,7 @@ package idxmgmt import ( - "fmt" + "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" @@ -27,66 +27,160 @@ import ( "github.com/elastic/beats/v7/libbeat/template" "github.com/elastic/apm-server/idxmgmt/ilm" + "github.com/elastic/apm-server/idxmgmt/unmanaged" logs "github.com/elastic/apm-server/log" ) -// functionality largely copied from libbeat - type IndexManagementConfig struct { - Template template.TemplateConfig - ILM ilm.Config - Output common.ConfigNamespace + DataStreams bool + Template template.TemplateConfig + ILM ilm.Config + Output common.ConfigNamespace + + unmanagedIdxCfg unmanaged.Config + registerIngestPipelineSpecified bool + setupTemplateSpecified bool + ilmSpecified bool } -// MakeDefaultSupporter creates the index management supporter for APM that is passed to libbeat. +// MakeDefaultSupporter creates a new idxmgmt.Supporter, using the given root config. +// +// The Supporter will operate in one of three modes: data streams, legacy +// managed, and legacy unmanaged. The legacy modes exist purely to run +// apm-server without data streams or Fleet integration. +// +// If (Fleet) management is enabled, then any index, template, and ILM config +// defined will be ignored and warnings logged. Index (data stream) names will +// be well defined, based on the data type, service name, and user-defined +// namespace. +// +// If management is disabled, then the Supporter will operate in one of the +// legacy modes based on configuration. 
func MakeDefaultSupporter(log *logp.Logger, info beat.Info, configRoot *common.Config) (idxmgmt.Supporter, error) { cfg, err := NewIndexManagementConfig(info, configRoot) if err != nil { return nil, err } + log = namedLogger(log) + cfg.logWarnings(log) + if cfg.DataStreams { + return dataStreamsSupporter{}, nil + } + return newSupporter(log, info, cfg) +} +func namedLogger(log *logp.Logger) *logp.Logger { if log == nil { - log = logp.NewLogger(logs.IndexManagement) - } else { - log = log.Named(logs.IndexManagement) + return logp.NewLogger(logs.IndexManagement) } - return newSupporter(log, info, cfg.Template, cfg.ILM, cfg.Output) + return log.Named(logs.IndexManagement) } +// NewIndexManagementConfig extracts and validates index management config from info and configRoot. func NewIndexManagementConfig(info beat.Info, configRoot *common.Config) (*IndexManagementConfig, error) { - cfg := struct { - ILM *common.Config `config:"apm-server.ilm"` - Template *common.Config `config:"setup.template"` - Output common.ConfigNamespace `config:"output"` - }{} + var cfg struct { + DataStreams *common.Config `config:"apm-server.data_streams"` + RegisterIngestPipeline *common.Config `config:"apm-server.register.ingest.pipeline"` + ILM *common.Config `config:"apm-server.ilm"` + Template *common.Config `config:"setup.template"` + Output common.ConfigNamespace `config:"output"` + } + + var setupTemplateSpecified bool if configRoot != nil { - if err := configRoot.Unpack(&cfg); err != nil { + ok, err := configRoot.Has("setup.template", -1) + if err != nil { return nil, err } + setupTemplateSpecified = ok } - tmplConfig, err := unpackTemplateConfig(cfg.Template) + configRoot, err := mergeDefaultConfig(configRoot) if err != nil { - return nil, fmt.Errorf("unpacking template config fails: %+v", err) + return nil, errors.Wrap(err, "merging config defaults failed") + } + if err := configRoot.Unpack(&cfg); err != nil { + return nil, err + } + + templateConfig := template.DefaultConfig() + if err := cfg.Template.Unpack(&templateConfig); err != nil { + return nil, errors.Wrap(err, "unpacking template config failed") } ilmConfig, err := ilm.NewConfig(info, cfg.ILM) if err != nil { - return nil, fmt.Errorf("creating ILM config fails: %v", err) + return nil, errors.Wrap(err, "creating ILM config fails") + } + + var unmanagedIdxCfg unmanaged.Config + if cfg.Output.Name() == esKey { + if err := cfg.Output.Config().Unpack(&unmanagedIdxCfg); err != nil { + return nil, errors.Wrap(err, "failed to unpack output.elasticsearch config") + } + if err := checkTemplateESSettings(templateConfig, &unmanagedIdxCfg); err != nil { + return nil, err + } } return &IndexManagementConfig{ - Template: tmplConfig, - ILM: ilmConfig, - Output: cfg.Output, + DataStreams: cfg.DataStreams.Enabled(), + Output: cfg.Output, + Template: templateConfig, + ILM: ilmConfig, + + unmanagedIdxCfg: unmanagedIdxCfg, + registerIngestPipelineSpecified: cfg.RegisterIngestPipeline != nil, + setupTemplateSpecified: setupTemplateSpecified, + ilmSpecified: cfg.ILM != nil, }, nil } -func unpackTemplateConfig(cfg *common.Config) (template.TemplateConfig, error) { - config := template.DefaultConfig() - if cfg == nil { - return config, nil +func checkTemplateESSettings(tmplCfg template.TemplateConfig, indexCfg *unmanaged.Config) error { + if !tmplCfg.Enabled || indexCfg == nil { + return nil + } + if indexCfg.Index != "" && (tmplCfg.Name == "" || tmplCfg.Pattern == "") { + return errors.New("`setup.template.name` and `setup.template.pattern` have to be set if 
`output.elasticsearch` index name is modified") + } + return nil +} + +func (cfg *IndexManagementConfig) logWarnings(log *logp.Logger) { + format := "deprecated config `%s` specified. This config will be removed in 8.0." + if cfg.DataStreams { + format = "`%s` specified, but will be ignored as data streams are enabled" + } + if cfg.setupTemplateSpecified { + log.Warnf(format, "setup.template") + } + if cfg.ilmSpecified { + log.Warnf(format, "apm-server.ilm") + } + if cfg.registerIngestPipelineSpecified { + log.Warnf(format, "apm-server.register.ingest.pipeline") + } + if cfg.unmanagedIdxCfg.Customized() { + log.Warnf(format, "output.elasticsearch.{index,indices}") + } +} + +func mergeDefaultConfig(configRoot *common.Config) (*common.Config, error) { + defaultConfig := common.MustNewConfigFrom(` +setup.template.settings: + index: + codec: best_compression + mapping.total_fields.limit: 2000 + number_of_shards: 1 + _source.enabled: true`) + if configRoot == nil { + return defaultConfig, nil } - err := cfg.Unpack(&config) - return config, err + // NOTE(axw) it's important that we merge onto the root config, + // due to how config variable resolution works; variables are + // resolved using the root of the left-most config in the merge. + // + // We merge the root config back over the defaults to ensure + // user-defined config takes precedence. + return common.MergeConfigs(configRoot, defaultConfig, configRoot) } diff --git a/idxmgmt/supporter_factory_test.go b/idxmgmt/supporter_factory_test.go index 132007d303f..db4290d160c 100644 --- a/idxmgmt/supporter_factory_test.go +++ b/idxmgmt/supporter_factory_test.go @@ -21,13 +21,20 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/idxmgmt" libilm "github.com/elastic/beats/v7/libbeat/idxmgmt/ilm" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/template" + "github.com/elastic/apm-server/datastreams" "github.com/elastic/apm-server/idxmgmt/unmanaged" ) @@ -55,9 +62,10 @@ func TestMakeDefaultSupporter(t *testing.T) { assert.True(t, s.Enabled()) assert.NotNil(t, s.log) assert.True(t, s.templateConfig.Enabled) + assert.Equal(t, "best_compression", s.templateConfig.Settings.Index["codec"]) assert.Equal(t, libilm.ModeAuto, s.ilmConfig.Mode) assert.True(t, s.ilmConfig.Setup.Enabled) - assert.Equal(t, &unmanaged.Config{}, s.unmanagedIdxConfig) + assert.Equal(t, unmanaged.Config{}, s.unmanagedIdxConfig) }) t.Run("ILMDisabled", func(t *testing.T) { @@ -73,6 +81,7 @@ func TestMakeDefaultSupporter(t *testing.T) { assert.Equal(t, libilm.ModeDisabled, s.ilmConfig.Mode) assert.True(t, s.ilmConfig.Setup.Enabled) }) + t.Run("SetupTemplateConfigConflicting", func(t *testing.T) { s, err := buildSupporter(map[string]interface{}{ "output.elasticsearch.index": "custom-index", @@ -80,6 +89,131 @@ func TestMakeDefaultSupporter(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "`setup.template.name` and `setup.template.pattern` have to be set ") assert.Nil(t, s) + }) + +} + +func TestMakeDefaultSupporterDataStreams(t *testing.T) { + supporter, err := MakeDefaultSupporter(nil, beat.Info{}, common.MustNewConfigFrom(map[string]interface{}{ + "apm-server.data_streams.enabled": "true", + })) + 
require.NoError(t, err) + + // The data streams supporter does not set up templates or ILM. These + // are expected to be set up externally, typically by Fleet. + assert.False(t, supporter.Enabled()) + + // Manager will fail when invoked; it should never be invoked automatically + // as supporter.Enabled() returns false. It will be invoked when running the + // "setup" command. + manager := supporter.Manager(nil, nil) + assert.NotNil(t, manager) + ok, warnings := manager.VerifySetup(idxmgmt.LoadModeEnabled, idxmgmt.LoadModeEnabled) + assert.True(t, ok) + assert.Zero(t, warnings) + err = manager.Setup(idxmgmt.LoadModeEnabled, idxmgmt.LoadModeEnabled) + assert.EqualError(t, err, "index setup must be performed externally when using data streams, by installing the 'apm' integration package") + + selector, err := supporter.BuildSelector(nil) + require.NoError(t, err) + index, err := selector.Select(&beat.Event{ + Fields: common.MapStr{ + datastreams.TypeField: datastreams.TracesType, + datastreams.DatasetField: "apm", + datastreams.NamespaceField: "production", + }, + }) + require.NoError(t, err) + assert.Equal(t, "traces-apm-production", index) +} + +func TestMakeDefaultSupporterDataStreamsWarnings(t *testing.T) { + core, observed := observer.New(zapcore.DebugLevel) + logger := logp.NewLogger("", zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewTee(in, core) + })) + + attrs := map[string]interface{}{ + "apm-server.data_streams.enabled": "true", + "apm-server.ilm.enabled": "auto", + "apm-server.register.ingest.pipeline.enabled": "true", + "output.elasticsearch.indices": map[string]interface{}{}, + "setup.template.name": "custom", + "setup.template.pattern": "custom", + } + s, err := MakeDefaultSupporter(logger, beat.Info{}, common.MustNewConfigFrom(attrs)) + assert.NoError(t, err) + assert.NotNil(t, s) + + var warnings []string + for _, record := range observed.All() { + assert.Equal(t, zapcore.WarnLevel, record.Level, record.Message) + warnings = append(warnings, record.Message) + } + assert.Equal(t, []string{ + "`setup.template` specified, but will be ignored as data streams are enabled", + "`apm-server.ilm` specified, but will be ignored as data streams are enabled", + "`apm-server.register.ingest.pipeline` specified, but will be ignored as data streams are enabled", + "`output.elasticsearch.{index,indices}` specified, but will be ignored as data streams are enabled", + }, warnings) +} + +func TestMakeDefaultSupporterStandaloneWarnings(t *testing.T) { + core, observed := observer.New(zapcore.DebugLevel) + logger := logp.NewLogger("", zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewTee(in, core) + })) + + attrs := map[string]interface{}{ + "apm-server.ilm.enabled": "auto", + "apm-server.register.ingest.pipeline.enabled": "true", + "output.elasticsearch.indices": map[string]interface{}{}, + "setup.template.name": "custom", + "setup.template.pattern": "custom", + } + + s, err := MakeDefaultSupporter(logger, beat.Info{}, common.MustNewConfigFrom(attrs)) + assert.NoError(t, err) + assert.NotNil(t, s) + + var warnings []string + for _, record := range observed.All() { + assert.Equal(t, zapcore.WarnLevel, record.Level, record.Message) + warnings = append(warnings, record.Message) + } + assert.Equal(t, []string{ + "deprecated config `setup.template` specified. This config will be removed in 8.0.", + "deprecated config `apm-server.ilm` specified. This config will be removed in 8.0.", + "deprecated config `apm-server.register.ingest.pipeline` specified. 
This config will be removed in 8.0.", + "deprecated config `output.elasticsearch.{index,indices}` specified. This config will be removed in 8.0.", + }, warnings) +} + +func TestNewIndexManagementConfig(t *testing.T) { + cfg := common.MustNewConfigFrom(map[string]interface{}{ + "path.config": "/dev/null", + "setup.template.fields": "${path.config}/fields.yml", }) + indexManagementConfig, err := NewIndexManagementConfig(beat.Info{}, cfg) + assert.NoError(t, err) + require.NotNil(t, indexManagementConfig) + + templateConfig := template.DefaultConfig() + templateConfig.Fields = "/dev/null/fields.yml" + templateConfig.Settings = template.TemplateSettings{ + Index: map[string]interface{}{ + "codec": "best_compression", + "mapping": map[string]interface{}{ + "total_fields": map[string]interface{}{ + "limit": uint64(2000), + }, + }, + "number_of_shards": uint64(1), + }, + Source: map[string]interface{}{ + "enabled": true, + }, + } + assert.Equal(t, templateConfig, indexManagementConfig.Template) } diff --git a/idxmgmt/supporter_test.go b/idxmgmt/supporter_test.go index 8260c6c5e0d..e42858a7787 100644 --- a/idxmgmt/supporter_test.go +++ b/idxmgmt/supporter_test.go @@ -15,23 +15,6 @@ // specific language governing permissions and limitations // under the License. -//// Licensed to Elasticsearch B.V. under one or more contributor -//// license agreements. See the NOTICE file distributed with -//// this work for additional information regarding copyright -//// ownership. Elasticsearch B.V. licenses this file to you under -//// the Apache License, Version 2.0 (the "License"); you may -//// not use this file except in compliance with the License. -//// You may obtain a copy of the License at -//// -//// http://www.apache.org/licenses/LICENSE-2.0 -//// -//// Unless required by applicable law or agreed to in writing, -//// software distributed under the License is distributed on an -//// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -//// KIND, either express or implied. See the License for the -//// specific language governing permissions and limitations -//// under the License. 
-// package idxmgmt import ( @@ -148,26 +131,26 @@ func TestIndexSupport_BuildSelector(t *testing.T) { withIlm: "apm-7.0.0-sourcemap", fields: common.MapStr{"processor.event": "sourcemap"}, }, - "MetaInformationAlias": { + "MetaInformationAlias-lowercased": { noIlm: "apm-7.0.0-meta", withIlm: "apm-7.0.0-meta", //meta overwrites ilm fields: common.MapStr{"processor.event": "span"}, - meta: common.MapStr{"alias": "apm-7.0.0-meta", "index": "test-123"}, + meta: common.MapStr{"alias": "APM-7.0.0-meta", "index": "test-123"}, cfg: common.MapStr{"output.elasticsearch.index": "apm-customized"}, }, "MetaInformationIndex": { noIlm: fmt.Sprintf("apm-7.0.0-%s", day), withIlm: fmt.Sprintf("apm-7.0.0-%s", day), //meta overwrites ilm fields: common.MapStr{"processor.event": "span"}, - meta: common.MapStr{"index": "apm-7.0.0"}, + meta: common.MapStr{"index": "APM-7.0.0"}, cfg: common.MapStr{"output.elasticsearch.index": "apm-customized"}, }, - "CustomIndex": { + "CustomIndex-lowercased": { noIlm: "apm-customized", withIlm: "apm-7.0.0-metric", //custom index ignored when ilm enabled ilmAuto: "apm-customized", //custom respected for ilm auto fields: common.MapStr{"processor.event": "metric"}, - cfg: common.MapStr{"output.elasticsearch.index": "apm-customized"}, + cfg: common.MapStr{"output.elasticsearch.index": "APM-customized"}, }, "DifferentCustomIndices": { noIlm: fmt.Sprintf("apm-7.0.0-%s", day), @@ -203,7 +186,8 @@ func TestIndexSupport_BuildSelector(t *testing.T) { // create initialized supporter and selector supporter := defaultSupporter(t, test.cfg) - supporter.setIlmState(handler) + err := supporter.Manager(handler, nil).Setup(libidxmgmt.LoadModeDisabled, libidxmgmt.LoadModeDisabled) + require.NoError(t, err) s, err := supporter.BuildSelector(nil) require.NoError(t, err) @@ -225,11 +209,6 @@ func TestIndexSupport_BuildSelector(t *testing.T) { test.expected = test.withIlm checkIndexSelector(t, "ILMTrueSupported"+name, test, ilmSupportedHandler) - //ilm true but unsupported - test.cfg["apm-server.ilm.enabled"] = true - test.expected = test.noIlm - checkIndexSelector(t, "ILMTrueUnsupported"+name, test, ilmUnsupportedHandler) - //ilm=false test.cfg["apm-server.ilm.enabled"] = false test.expected = test.noIlm @@ -248,20 +227,6 @@ func TestIndexSupport_BuildSelector(t *testing.T) { test.expected = test.noIlm checkIndexSelector(t, "ILMAutoUnsupported"+name, test, ilmUnsupportedHandler) } - - t.Run("uninitializedSupporter", func(t *testing.T) { - // create initialized supporter and selector - supporter := defaultSupporter(t, common.MapStr{}) - - s, err := supporter.BuildSelector(common.MustNewConfigFrom(common.MapStr{})) - require.NoError(t, err) - - // test selected index - idx, err := s.Select(testEvent(common.MapStr{}, common.MapStr{})) - require.Error(t, err) - assert.Contains(t, err.Error(), "setup not finished") - assert.Equal(t, "", idx) - }) } func testEvent(fields, meta common.MapStr) *beat.Event { diff --git a/idxmgmt/unmanaged/config.go b/idxmgmt/unmanaged/config.go index d73a700454e..d036a0a9e9b 100644 --- a/idxmgmt/unmanaged/config.go +++ b/idxmgmt/unmanaged/config.go @@ -47,9 +47,11 @@ func (cfg *Config) SelectorConfig() (*libcommon.Config, error) { // set default indices if not set if cfg.Indices == nil { - if indicesCfg, err := libcommon.NewConfigFrom(conditionalIndices()); err == nil { - idcsCfg.SetChild("indices", -1, indicesCfg) + indicesCfg, err := libcommon.NewConfigFrom(conditionalIndices()) + if err != nil { + return nil, err } + idcsCfg.SetChild("indices", -1, indicesCfg) } } diff
--git a/include/fields.go b/include/fields.go index d7e274b54b2..38ccb8292be 100644 --- a/include/fields.go +++ b/include/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetBuildFieldsFieldsYml returns asset data. -// This is the base64 encoded gzipped contents of build/fields/fields.yml. +// This is the base64 encoded zlib format compressed contents of build/fields/fields.yml. func AssetBuildFieldsFieldsYml() string { - return "eJzsvXtTHLmSOPr/fApdNuKHOdsUD4ONuXcjfgwwM8TamDH4zJ5Zb9DqKnW3DlVSjaQC92zsd7+hTEmlegCNTfkxy5zdGbq7SkqlUql857+Q3w7enZ6c/vz/kCNJhDSEZdwQM+eaTHnOSMYVS02+GBFuyA3VZMYEU9SwjEwWxMwZOT48J6WS/2SpGf3wL2RCNcuIFPD9NVOaS0G2kt1kM/nhX8hZzqhm5JprbsjcmFLvb2zMuJlXkySVxQbLqTY83WCpJkYSXc1mTBuSzqmYMfjKDjvlLM908sMP6+SKLfYJS/UPhBhucrZvH/iBkIzpVPHScCngK/KTe4e4t/d/IGSdCFqwfbL6fw0vmDa0KFd/IISQnF2zfJ+kUjH4rNgfFVcs2ydGVfiVWZRsn2TU4MfGfKtH1LANOya5mTMBaGLXTBgiFZ9xYdGX/ADvEXJhcc01PJSF99hHo2hq0TxVsqhHGNmJeUrzfEEUKxXTTBguZjCRG7GernfDtKxUysL8J9PoBfyNzKkmQnpocxLQM0LSuKZ5xQDoAEwpyyq307hh3WRTrrSB91tgKZYyfl1DVfKS5VzUcL1zOMf9IlOpCM1zHEEnuE/sIy1Ku+mr25tbL9Y3d9e3n19s7u1v7u4/30n2dp//vhptc04nLNe9G4y7KSeWiuEL/PMSv79iixupsp6NPqy0kYV9YANxUlKudFjDIRVkwkhlj4SRhGYZKZihhIupVAW1g9jv3ZrI+VxWeQbHMJXCUC6IYNpuHYID5Gv/Ochz3ANNqGJEG2kRRbWHNABw7BE0zmR6xdSYUJGR8dWeHjt0dDD53yu0LHOeAnQr+2RlKuX6hKqVEVlh4tp+UyqZVSn8/j8xggumNZ2xOzBs2EfTg8afpCK5nDlEAD24sdzuO3TgT/ZJ9/OIyNLwgv8Z6M7SyTVnN/ZMcEEoPG2/YCpgxU6njapSU1m85XKmyQ03c1kZQkVN9g0YRkSaOVOOfZAUtzaVIqWGiYjyjbRAFISSeVVQsa4YzegkZ0RXRUHVgsjoxMXHsKhyw8s8rF0T9pFre+TnbFFPWEy4YBnhwkgiRXi6vZG/sDyX5Dep8izaIkNnd52AmNL5TEjFLulEXrN9srW5vdPduddcG7se954OpG7ojDCazv0qmzT2nzEJIV1tr/xXTEp0xgRSimPrB+GLmZJVuU+2e+joYs7wzbBL7hg55koJndhNRjY4NTf29FgGauwFN3VbQcXC4pzaU5jn9tyNSMYM/iEVkRPN1LXdHiRXaclsLu1OSUUMvWKaFIzqSrHCPuCGDY+1T6cmXKR5lTHyI6OWD8BaNSnogtBcS6IqYd928yqdwI0GC03+5pbqhtRzyyQnrObHQNkWfspz7WkPkaQqIew5kYggC1u0PuWGvJkzFXPvOS1LZinQLhZOalgqcHaLAOGocSqlEdLYPfeL3ScnOF1qJQE5xUXDubUHcVTDl1hSIE4SmTBqkuj8Hpy9AZnE3ZzNBbkdp2W5YZfCU5aQmjZi7ptJ5lEHbBcEDcKnSC1cE3u/EjNXsprNyR8Vq+z4eqENKzTJ+RUj/06nV3RE3rGMI32USqZMay5mflPc47pK55ZLv5YzbaieE1wHOQd0O5ThQQQiRxQGcaU+Haycs4Ipml9yz3XceWYfDRNZzYs6p/rWc90+S8d+DsIze0SmnCkkH64dIp/xKXAgYFN6LdC1F2rsVaYKEA+8BEdTJbW9/bWhyp6nSWXIGLebZ2PYD7sTDhkR09ijO9Pdzc1pAxHt5Qd29llLfy/4H1a+efi6w31rSRQJG967gYt9wgiQMc9uXV7WWJ799xALdGILnK+YI3R2UBOKTyE7xCtoxq8ZyC1UuNfwaffznOXltMrtIbKH2q0wDGxuJPnJHWjChTZUpE6OafEjbScGpmSJxF2npL5OWUkVnOIwNtdEMJahAnIz5+m8O1U42aks7GRWvo7WfTK1kq/nPLBUZEn+Kzk1TJCcTQ1hRWkW3a2cStnYRbtRQ+zixaK8Y/s8t7MTEG3oQhOa39j/BNxaWVDPPWnitjpxHN+1t3lSo0YEnh2wWj+LJO6mmLD6EbjC+LSx8fWOtQmgsfkFTedWJ+iiOB7H49lpmwOg+u9Oj20iuwXTi2Qz2VxX6XYsxuiGDFMZKWQhK03O4Uq4R545EITWr+AtQp4dnK/hwXTSiQMslUIw0BhPhGFKMEPOlDQylbmD9NnJ2RpRsgJ9sVRsyj8yTSqRMbzIrbCkZG4Hs9xNKlJIxYhg5kaqKyJLq0dKZQUer+SxOc2n9gVK7H2XM0KzgguujT2Z1164smNlskBJjBri9FZcRFFIMSJpzqjKFwH7UxByA7Qy5+kCBMs5s6IvLDBZ+sIUVTEJAs1dV2Uuw63d2Ap3JeA4VhGVKQhXDqLONjl5I3wdCN7tohvo2cH56RqpYPB8Ud84GoXngHo8EyeNdUekt7W79eJVY8FSzajgfwJ7TLrXyOeICaCmXMZYjlid1+9IV+UjIGOpQu+TKc11fSNkbEqr3OCQzR8be/A2WhPM18HDz1JaGnz9+jA6g2nOW7rEYf3NHcrEgXvTHjZPj1Q7AuSG27OApO+3yR1BC95UempzSoJiM6oyEB6tbCiFHkXPo+A44Whu49Jqn9Nc3hDFUqtXNVTXi8MzNyreTDWYHdjsF/bxCDI4gJqJoDLYZ87/cUpKml4x80yvJTALarulYyGdqdCsZEW7xqRe11FgM2PawuGkcY8lo6jQFIBJyLksWJCPK416hmGqICveVibVSq1ZKzb13MqBIloL1Hj03M9OD8SdnbCgB4EeGCHAHUsLlpj5ba6niOFHjdYRkZ/A3l6VrixC3Ki1AsaFBe+flcANAH0MNSxvyewZrMavkKYzpBWscL/W4UR7E1IwPOF4G36eYCqEw4OiGs0yollBheEp8H720Tipjn1EeX2EQpTnCDrIdkaSa26Xy/9ktXJtF8oUKNyam4q67TiZkoWsVJhjSvPcE5+/ESw3nUm1GNlHvVCiDc9zwoRVLx3don3SCi4Z08aSh0WpRdiU53lgaLQslSwVp4bliwcoVjTLFNN6KJ0KqB21aEdbbkIn/wQ2U0z4rJKVzhdIzfBOYJg3Fi1aF
gzssiTnGuxWJ2cjQv09KxWh9mL5SLS0dJIQ8o8as05MA8Nhza/njCh642HydD9O3BdjRFlTyhRWCa+FyKxC2yFejeOEl2MLyjhBsMYjkrGSicyJ+SijS1EDASq927Faikr+113gVCdPd3gE1WRhmL5HtI/2Hi08zdcagPxof0DrTvCwuDPpSAJZZ3er9nYagCFhD6B0OB6O4yeNOWdMJik3i8uBDASHVmbv3Z03VkdgNO+CI4XhggkzFEynkbEiTNaB71QqMycHBVM8pT1AVsKoxSXX8jKV2SCowynIyflbYqfoQHh4cCtYQ+2mA6l3Qw+poFkXU8Ae71emZ0xelpKHu6npHJBixk2V4X2dUwMfOhCs/jdZycHVtP7yefJia2fv+eaIrOTUrOyTnd1kd3P31dYe+Z/VDpCPyxNbNkDN1Lq/j6OfUOL36BkRZwNBKUxOyUxRUeVUcbOIL9YFSe0FD2JndIEe+nszWJiQwrlCiSpl9sZwwvc0l1K5i2cEFpU5r0Xb+oZC8HJSzhea2z+8hyP1x1pHIJxKE7lxwX/D0e5QwAU5Y9KvtmuHmUhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzCmoji5T0whAeaxHlyFoQ0zxHhsogpC42x3pDjXYsnZ9c79ouTs+sXtfDZkrcKmg6AmzcHh7dBTRo2b5O08dJ7rG/BzYVVL1FLOjmzEzmdAQNTTg8uggJOnrFkljhrEs1jQwFBbdMbmhqujXBWIp3TKrVgfhQzkkuakQnNqUjh6E65YjdW5QEdX8nKnugWxu2iS6nMwwRcL+Roo3i/1Btjw47/veADddsHyHuNVZ/h258k3W034ejsyTJC5+37ceb24Dbit9xJG6ZYdtknVz7e9WaVmzmfzZk20aQeRzj3CBZSlizzIOtq4sXRsP8/1T4evKai4ZwuOpUKwkiSGcj2SSqLFcI1WYk+t11PGE7jXEoZM0wVcBWXiqVcW10L7CgUtV9wxEIYUTXJeUp0NZ3yj2FEeObZ3Jhyf2MDH8EnrI61lpALtbCUaiQaDj5ye/Xh9TpZEM2LMl8QQ6/qXUVtOafagF8DY2lQMRfSEFD6bliew9ovXh/Vzt+VVCbV1Ur3Lq2R0SAJI8tL2P4vQBFsOrUH+JrZWZ1M4/bwGbt4fbQ2Qm/OlZA3wlvJGmARh/qRN0cCikpak70bD67ILvG05w3DWjzWGALq+b7JBkjmNoqpN2I52oHvG2RTaaaSYSkm1sjQcC0VmoPt5OijKhiYSeT0No5BBXl9dHAGoRC44qMwVEwqq93VsYLyfKDFWfGfwAReZkm6AEyrPO+RJL9Lw4xd8KomdkkwHSgY9JrynE7yrjB7kE+YMuSYC22YI7EGbsDO+tUIEGYfngJxkYPF4HTjUKYu5grX513lYJHcKHNqrATSQ6gI54DqcrwTOFkXiDnV88G0dcQU8B07j+XJqVSKWdG3EfA1RcM4MChBqJBiEYePohAXkcp7zVwwyxhWwTM0aMMHu7pxCDJMpZjiXtG8MScVmb2SakcO8VHBfUQ1SExTh5SCDgZzdqF4PAX5q7G087mVttGqAsGFXHQXHfE0Cjyt4TmWFS4vOI79F7f7jTHRgCDpBf8CDEXAGTpVNAQf12GV6ADCmCSvTkBkErk1jHJK3jCjeIrhTToOn6KCHB9uY/CUpb4pM+mcaTAqRaMTbrSLXK2BtJTbDLhuRM5yHcJymiC4cVUlXEisYoU0IYiHyMponrFopjZkCBMlLmbTL8gTmKhfdQaxZmw4DloPBMGpbnKv8tlhua5BdQh7iIswBXPtcFx/9aJGEM4FQbmx44RnIdDanegFyfh0ylSssIPZj0N4sb0H7TFcN0xQYQgT11xJUTRtRjVtHfx2Hibn2cg7ZYD+ydt3P5OTDEOhIUigajOXroD64sWLly9f7u3tvXrV8nOhiMFzbhaXf9aewMfG6kE0D7HzWKyg+xFoGo5KfYg6zKHS64xqs77VsuC5+LXhyOHExy2eHHnuBbD6Q9gGlK9vbT/f2X3xcu/VJp2kGZtu9kM8oDgQYI4jTLtQR/ZG+LIbKPloEL3xfCCKmbwTjWY7KVjGq6YyXip5zbOlHNGf7eOCs+YnTPzhjPN+6I0eEfpnpdiIzNJyFA6yVCTjM25oLlNGRfemu9GNZaFRfKBFOZv4Jx63+DqWGbvUfCaovTob97LMGDlv/HL7BX0xZ5q1E0Qa4hrcdBMuqFrApCRMqpcPOcTg8HtEqImUOaOiD20/4k8gydIShAWOcZYOFos+F9XT9akZVbHVMOwt8pIHVRtqqsGCXg6yjLuQti6WgdKZstdGakV1BKUnDr1COdyliczstZ2qRWnkTNFyzlPClJIK87g6o17TnGexR86qUarSxs9HXjN6zUgloqgtPIb+1foVfz7r8cOwN1STSqRzll6xnhj/43fv3r67fH968e79+cXx0eW7t28vlt6jCjMSB3JcnePwDYYdSD/wuzoMgKdKajk15FCqUjbC8O9dCqCRLXNf3nE8Vs+NVAzl03gre7aHpPOmyfrvdk8pRPrVr9/2HqRhYeKdD20ageRq+VitNYIo6uKgpMgXzRysyYIYKXONUWwUzAyQFcPSK5RNkQ47JPOwgwzE+pl47ec7aGKBK6XJga6ZsiJfRujMCuGRNjdnNQ8Vpilp9h432kD+PWdpGcTUFwcweUfG4c6Iv7wjDjg82Iz1dFGYnXzeKMOwZKldjQMyQIFE4Ozjzhsnp/EgUXJ4dFfNWV5GVg1QdNCLF4bWToUSC3uzGh7MVsvcWEMaHurF86wp/PGCzgYVRmOhCiYLIUQIkCW0ScVzY/XAHtAMnQ0EWU1ZDi46a5mZo5T1u6ePUtfvSF5vi+kwq8sDb8w74HbUi66jJIIcijQ7lCCKo5OCCjpD5s91TQgdIQpT5iM+EoUcx5zkqPX1HbwkevTu0HRkuNHTEHaEbvGNZuZ4z5hRNPp9cejIflwc+rcYKN2I814qWjrcMq7axCNFS4dhIWr6KVr6KVr6f3e0dHwwfVCNKy3T3q8vFTIds8KnuOmnuOnHAekpbnp5nD3FTT/FTX9PcdPRJfa9BU83QCfDRFDz0s4W3/T3hA2zRrxwqfg1NYwcvfl9rS9iGE4N6CHfVNA0ROlGxhm3UjDZ1LgxkkwWgIkjBiWGHn+FQ4RBP0Bs+3Kx0LfS8tcOiM46EuVTVPRTVPRTVPRTVPRTVPRTVHSb4J6iop+iop+iop+ior9llvbZUdFZjteL9369fg0f7y7Lu0zEFcSb5HyiqOJMk2whaIFqlEe5pJmvfOyKrIJJxv38hoqFq1IXF2l1JaMkWdFzCkmOjXlWXIFcHz6Lhh4fSzepQjV8CPBgBseDWvQ0zz3qpjLP5Q0Xs30Pzd/IES5gPefiys23IM/GSZbn4zVX+M6riFKQ37jI5I2u3z9HcN9iZM6zcaJl33vvBf+4DjJbZ+0dWBpgLHI+6RuwoOnb8+Vdgc2wvOQ7intrQf4UBvfth8G1t+yvExXXWtlTkNxQQXItRD/F
zN2CJysxJkW2OxBDfHO0i1M8CB49p1sDAXT+y8HWp0G0vftiOJi2d198GlS7zn47CFS7W9sPg2ogDt3Qdp1w074261KaBS21N3rHPB1aHUlBMq6vusfmiinB8ufbiZd8l1huSc1Qat1PVZ4jxHaSztpbwB/uf3CC5QesOf18+8MnLQgsjCUVi4GWdRLKzuA0nQ0a+WSYjEBrjqLkOVuHGNdHvYhLlkSADb3alov8ExZ7RuM4gvsXZ4e/7K2V/viru24WTn/gyl4kz5NXLzY3k62XO1u7D1ii7+BzCWsdNNHNLfRziPX87ODk9CI5/o/jByzRNdAZel1ums9Z30o4jR8+Hhx7NRf+fhsUVuRNK3cjIFggRKOs/tHp+X0WiJ8asbZ2wqPTc/JHxcDSYAVVKvQNi1p32d9dYrYTWBmHZNdQSrmuee/HWpBScQm2hhkzWEkah3WDPhtnQkOa4z48P15zTXQWfpJ4dLA6+1LMaC6r2xm5EXHaEDqs0VlCdWybcDCgWH3DFKv3Di2nXOM4XSjx1fHaQyKDGyt+9Jj11QNBqFJ04ZGBWHbvo5uIpnMHBtGu6rliplIiMmj6ZniuDFgkMTAC1u0rtnAoq+N1/d7gFmjm+7I1wpEnC3J8eF63zXiHJdxxrLmV4aGtQmwEKOrl4I9+ckFu7FvHh+du+HYEkt1mS34Q9YR+fOxaAr80Q8rtc57MyYEhBRe8qIqR+7K2CrhFFVbjiztoje0sYwscpP53lsF17RsZWWErDEntaCkIK9z4No5Uk1JqzSfob8igIrm9+WltKnFGQx933A8o1STFjjaNOPYWRSZpTgeLWMecfYrROWFDfG5BhhTDofERxpRgYf8Oszw57QU9qtswiIsboI24I0YstDpFusPBKBZN8HF0+GrJRKa97wWyrIFheZTEA/q1dwTtrc3E/18vFoaMW7xoOuEtxUXpyi3QSYll7nWzcRB1xhA5JYenB2+O7YGYMIss+35+zbJRzJxWVzUZo7OkZjEmyl+QwjdekkoxXUqL4mDZiwaBc5mQk8CrhDTe094e0zc3HEN7Bh8sP7Y3D4PGpJ1tubm5SW4Jw/A7Y8wyLufbApUs7iEzB2LIrsFCajk3rBcQ0LsJ3uZE03nM2NkU+FIjz4LrlKqMZQn5nSnpc+gLsNnMXSgqstAaf5MaaThFT1x7P50OWMfgYl7XMPhEFgOk2bQYMJoxdTnNfXPIIczfcGfLKdkmOTOGKeCSODOBmRuFSEpsZVQXO9gnBwcjcnE4Iu+ORuTdwYgcHI3I4dGIHL3tkKz7uE7eHdV/NuPHB3NP2x2yS8PYvdhNTTWYjeuWt0rOFC2QAkOb3oAE+wiIZZhcEw0EWWslr/NxkDnoHg1qe2trq7FuWfbEFT/64p0nSgo0l6MYhemwzhx9xQUE0KEA25BpSWhpGkcvQS9G43FXN4fBwHIcBmVkwAw4CeMxb8XRr++P3/2jgaPAGb+YxODa/LjbAvWSe4WDBgMf8l6EC7EFWnzvBXNaqyCTkGK9VFwY6NeXzim0tFaaPJuwXN6Q59uQeGchIFvbL9ZGEe1L3Xij5uVBQ8J2TEyntLRnimpGtjbhCpnBHB+Ojo7WajH8R5peEZ1TPXca3x+VhKSmMLIbKiEXdKJHJKVKcTpjTnfQKKPmPEq/mzKWxSOkUlwz5YKDP5gR+aDwrQ8C6I85n8aD7tiwzV89FvYp/vWbiX8NRBGQPyQxhElAxastC26BdQvBDol2GYUbaA4qoUusAKCBEYaZRjVqdDXZtuvcShxWgDRGDZzXEDacjF57rcdYGSGJCEmMojyH7oJMcdkv+PYj/Sn6GNnfU/Txg6KPa/r5MgqC05PuFioODg6akrHXVS8/J4fooGOiy3NycmZlOAa1wMaxaWPcsjH4H8fe1Odoh0+nPK1ysCBVmo3IhKW00sEyfU0VZ2bhlaOYUAtqtFUK7VAOrIQcfzTKt/wD+KIKAx5Qg+3PJQGraISccS2uQst3boI5C3slZOyjfbuwVBIPjSIBvgS/M6o5hKiFEevmeiipWOF2Krt1FYN20zadNL/bam8wSMJfQhHwc/WnGp6+hVigBnQDno3V+HAEA78P2chGDtFWJgX6a15e0MOwLtcTOQgglGXGr5mG7oWRa6HRzhAeSxWLQ6UyocMoU4St7SNYFooaAG/wd+6ABhCt+aGNOWChZMqt/5ks0fqaL+wQWspwrzhtDU/HWkIORAb1WlMpasXVYbV59m93VHh7vtXjHE/o8NJg+A3V9dKGC+j48D4X0Btm6HpsrPbVmZw1evnCfve1mVbsj4orlkGhs0eIcDg+PA9+VLjHAn7tYjQxMiFjlurEPTTGCH8PRs0EQTAC1lNpg/UJIdo777QPJeS3ORO4Z7CB2LU/yGtcZDxlmqyvOyOpc2BYgCw+dc5nc5P3FaWNVgPvR8G1ObMs2upvyrUppdk/Lag+TTGds4K28E8873dL6BqVk81kM6YcpWSjENhx+GLpEGZoQ++dQS7iEsh3AXaNgMf32NC2QPkBn3NuoLJkUNAlZ1gC2aLZMwIIwk+pvYVu8PYJdgzce240y6e1ok0Fjv4AN91AyeWATDT6tNwJCOCdNrhhYvpDekgPBM7QdA8YUfB9z2K9saoxsDY0vbq00sVfIQ3qAoMvU2jenLLg+wGMWmItc/ARso+tfkZfSNANuzvCk+ZK5ZpgYovDF9jHlJV1pnHEKv5Jr2mSUzFLTqs8P5Pgjjj2j8c85LrVUfz4eomG4qGRb28hQd8duT84PJdeXcGag4qnDV4QWM6BfbTVstyyh/ad7G9iaAhWMDPHcxp4U60pvJaBM8HFwUWaV66OO3htqAmuMtC0xKweI9QUtxPVi3Dj+aGoT+ewVKaML2LvStPXDdadTR0VmpDW7sb0/m/Q/eLE7RGW9+rp0j5h5saK+TS0Y3byjLoObmaczDU4Z1DDP82ltms78DtxP7qxlIQ/x1JBbS0otpOTglFdKVZgFwAImu7DbPQYBPoaesUCDcdojsmjxnHBCgkRKkxDP203XFZj2rXVvuaBZxlWgCG/Uiwh5wz3fIzl5+xFN8Zlc+MKPANT0HUL/MiTH45wHJHgILXzamP19MYlvlw1/iWq7XyyroCjBwXBOx+a9feclSPUk8FCk3FYhIjeIidQ+hNIoBZB51R4vPpO6OPadB021zKMMSBknWbZeETG7tysw7lh8NWU52wdxfxsjL4j70Fp3AYg30dBK1gfs8yBwvpq+FeaqfWSam2RuY5hSU2ZwoE+zHZgAgwcpCmZWjXIypKHOKcvkoaBXqhhg5RKDe5IbQsDZcUZtNzW2IE88GTOmaIqncdxxO29qcU/3O6VCZ+RSQX1NlYsfNGInOmmUS2SyHPDlON2rSn23c6OycJdFkFMx94izsrlHgtjQtoENwvnO0PJmmvkWfki7kviZrSbMnad/l2KkWVj9YhEVxMPVpvqw/hejXPzgg2N5rm8sRBa3TJtbpS7d9ySIlMcNVYOga0J+kaEya5qWJm5FfWiulu3y7iPZ0o4cfJlGrk5QzQ
dLApyxUG/hoy4CHNRdUsfslVpFi6NjOlGZw8nYGpSiajU5YgoNqMqy+PdB+4PTxMrx1T2D6mIXR7ocaBP4UUjr5mCW8Zq8UFk8pIdj7eE+aBNlHPIyVF3G3Ze7Ow1kY8c6B5ekNXGiCZ+3WnAQTrtaNgG3I83VksNvBVuxSlXUUKNYhR4m6XOGeyJVPYzWFFKXrIcej/cQtMZtzJE6orn/F+oH2poUSLboCb+ysRtUE1sJQ+3OUNro5X3fDGeEI3TvlJOBCnslay5qVAZHrmQQ3MjSZjWHbQJ61G5kfX7j2kczSJ8pjVmLOUpJBS5Sjw5hNWgYBRbm1yEgou3RBKvmUQstsC2wKuAdNyTkLGbEW4cl2hBUkjBjazj++ohVldBLfY7Zj/6Xi5GkivGSlKV6EaAl+LD1cSqVasR0iYe7dWKJy6l+Sje2dq9G+Wmx1lV25tbL9Y3d9e3n19s7u1v7u4/30n2dl/+3oxCzKihmt1XQenzKz7gNK3ANNHACLpWwBFeYClbKjDYzOlTVoWQyl83WN+Lpo17JpezkdP/cjlbG8WTh1vESCfjLOratdF5TWURld/Ddlc12LDpiqWyKIBnQy62kCZYtmB4K/c05gZVLwTJFTKr8pr0sYYHJmuj1ENJJrH9legM03PZlDSdsyTCRdjeSi1T+LGnQlbrTS7Kylz6HwUV0kXCef2vMvEDVL/hec57n0EHG9DIVi/hHLmpGzY0Ap7AMG2TkpBPIdbtmcfPzKpNijkfpKmdfo24xj5e5BkNzC4yrwrYPeWd6iJMLBO0dduVUoPauU3aFwnSm704/fderAqA27sGfIZyAupiq6r9gGU9fqF6Tp6VTM1pqe3h08Z+M+VixhSE26yB84/euJvMSLsBFP1Ske2nkEIbZZcPJgMwvFrJsU30dT+pvr8Ofjw8+mJWvZMju5pQMj1Sxlow79Gd6e7mZtaETMxYN6l6eZnkItwJQBeBq1Kl+LWPwGRQfFTR3AWUGqk6EgbIFr7eBAgD4/rCiWXxFl16cSFfEJmmlVIsSxynrG/iXMvO6A1pKp6gYBR7ovu8ZUzwsfd1VImfBAGKaHrTqwOfCKdU2tOFSr9Vw7SuCisxCEns2kDbGQVJwd293jU1V1LIXM4aRT/sVSOvfFgA1/sNXJH/r724+hu/3eOl7uzdZGtz6/els6OveJsZfWN6rg/g+iRFF4076FG0A637Udq2SUhP8WJD/LPp1OH3XBcDcKDFFtrxIkecL1IdHKK13aRXg3bxwV5rQX6HYvus4npOaM6U8YIMnIWGdawVd4CXVnO0loyKayRzeePkcYsqgKCRLRZdcGRORZZDXOGcLcBVdmNVZWGiY6qYXTMYK+svUcwAhCiZ16vmBkaBkw5NYSAASxtLDDdzBmlqIaIdW4qCo8+AW3BW5VSFUPtadVRWuOoReXLm6n4Gp0ksUw0myOIsUY4JRD3DWtqSovOKO/UBFBTkVVVZSuVMNKkUKSsh5AmHRo0ir2YgCXQtKbVbnsJJEF56Rnn4AERBuH/XRv7c4MjjVvhZQxWsXRFgBrTP3yZnNrDuef8QeH9nmTr7aILxwJKzMFyF0/fekf8dUsMtSrSV2CEWhqF0l8n0MuphmHFtJZMMDKNYDgzUWWY5E8tqorfSv4vfgShgozi79rr0+BL3pofVn7OSbL0im3v72y/2tzbR0n14/NP+5v/5l63tnf/3nKWVXQB+ImZu7xFoEcMUfreVuEe3Nt0ftRRoeYGu4JxOK3svayPLkmX+BfyvVum/bW0m9n9bJNPm37aTrWQ72dal+bet7efNOruyMlYx+qYvF6s+ferd4tY39sF4GRMQiB1zLrwxIiMr9VgGX06tM1KeW6klGFRKpnyYdbg/oIo7GmwwnZllvSLMqTQuVQHFO5/eCzWfnSsgMvRnDRMlcgvM72pdfJZX+6ItEXev764WYkbQehctdngn8tomEi0wAv3AXgUiwO8FUYqhcXAJlLLy+hp5FtaGn12SGd7PYdA6PBdFMrdG0PXrimh1cmyoSxO0b7xP7ejRfahDxBUyZnkN1TniDV5qW6/jsBK3sXHI1k+VAnqq0SJcwqzj7GA6g4RcK91qLVPn4cN9uEXkMA3uVtcWsYPXKJi23LSWMvysZh6b3vetRDFu9G6lYhFEFlBCOeQMesBIJhny1YJe1bujmdA9V4lDa4PFDNzGdvU8xKf1nTM0IsOpwuvZh9KeL7SzPHVtzq/lLLKxFigsNS7WOijOK2b+TulpFEG0nJobqthd2VfusMB1f77QhZXO5saU2Ro2v56ib8T1OHIDt4vwhRGfYdmVUV2dZN0tcd3fQesHlVWdxGzttio0jW2ESoSROeXR9/Gdn4C8f/ea5Fxc+djqu4vZeRdIWyjwo2D1RPD58jT2ITscRiOQg0iCH4XrqJHIHykt+yCuWhaqGPK9QgrwrgAzDB4a7M3VQbLdXb2/seG6Wl0zkUmVpLLAnmsb/7K5CaaPZbVExfXVpY4u79uu82kuaW+M0TuurwiMAOKq4lJxjHBuU6h2RES0zCvQv6Psp/eaOWM+rAzM6c71gEx6zlS7GV+A/dJq9kvQ2K2LWD0F0wD/k2Uw7D0LGmFMgk4peKTCIjYt2WxtbvaYUwrKXQlLV5d2ISvY9qaB2x1VLDAH6Zg6Akg3/Rl2iBtnHtHMkpOol4FYc4GRcH1hyc2WyVKzP6olT+jDelScu4F9a7VbeC1EbrUehfBQhN87AsAUrjtuyRF4ZehVM4WcfaSpIVJlzncdVN/IPxl7J8OpDuazYJjuYOuaRR2AHqXNBGYwYrBNmKB5fhri1l3+o99CrniQ4sKIcU55lK+AT3kzt3f30ihc2jMnnTifR1V6U0gUjhF2AoJ33KzcKVGpFJprEwtEjjJjywdce/YK7K3r4C7fsJ4Js2iGvobjXM4SDb8n/vcklRkbJ573+q/rpIjYuFgHy2LNFTdFW8xtOqmQq/k2KfXRPDk6X0t8NlnjjSAXObIm3OrvNyLMiJHwVh6vQ9zDuKksMQjm9uVGURNhwd1L5GWTpg1dqkXN3W4L9Inc67hwYUCx6yKiCHRh1G7yW3wX9pz+WXeZHCAL427tobEkeyBqxmF3OCwILQsuGNHB3BRHcsVotnCU5C5rT+i1/Tm6JvEAeuIg0ioQN1w3VK00ZSVmNIdJfX4R1Cmg9vhLATL5yZGbfOW4UrJkGweFNkxltFiJsp3pZKLYNSof/vHzi5U11AXIL7/sF0XNTDjN/VPrm7v7m5sray022o26/cbMB2bO1SeGYEG0UtMy0IosWtHVZB1jsVbgph8hSWFcU3R3kFpR7cR3IXkiTx8RJux+6yhgy/HVDPydMrJI4KIg97BUdktB5nTatk/ravcb+4KhVE7hX5SdxmWVGqptyGpbexAwNhSY8xKZdM0pK3uEr5k2fOZX11S9l1AsBJxbPzSmUHCxnrHSzDuj45XUbNVO0L0GQlOIdXe5YgICb0mZ05Tdqp3copXUJ/6ztJNi4fSTYuGyrK2GAnNs7G6/3MpYNlmf7k4213e2t/
bW915ON9d3aLqz93KTPt+bsru1F08PU+6M/C7G/Sf/+Y4Q9wMsTNqKh4bCHR3/EISaazKxclEzWMyFbNtfIXbOBynbsd3K/f7/BJVbXR0wJ3ZFphw44GDx9Vvko8D9ZyqyDanqxZJG1MvIVaIIdsPJAqc88XZv8qb2OvznTydv/suXTNR1vLe9ZHnK9FqCL7vwf2eF6Wn8TSHVmGWIzdZ6/HGMvMLO1PSguGmMxfoMwWT1NXVeYhJq6FrRwg/da1n1Jrh6KzWGbxlF0yswqaAVsCf8gxqj+KTqdDYeoEgR4j3MF1//4UtsFIHs+ZqqhaWN0G2G/MIUhqlBFRT2cU4rDeZLSGCXU3e3NLm1ZQvM1z7y8fTueNr7kF+zEdhyIZE4G9X9fewdBY0AYpcJ+8jSyrARmfMsY2IE4ZD4bynyxchxyBG5Udz0mA5X/3PFP7syIiv49Mp/fWql9afOEE+dIZ46Qzx1hnjqDGG+784QvaH9D5MdQA6CcUAYhLrRS4oLEFGHxNZ4vykspFH42mNJN7VA4GQuihE2kAnVL+/gb6GALQzjNhAlh6oEO864sFONncrH7VlhmoxhFeNIX8Vgf8zjwNrbwapnHx1ZTTMNw3lt0sMdV/Bu4auR9/fYVxw2SHa+ad3y1gWA2kSpW/31g7AzFJShwWHIug/qDLRyd1Eqjk3FebCZ4tdRdAQUuHRmh8gU0FnhxlwWbIPmHvNhpXa4SxzmcxfbS9xHCkRRLMR5x2qbhglgzIrl7JpGlua6dVlvNF2UPlGWTFlFFy+AhvkOrs+8r1X+4bJcCVAzYFMDYFlhks5els6uFJrmD1Zh9Ezxwl4E2O7y5Ig8+/nkaO3Oo7S6tbm51TzwtX44NITt3gE9LQbbB+CL9h76Sg2GvmIXoa/YKqiOxR8uOfPEjl3biL2gitxNhL+9Kal9VrZ3Xzzfe948LQUv2OWA1SzenLw5xjhqf7v47E+AFpTCZrciRbRRjELcyWRhIlNCpaEEgzMW3tzcJJwKmkg120CfNySAbhQs43QdLMHx38nHuSny/zw5OD2oWfx0ylNOc7Qb/9fIXRm+3FmC5YJ6csms/FGC3D9x1QTDmJjeGGK/o6X7TLtlGX8xHCW9sYQUo50LIlMrtgfqor2lRFY3X+xstkjoMyXSHoE0SJIUQolBdWgeswFLA5+2G2jhZR7q/fibso73N3FH6g7KfHHP9kUqb8RgkWpoPrYTrIIFRUHa3/330+O29/pqdX2glRh0EYv0k1FrI2FvsTRoR/ht6KdZJFQ+TPjduG3vn7qOPXUde+o69tR17Gt2HYtCefifDwzk6zF62UGsGAEyW6Qxv42Va+SeUMrHRTxwTVbsx55Cw1svnu/tNAA1VM2YufyL3FIXsBq8pyCYYlGAr/+LlZqDfQMJ9RlSYcYVeKgdJGsd6gvu5BBcMWi/ESu5gCHgPRgCVB0LHJVBfHbeshKg4HO7rSBYChhmjbs4gJ/dxzvCAH5mMq6VmVKlFpjEh04tWgv+YGrCDm2hMFGwpTdjPVwzVxleib1lobw4pmJjwCNL55A3XqcYWMhOzryLVCqnbKh1XVk9JdjGlyqhyc1iKP/Sod28XmH0jRRW72tmAmDsDBOD+btOG34uN1m3nrNUZk4OsLBdC8BKGLW45Fr2lJ1+HJThFOTk/G1/tenDg16QhtpBB07vJh5SQVvWbU/V94AyY/KylLHsFauIUsy4gYqKIiM5NfChe8L/m6zkUqzsk/WXz5MXWzt7zzdHZCWnZmWf7Owmu5u7r7b2yP+sfilVcvW9PYI+ZKglnNKAmpH3d2CQnZySmaKiyqmKXdfQTjOFCCvLbKIr9jAuRhLJFly5VGmItMZKS2SaS6lcyPwInXZxlb8wKIKXk3K+0JglB/mGI2APGCPS6tlYpzFBSCIXhFZGFsD9IvbWvegnUhsp1rO0sS+KzbgUQ56sdzDDXQdr/dfDPpgGOloOnt6T9WvFJiz9oc/O7e+v8MXtN5i9VNF4HZVq7Qlnh2d0HbzTco7EYe3LFxgftqdIo1hU8HiZsGDIDimYSyq5raUPFeT10cGZvUEPMC2z9p7F3USaLGQwIej2os+4KNeXEi2+GyFK60vxtxjnAFDyQ0+pIEefv/jP95QSnmPVHyDPmiLrnBP4neYzqbiZF6GyLFcu9CyKoWR55qLZsBIxhKXOsVUWhpq/OdodgQNjDei8VMxx64QcZJkHYxpCHjEC1w0xWUDCuEqp9kalJnDIjC2AaLvGehaQI6ZZSRU1MnQUproRXf1MC3qF8bMjgnlwc/r8cndr+yFNi7+0q+nLe5m+joPpS/qWwnmSulGb+xf/+c64ZQgSbsctu+xusDRUBsuoaENFlDx1fHgO7yZ/84fg1oz4bpwvTCpFXeQ51ntCEW1QNUGhua8YNKwVnTQtC+2cquyGKjYi11yZiuakoOmcC6ZH5EimV0yFTqLKpW78ezVhSjCIdJUZe1BVZpXOuWGpqe5NfP2UjX/bSrFuzNeRCD7uvbh8sfO1bli8C+U02jtPav6ave2OrQMrUPZMY/HVDrK6qm+7fcOIUpFTZn48eXve7fL1movqY8/YNdDRTGFEuPd9BYGeeI23pxdvz98GzNxjU5sxmXxDijSA860r0wjkN6dQx2B9I0q1BembV6wtkE/K9bepXNu9+RYV7Aiur6lkN6WugSBZ/cWNHd9IjUrBdT+DkCF941P1xx6yMSg29vy6hr5eK4T72IlD9yisj7Mep62iHBDHDR/ogEdfOo3mN3ShSQWvjCBX0FUaCEaHglHBxQwKX7i620xccyUh0KfRVt3tH/SerhSoiZUv+DaeMGqAEY3bWCjvwUJ/E0gQRnlZNz5s9V6i6QDI/cVt5m2zDkWjp3fSZ9R1EikzosqIGt8L/tEXEnGMEorK/VHRHIJ7wpiRLOfb20BlB9djPTT0qDRTiasCAl16M5byDKqtWXEUSKlm7tBVs7X5UidTWvB8qAiMt+cExyfPvJNGsQzStjM24VSMyFQxNtHZiNygONz1t+GTHbir/BFTmr+a/7Oj7uCuN6N0QsyD677WL/LS1OL7jfwnvWZtbEUFpgbY5fYacLYANqjbit64Qi4dyHeSnWRzfWtrex10cp62oX9cAepb2+s4gs6h7LbN/Y82Zry180vtrJ/PnWcr90k9ItWkEqa66wxTdcM7Z3jYkKEO8MvS49ZmsrWTNPvqDlZ2w5VXbl0rVoM/zGWVBWXc2wnqindOqsHgBSihPTbbScEyXhVjKKJzXbRKGzYsAcEm1Gish9XvwMIbu+BrOSSM2CePtKpOlEuGxd4WVXOObQpqSS4UFUAze3Pbnm/vNqe39+PXcrhA2MaQ/hZYHSsoH4qtW9WSwARe3kq6ANhr+JHD4b4af7YLXtUglvlreEroNeU5nfRkthzkE6YMOeZCG9ZiboAb9Ab9dT1+0SK/aedfBOeX9gO2gBiwc4hXPIHvgAcOyu4oDL1q8HJo3ugYlCBUSLEo+J9xN2lAYfj4PhReHMMqeDa2lIIfvPaN+k8qxRT3ql3wQGSuA
ngYttl0qYGnL9M8OCTEw5xdKB5PnfxqLO18LpUPtYXaEbXpv150Ixtigh0BgunHmEaAxS8XF2fw+XaH20/ebR1i/uxLUfNC1zmbjCuV+2pcmmEpThNh2AKpcg+vYn9UTD8g1MK/MJHZIomzqB5YqDN+tYncONq3BSaBWdvo3dt7eTuILuHnL3CRXjjjBm78nRj5heW5JDdSubYaHcwMsG8XEmsz3LF7zyywwLTmjFrpu6vSbO0879/Mgpm5HOo+XG2gFKdqpWZH5e2wqfOExcVtjQwBG1iV7I+KqYXVg0IX4EymVeHT38LYvvfvyomvXGp1q+PD856w9RkzI1JCh+eyMr1oggLXarDsr3du+LrwWoy5zm76jMpJLmeJz1hKZbHRgl2XUmj2xXkKTrssU4mB/Otylbtwcjtb8bj50nzFQftpjMUBjZVwehxVn19zuolTVy+o11+1s9mMtxjWiANw3WYV2wIjTZ11bpia0rRR2PCk8eXdQaFhgE4Pf4gLTaXKCBczqwljf0T8szkvaYi9kOqjWCmVK3VEhS/Mq9pFkImSFWRX5pJmZEJzKlKm1sKowWjDPoZ08TAW9KGC7kg9vfATaOFm6q4hbszQKSQMU6MAgfNjaSa0VK50e0kFsStaw6IhMRyJw08PKnpCp5aX5WjO6VA12gKJ4CzopKh3rFYvRz0OaL97gZuFst7Y2RdNaxaVXGiesRGRlXF/KJIVf4YWHzXqBS36zJLuxR/u4ZqDx+PW+Do5aiOrQd41ts5P35x1zgkhJ0c93G9z2QUOnYTp94LdThHdPHczvwf+OiVkFvOp1+7jHXGMR50Qw1BE2xcFLFg6p4LrgkSVAkMzlijZCjrL1GGN0Csl7Na9oY2d6dy4oes01BDz5VfD/FG8fNP8hPXYw0RYnd6PCZ7NuGz738aNhfi34laDnTr/rRUKaWARLIvH/1so4jupDFHUGcF9sd+/gdXDKtDww/HhuUPfA4IngVCbRPs4foS3vuOHRWSI8nGb1W3oOe2p04X4cv4GDeE5YSgFclwFnYh8uf1GkT9X+Qt7QFNDZpLV7QVgEHRJxE3HM8m0WF01oY+0FFEvJl/Nv6xMvJ+Bmizdh24DULIkNPOJex2sdXrzI9Uh0Y9vqBLjERkzpex/OPyrvrVo3tMDAIptNrfV0pIaYF8vWp2NcCJ3l0D5N6zAgrd8XS60AjKPS7LEo6Q51T5KALrzeNUwzAC3ky+5TNJKG1n0u52lmiUsp9rwFPv6JRMpjTaKlsmP/q8GsjCVHooGJDlfqhUBdCIMCO5gyI7S6pUSSqhQLrwb3ZEduNBdy3I8Ne3eUNGRaa12Z/vWpQx4HbWp4JEWF5UyNI5yLGM0XZrrL+0Vtjf5J72mvYipRDpgyYsOXtx0roLjXGYdVNyzv/Y09CxkmM6c/rgC44z5t+/USdv9zEH9jZ4IGzthU0ioKXNuMJfBkKpsNAcoqWr0xD3BqCUFlYcwl23shvVGWUReHN+E1f0VhSLWdsRmCX8WA9doJdhYhl/sqLMg39UtjIkt/FyvD+iEgLWQUideU8zsRv83E6mEoBmpiGA3wBes6FbI6/gQSJJC3daqbIP8uY1OiZauj6m91iYMbGtxaNfEx3mAde6z+51CAC04xt8sgkQZ8nPgIlzi6GGJffcVfrjsI+vO2XNXbSiW2uzzxWOxAvJY7NVdcBNzpGtO3TAJOcuZVU81Y+TdT4ea7O5s79itfL71YifpWVoypSnPfQOfx7aIrEYr9C2m/IQd2artKg7rO4jbINWrsjRkl+XOSLuaJhX+ygvdpTbDkPbd7edd4th+fieOBr6ffOcd9tGsT6hVBJZGVmsdQNQv+9biG8o9+la3tvmWxnWfvsWsHpJrskf+ViPnX4OkmjR5T93QzaobyN9D/wDXUgVYsqOeQCgw89arrZ5iMs93+9Da6IP1MNzee2LaTdnuPzF9zb9czy+L45phxKpKnRnbnrjmNIClts3t5Oh8bRRrJVat6ADvTuZM9jYJuxP00LfMKznU9bBPTat1mb0N7mpd1m7itlS/sl6eEDZ8yMyUb4EYmg38wqhLEQGYWW+hgEip/YqbH0HR7bbgdNRgLENDbmxyOo2+uicd3ZuBmzm0aI8uiko4cQzLOMlrFvoa1wm7BIWyqEGPy4HVDWuOe+KTMm796D7SwA3bbhkUOgg/IOe11rKHOi4HqMnM+DUTro9WNKuzw5RKGpnK3Kn6XkFXE24UVTwiHCwG65pVG3tYNMrIBZROc02LRiCQ0lxLmGyBikD9sL5alJFJhqd/jOzNxSZSXo2IubGynPKtzOL6rlbz0NxUTkqvq5Bj190wIpSzAljqIk/2FspCUae6uyUcqY2MaUNOzrC+lR6BI0KPSDTmDVe+qu436BmnvGiQVo8jcpmeqLc6IVfRC4neR5C4wQ8OOzKR9txAZJ/dliafHbvOofDmGISIsUW21Zu5FOF7xciVkDdiRMb+sLqfUFSJ+tnrqui5kV7sNRDgOIhZXA7msVg9wIg4aKaH5mAB2ZJ+ceTkDF16jpqoJjcszx2TC+vxx69OP2zyv9oCR6GnyTqdCamNvfkMFRlVQGO++nMYdpo36+u/ZlS5isvUhMiEGTfzagIxCZZAcj6bm42AvHWerdtLpkfo25+//Vd9uvPLv775effNPzb25ifqP87+SHd+//XPzX9rbEUgjQGsHStHfnB/+3t2bRSdTnmafBDvmF0P7Dmptev9D4J8CMj5QP5GuJjISmQfBCF/I7Iy0SfuykziJ9+JED9VAgj3g/ggfpszEY9Z0LKMWj8C08HLyykzRd0JzrlgR+FCiuwc8ZiBc0GSvSaQgAzdwTi7SRCGWyb2qJGKlEzxghmmEJAG0MvBVAPSgMD+F0QeN1k8cpg0WelayADbDbqZSnVDVcayy8/JJjw583HmdZtYd1yjn5y9rFTyYzfsY+vVdrKVbCVNKy2ngl6iOjUQgzk5OD0gZ547nKLm9uzeKu2en6wjcN0vsF571MP23PERuK98tzn/lnb8h+bQ+xw4GEg8p8z8lMsb4HAa/nLBmWHcXM68Q6By0Zl9a+rW020iWixXzfuTDE5OXE1gkthxSbPMcWPXa80yWX81XedUuIdjA6DPRkejJQwJNev//vrgFKnvj3Uu1v/ALwxFf2fUgo4c5FZWiGKmESDf9ITYiROO1kL4G0tznAD0EVQtz2SlozEBEM1E5ty4lk3ijgar7t7mdrL1B2EipaW2Jx/kLSs/tmI3WsrP74xdjchvXDE9p+oqWQsovy+swC4gcasb6DgB0rvBBY1Ak87RXzpuIFrBgPrvW6fM4WJuCyO4dTkPDPYYOq8B1ZLJgkhIqpMKaMzJvbquBuGPXXs5P0O46m98yhtglzS9Yve2jbzd3gSirhvkk4Rd926PuFv/0iPw+h9rzciJvv0i73YzYs7z6wGkrNXXLz2jrKVV5DzsYwKy5IjkwMv/SVOrw4XgjKBbfns6U0hCCHGmHuohUHjuzqrf7Eh8QH0ZEr6or2dnl/jvOE98DIkXc2sM53RhxYIq
K0fEpOWI8PL6xTpPi3JEmEmTtW8P8yZtIX6gNFgXnvj2/ATasuQovt7E6aqerF9bLCYWdzuIwcg+UWqWjkjJC0Dot4dOC3QDn9/zPfpXuEGDm9+NAk87++jb+Lu76gtGMY+d5uglg95KjpeMQvF2LOzRMStip8YQSJcxw1Iz8uNjVA4G19074npTxncKpr3nsKG4btZeD6nhIdzHlxXEQSn0y1fQ8B2W2mryLsWUzypV77skqhLLI4BoOTV2usSXsmmXOfT2ej0iN2wCGiBn0JjfqAoS+xFdXIqNUsF6YVxfcsXLw7Xa/IM/wVZAdsPGIEUzgn87lxo0gM7QFqsHZ28canTyQ812An1GFm2KnT5vMWi7e8PHHPMpoWLhmRxgHdepA11oH2qJtKFr4f8OfMMqvA4WusyTNy725I+KVTgwOb54DVUypQAS8savUsmUaR1ZL8IwoZ6rYuD+SCUErFnJzOMDogOPD88fYIVncWj5o+uX/rgnLqx/LlGfqyPYwSQehWmjmg/tLmkRmcktY0Sa+FOKZuqtkQSj7/h04fMHvP2LkHOMxqeqaFic6qvG2cTbul0rLt/7TDA83+rzt4TnYywMNWwmFf+TBUiWvQFwAUlASfIUpv9gza2Dw7983H5nxd9nIH9nQd+zLBcv4TsX6TqLskx4KNuIY8PA5+U0+CKCse6O1REjw4GKeTCkNNSeKaoYBNa5y8KP7Oqh+65aI3LsXB31NXT05vcR+eXdiLxmM/uEVTHbGD2rJjlPL3EYtnTPt6fCvk+FfR8OUu+GPhX2fSrs+1TY969X2Ldd17d5qde+mC+j0/m07eGVOj/T96vVudGe1DryOdnXHST+5fW67pK/d8XOr+h71uwaa/jLqHZ+VV9Qt+MilUUciPFpul2dj05x1KZel3h21dHrQJ8Lo96j1x29+X1pVH5ayFYdklVXuem/44epBf/m4PB2ABrzDymlH9aZ0V0khM2qo0LhQbDhu3DnON47vNmI7p6zvJxWeVyjt77upnUkUHBWBAcCxWxJlteFbDCFU6oZFfxPlKkbcRFCxsnekPnIWMYypwBgKifClbOpIawozaIn5vQS4vPOf25sxFO1effDt1aB/Kna/FO1+adq848M/OdUmy+VzKr0EYv2ddJ13Qy33FwtEPX25mYDPs0Up/mwMdVed3eTOc28KVoMVpV/7srqt8usgXWeGkogYgLEwamSRTNmTrkGP1En1RCrXY+0KJlO+krS+Gh6Na7FvbG/3aE+TabhPyX8B25a+EPmOYMqNmg/sH/VQQk9OYIN7bku5xclaD0mUv8OAy9HcOeLggrTMlb1nt/H6TnpNyViiHUBkFpWgnd9dFD7+3tSKONxfCQIE4qncyQoCAFpVMwOeY2pLEoqvNRkxUCwpzaIsZXkGOdU6lDP0IqSkG1KlaJiBvE8U54b5qy9UH3ZC4lQ7gJCfgU86AXNAEa9nodUwPoKleKb4i4ZTDX4eld9TFteXKtvvgbZhmvqHK6pe0j3AoIyPf34kgP9ZCpbN+Dy1R2/S63gSSVo4eh2leA71gf+KhzikZWB71gT+ObVgDg5xtf4ctz7LPrqTqZd3/m382y447WhORauwuhbP6uH78TUpbt8x/Seofxro+DNQgKLGIfmf8ajQtGBMLQDBMd0gbD1WIb7/hVpdIkvVbjh1mblj7bjbk8e3Kd8UvE8uxyWGlcPXEpk767ZUw9Q1Ns0dfmQjiwCnwlUEb6JCriGlNFUFgU35PyXA4xSEBiFziCD2g/RUxBgujN9yfZeZdmLrcnmq729ydY2Y5ubm5NXe69evNh78fLl1mZaO3jvMWinc5Ze6Woo3nTohu8gy68Q5M5rpkKVum7W7N7k+farjL7ae/WcPd/ZfPUqfZnt0Ww3nbxKX+00de1o8oFWdNSMLoH06iYXCJC/LZkIdXiUnClagBKcUzGr7NqNdCSlwRW7oVjO6SRnG2w65SmvQ85JHfDf1A8QnZc6lW3d/hGdhxlsjZiRubyJFwx16sKOuiC7SjO1DiEtIzLL5YTmHbzg130LYcvoOxk1/S0PLOODLOBe+JqYy3nKhB7M1fEah3cFkzFXvI05f9ibzaMIJTr0IXI4hZglN2KssilZkPOzo/8gfrrXXBusH1MzI6k1n+SszrDXZfYRsuvdkHpjrctnDkqazlkYeDvZHFDS670ioilqypFNwYqaoTqEnVEzjyrx+H3jHYKKoNuotNoA0t84ZHlO1cZMbmwlW9vJq3ZnFCi5lQ6Fwl9kYUFGm0WYjLx/9zq4u7wEA50SuK5FEl6XKL296mAosyItL7PEtOx9YwWbJVb9oIqEnmIazUS698j29vP72pQ+YkE3ZxDtygLgrnThSV7ejEkM6hXbmUe+qrqZ0+YjBRW0rvBMXM6yzwTbJ6osRiQrr2YjMlHsZkSE/WLGihERFXz9T6q6Z16VxbLbOKwk5je0OUvcyWQ7eRUL/025/5j8Au1iPkXy/w2VI3ImlbGkT44/srTCP5+dHa+F+q3Li9VNi+QgsT1WZHXTNGzGlpZGvtpfRqiBp3jO1q2W0NVeodyZnBpyKFUpVTPZ8h6SGF70CkvNujLYA1d6RuMw6HtWZsceWPcIS2spFw9c1ovkefLqxeZmsvVyZ2t32fX5CtOXsNCh49DsKj+HRs/PDk5OL5Lj/zhedn3DOgjDovq8hA9c3Eo4gR8+Hhx7ZgR/t23RK3evPlp76qNdPX+MvrrbD7OUYcRP0e9FSamoPSl1h1WX+dps/wT1Jv1whGcbESm6Wl+N6udgcB/76UvotDo1VucydKF9EyicinCjWT4lVITdtasqOeaO2wdRLfFlwMB6i+DWwfTLWVFmQ4X/rh4oRReuihUgiaoZVFnQI7toBfQBeLQLohMt88owrDQaRdlB6dVwr0WyyRu6IBPm3FyImVJJw6ACq9Acuh1He9aRIdzHdZSFJ1xs6NDEd52s5+FPqyaGD1ubif3f1osOIi8h2+ZhAmNLE2NiZuZBVXfEYscGx96iv4q9C9uqsJlvXOHClZmzKLCfJlV6xQyhguYLzTWRwmrJYcjC3shhk8iN1ScCN4AWrlTFZ4i8gUKG4YUCNySq8c+dOo53hK50yVMuK123jO3IdTvLMspUZuxS85mgYJdjH7m+t97QRMqcUdGH+x/xJ4ywL+2QkJ9PwgxxjbA20KtGVWz1EyHHlnyDncL77IQpUwYNWr47YE98Y0RbvkVUqhalkTNFyzlPsXOOro9zPOo1zXkWZy1B66hKGz8fec3oNSOVqOsmuBYD/tX6FZ+nV48fhr2hmlQCjISh+XRcOPndu7fvLt+fXrx7f35xfHT57u3bi0/dsgrTVAbKsDnH4RuXM3jnoPKvelRJuLUyQPJSlq07ztLquZGKaVckqd7ons0j6ZzyOFT173bHUXaoX7/tPc9yrJwC5S9Yhpk8jQ5Wrg81arGQY9Mo0TFZQElXjdG7wJlYvkBjM9ofkEo7BPVZpx4o+zPR3M+zIHiEzzi2LI24F1qurWQ3o1xo07hiJ1xQtSCuqWyzZm33bNL
GXtxz8B6Kp6KgIrtcsoHU1/HPNvfhpyrPPdzYsgpICe5L15jI3Zlt97uXesJcTvppST1I1DTP69u23fyscw1/ulzUkIfIOhRFVi25Z5kkfYhlGrD28+1xQW0pH6XvZgoZMhW83lyHwTrdA4OmwBuCleF0HM1XX2RTcgMh/40K6WCIhZxcDwgGIMDhef/+5Ghk1aJCCq/dkJ/fnxzpUXw/0qiudWGPn11qvgglprE0cKjcA0657qoPpdBGVanB/rGoNOQLN1yMOchhsCQsBSmVZYIpuHwKbvgsvmTPTo6IYpVmjVLade1rXxprCt1WcHnQN8DqkCNC7VWl2yFnxGdPWuxJbXqYbbqd7uzuZq+mr149f7m7tMuwPkPfLC9ZPtbjoKUjxbTe0JHuOM8t7HDzCU2nuzGQdiAUUZq6S51MjqXTmVVEoipVvSUpo25JEytuu0stBN/Wk/nzjl0nsP5tbESw/wAX7nEabble3EsQkT2KSZHtDsTI3hzt4hTdSfWcbg006/kvB1t3TLu9+2K4ibd3X9wx9e7W9nBT725t90z9FwkGW/UXCobxNSQEy381SV1AA3r4nYahiOYFz/vcLG2OUVJlj+3XsRsNYvx5uM1nGStujaYnq9CXtAo5xH+/xqH+BTzZiL59G9EtO/fXMRX1L/DJYjSUxagf30+Go/vQ9WQ/+kvYj9x+PpmRnsxIX92M5Gnx27cmDWMwegiKnkxKy2Pri1qWHgjWl7M9PRywL2idejhwX9B+tTxw37SF6wsZsZbHVjlbSt54UOT3SX1NOo4GsVmRpYvpBoOeMDu+vRYfutllG/plGs/eEbMeoty6ObbbO9sPBa4D3WNE1UNXcIe5VVL2g7r1QFCB0S8B661ZPlYf5QVrbKsT67t2ou3NrRfrm7vr288vNvf2N3f3n+8ke7vPf3+oBmTmitFsubKGD8LyBQxMTo4egwwclANG8Dpwe1Pacfb1pYsteqC5+V5kv8BGAeaWVGRpEb4foWKAfDXUlqM6UCumaxxSgXm9E1Y34d8PQ0YV7AglEyVvNJT3MaAxcOOA8BIoNPmhM0bSStmBcug+KCITwLL7UZUW8s8QNc9ZKkXW5Luh9VFVdpO5n28vHaruYLyR6oqL2SV2LJTqEZMrhqQfSyYOdBJAbzshOorDXBZsg+Y8XbrgZ8mS/yVJJyVL/rp5JyVL/uqpJyVL/vLZJyz535iAEiHgWxT8A3BfXqwPU39toT3k5H5DInm4ar+iwN2C4VsQpwNI37Sw/AlRNd+fJO3x8/XkZA/B9yMFL08YjyAi11UWZlwbhxWX+/gu/u725MefMHnRNYW1lOHzwv0AvoAfNEsnS6YGQt44VCcYiJ+svnXCFNZAIDeKG8NcauWEavZihzCRygyKaoXN+UmqsEDVXWBdW+qcmb/TvGLHH8H7+Y7Nfq2YWrjvRk2PP6RP6hJpXNbOO2hBhQ69cV5e2u/GSQh5kb41wqQyXm6px5wwY5giiqXymik64Tk3C4CldkfUznF78t8d/3z548npwbt/4MqZa2vd48j6/dcfq4PDzYO///rjxcHBwQF8xn/+bVlhB7YYb5/7gqM+rYY+xgRgnRu7vVA9DeZzVXLrbT0LiKCaWB4JUYB9b8K+uD3yBJAAWWjoxxOGdM8HIoEpyTOL5PPfR4Ds4/84Ozg9ujz/fQ3pIXYUBRh4KNxCoGSqq/OGU7I/KiZSbFTgJgQCtqO/ef/64gTmgrH9cNAjOIx4TRXUUSI5hPnhsKKCPnOw1pqi7ZhHv719d4QEffzz5a/2UwP0iPrabYixAWHKC5oTxVy4GnrOnrFkRsYrWyvjHrfW6n+uHO5/UIZ+UCy7NKb8MOHiQ7GgZZmwj2zlv5a22gDBDVTa+dxQkVGVNfcbL1THRXyQim6vEEli2VXM+fUQCziYTBS7xkq/oBV5V6Sdr3ON/PLvr98sC/AVWwwA7y/8mmErcn7tPMxyakfq3nnnb3+6+O3g3fGHWmPzLPz04sMhyi5/R5X+w0lhBZqfeKhnYgkUm9DoDzdcWEAt3S2t0nUKLz3K8iFox44dx+TYrRrZ4eCEAu/u27gPn42QcMx7EPPhiE2qWV1z5/4CORGcQzXWhDn8Hd/tarMUxLWwVPe/D7JS/dWddSJCfLRmxl7hBaPC2OtkSlN7QVPDSMmvJca6KOj5SknJWWqX4uGDmjruA4RPwQMa+/7UEbQuBltbIRliD8WClDlNoQO+vWGOD89d1AK5iEFwQ2sGtSfFzPOCYoSlvOvbSU4hrgumQFnB3Y1cRUJNrV/i4rkgY4fFZBxWcmAZZKqYCTFKFkNxP6CRKw/ng8uhYtxcahM61quRD3iqKcK3vB2RNOdMmBHxj0I3PmzHlPjq+NklLxNyMsV65mXJXOjayZnn20bW0PNyPMJ6HVh3SjikAcao68JzckaM4tec5vliRIQkBQXRLK4+xw1MRhXLRlbcC9Hy0VT7W6+2k81kO9naHT+gysac6qFKvx3kOd4RVM+ZRjKQwiJEecJykhWGDHryh7Y/NRepNKqXENBf48+NGuqicEE0N5VrwYcV5xayWlWWFHSlGMSx1fqWA4zQfCYVN/PC0tMzDLdlik0lvGEJyrJMuPQCAGvLtzUsl0Buf68riz7HoE7OetHXVKP1YE0x/EZCrKSd7XZo7uePVd4oMvbOf76DM9pnfB2c0FQqig8Gi4aLyMNAQbGoe16EvhJ0ZgV+C4CLjvYhi4TmTBlNpCISCsUJiYXKYGG1JuALw9kpovBJN9oNSOderkUVIAIcL2K273mKByoruAZ3gRUAlcxD1Wk9Cq05JTIycnJ0vnFydl7/ENpvjcgNm/ghSwwfx54P4YFK5S5wVo8IExmojyRjhqWYUiGsfGpZsmbk2fHRuzVXTTqEbTKTPqR+T2Xm7Z4ej9cnD4p6xj0WoLlmqVmVSbEIdXIRCAg3hb8sZ5AkVYyaqNBw2CtPWYEygCs16LuTpHVuqFp/HfeCva+KAPbmG8qneFA3/0MaQPHGDYVLdDHArqUHcliPhIAVy2Vr8vCxxL3IIAfGsKK06sFJJGO8ZvRqaf1rcPfjBTa5b3seYePdhns89C/yx1ymV0RZtVobkGVK6GRPjk7PMQL4l4uLs3OyQS5en0Ngukxlrpe+K4YKIz/ANZ4cIaPi2kdHW9XbVfeCysfIO5FRRlJTbWHwDLKXcB5EMFubSwc8DVtiOFYE8luqDd/OGwJqMCbXCu00Y3dUfHX1gH0d4CWWP6jbpNF/HdcJxiqfYbPcuXj99vDfL49Ozy/tIbi8eH2+7NqGLuC7+q5RtNdIqy7cnU8Y73XY3d77IPxq0WiHT6FpNkedDbtbiEyq1VVNMplWdV5GczZQKOzJXF2t6UlIU1PRyIq/aeSdoSTn4grWQwoZ9ilHhwuiYOKl6vqac7V0Qdzp2tJ8MWImkht+xUuWcQr1re2njU/aXitrsaH89actytXMjEgpc54uRiiboEyArlx/61pFAU72g25/DOgvWN0NLjYhOfPe5Zlj+Zc/oZy1LJ6q6hvh/WB5kCoEAQ
QcwZWg6ztBj1qXAWd6qeugyTC718LW5ib+/9IGokGDei6iPkQbRLFrrtuiw4TZVQPtgF7vctW7S0vuWVPU59B3E3ZK0nn9zR1q0oF7zm6y7wBItfNFgKnF/iailv+pFMJtzzSI6qj0EMVmVIHhUDNQUPQoeh73f8LRtYj8dJrLG/AoqazWmX6SilwcnrlRsaOvDmAibCnj13UAChfccJqT83+cQqFuZp7pNfejG9QOWMOCbgmkxSB0tWdyDDJfdPDxQ80FPF6MokJTNzjY0JwmRGhqKswvc91HDFMFWQnjrVj+AbdaNKyHQrQA1wnQl/vZ6YmOeTPfkKa+LLzhDVv8UJfypltTxOtwVpbzxgSoQcMq3IhRFiyoof+sBBIFuGbQLube7husRq2QpjPkFFiw3cZ1OJxtpfoQh9/wS2h6f9DAQ7OMaFZQYXiKjpKPxrWvZh/TORUzNmowda5DB2sjyTW3y/W90LF5oYBkX9qwGnnLngpzTK3q7McUvoc2XiRo2nNOOW14nhOGhibMkHUt10UWmxkBYVMedeigZalkqTg1LF88RL1Gu+dQghO2CIWrz21M3ffcriEwmGLCZ5WsdL5AaoZ3ApcHj6IO2THQkJQKcnI2IpRksrAbAMbQSvCPREtLJwkh/6gxS/MbutBoWm5e2fTGw+Tpfpy4L8aIsqaMJqwUVTtRs8pn2YPRNuHl2IIyThCs8YhkrGRgnybSyQyk7vwPVlmuW8EsVCdL96e9LZ7FJf3iOITm0ICqLq9MKyOFLGSlfctDwHv9dQDQd13DgZ4dnJ+uddJs7b3NaDqvbU2ISgyGZD039O7Wi1ftNTeaXX7T6VzLR9D09rdsoOJnKWc5I69fHzbw0ROYskwwZPxas8ILhKBAaihU7474vSMJZNHdrdprNv9Cwr4Hsk/ybyM0OH7TLD1jMkm5WQxVZOSQm0X/7ryRwijW6o8E4EhhuGBisMInp42CJ26yDnynUpk5OYBgCtoDZCWMWlxyLXtSlh8HdTgFOTl/C/nFHQgPD24Fa6jddCD1bughFTTrYsr357sHnBmTl6Cc9837WooZN1WG93VODXzoxtz+N1nJpVjZJ+svnycvtnb2nm+OyEpOzco+2dlNdjd3X23tkf9Z7QA5oBFn9b1mat3fxy0DJw3tC0eEoskBpTA5JTNFRZVTFZc2MnO2IClUdrBiZ6PQgrs3TdNoxF0b55QJdC1AtHwuMVJowlSdFO9F2/qGQvByUs4Xmts/0LA4Iqk/1nEc1qk0Fk/2QZTAsWt0ZWQBF+SMydCssWPdmEhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzS6oPedmR2YOh3Yq7WHvrQMst1X68pCx32rY7f5OTsesd+cXJ2/aIWPlvyVkHTAXDz5uDwNqhJwzJrks9w8K5eWDXTKV6QchErChPoX3l6cBH0b1fxgTvJrD6zkpSKX1PDyNGb39cimbd5VkCbyyXNyITmVKRwWiMHoVREycoe4haS7TpLuVRqw4NSCGIE2PG/YRSgBvsAqa7Th4uZT5PhWrkunW34zDwbh/bbSBwDFpli2WWf9PiIfd4gmHA2Z9pEk3oc4dwjWEhZsiyAXE280Bm2POoRO4oCcWE4p3FOpSIrUymTGUjwSSqLFcI1WYk+t6sIohfVBRdlDGu7QKUHlnJtNSrXdwd03JxfuTQe9BDqajrlH8OI8Aw0ktzf2MBH8AmrSa0l5ALDe4xE88BHXgRz9GSBXU4XxNCreldRJ86pNsTcSJLTCcs1qt9CGkgFwFpGdu0Xr490iNxdSWVSXa10b8waGQ2SMLK8hO3/AhTBplMGJezsrE5ycXv4jF28PloboUvkSsgb4W1hDbCIQ/3ImxsBRSWtyd6NhykwHeJpzxuGtXisMQTU832TDZDMbRRTb8RytAPfN8im0kwlw1JMrHfVOS8hcily4RA5vY1jUEFeHx2c2avgAFd8FIaKSWW1uzpWUJ4PtDgr5BOYwEsm3fCvZFrl+SNn/n4184td8KomdkkwHagRd/jV8wlThhxzoQ1rNd8H3IA19asRIDrUBqdAXORgzsTbyxE6h6HzJ4LdccMHsvUQKsI5oFIc7wRO1gViwNBXX7gR+A6EmRoZde2LIw8wFhgZlCBUSLEo+J9RcBqiMHx8j6WM+ZSMYRXQrU+5D3Z149BkMJViinvVjnYQUIO7dtcQX9mxj6juzex+FFIKmhbM2YXi8dTgr8bSzkM/coKFqLnoLjriaRR4Wssz7MuXRK5h/9XdTSj92x1Ho4l/w2BJ0FHq+KeMGuqAu6GapDLPWWqijuuNVpWhTeWUiwxpLVB+LmfakXyooennhrQU9LU/wA/GyjkrmKL5gGVYj/0cMevz8W0e/Gd8CjYMLOi+1qlCngHxgC6KLkvtS4UqBkn+Guuwjt2AcLIzybQVx7oS1h7dme5ubk4byBjkqPZUoQ3xD0JghABCjIFMNTVBa9CiVFxH/ExOMdlEyIw5c2FjybWHLmSqA8GAXJqxbnn3kLPaKSEbA+MyYwt6xTThpu7nH3PmWtK2dGoJ0jdYhYMhWIdqmykb9sBY3YKnVU4VwBuGZAU3vmRyO4LsVBrnNuaYWyKY62DAWP2CxnPZAAPiwmUD7XW8ZuSgxshvvKGpIWP7nrsu7O0BHy32QX6iPQWvs+cv2S6bTNkmZS/SnVcvt7MJezXd3Hq5Q7dePH85mext77ycvmhZjgaxXTYELU9s6NePuBNgqxWmJ3pehDKr7mTCPQyJOY5eaJ7LG9z+jGuj+KSKI8fdGC4FQFWQFBFMmFDot3n1o0HCR1toQyFBFyxd9QkRwcgegX+C36ZUwwqOrdLGU5cR0zhFXgpod8ZP80qbTrt7K3v+yKjRfYOg5uguOKifXIYqAuFRu5HjWl7BLK6pPRiA7rj6dJeuWLyOdXfcmkQkMzaoA8VTEw0kAVO2+ExECeZGIi8KpGRH8C97ruilYfsbHNMooDSusAFpteDEx7SjUbQJfumBLdb+j4mvmR0GdddJgMynmPnRlqOlFkuOQOhSVAsA+yzueRRd2CRUR4OJBcFO71O1GidZMi1WV2upa06vmfempqw0uLgwG0IMKPbClQPS5StFDWeipA8JJ5qLWcX1POxafSjhSNv7glRl46p395zUFlQSS9GuzoLDi2DaW6wDS6iHb3GhJtXUDMZTzxpZR64QcOwWVVCBIWma9YgJfr71TfdPqzm0jlI6H9WTi3nCOH5rrU3pfqCcexB5fcTzg+8JeDGiGggLBh23R55tyAnhho4Ec7+SaJJjv0EnUxxEqjAGVawFXfuE3sJ6b7zkNG5w1fE9XLexHb3xtI+zI39vFsbzGxKC8hq6RXdXah5sJMmlvCLUXkmYiccMNkNp6RZRLb7A3bvYeJ5sJzuxngWxew01q/7mDi0Ln7o/ktMHB2JPA3AObTRFwuZIUcjmPcGasfvMRWx+kyGFLjjyKaTwKaTwKaTwGwkpxDPpK0zVjOQrxhUiSE9xhU9xhY8D0lNc4fI4e4orfIor/K7iCuGy+
O7iCh3UZMi4Qne13xNPR3MXhFafWhlC7Xpj6qJUNmIUBWVLzL75GMNb0ZF8Jj6+wRjD5YW6Lxho2EPzXz3QMBY1nwINnwINnwINnwINnwINnwIN2wT3FGj4FGj4FGj4FGj4LbO0zw40hJ4pCIxzgF3U39zhAHP9HiwN5lRrPl34yCVs8g5lNmmaSqwsA/WrcC5i6EcpZOFNRv7itzC/4UYxcnBx8X8O/51MFS0YFOXtDT6E+hpSwTqbgLjZQTWiobYqV6GKJ+h+bsyTo/MROf35p99GUPVyzQc0hA7iHlz0lOAaEgNdxZO/ARS+erMbMS5WavUPJ+yFslRufxw2UA9d4UVJU7Oy1pyFpXMg6uRvXv2q1x5qRvv5XA1bLkCXAXGNpnMoBBUqQYINzYDb1dM5TDWCHUpTWZQ51xhlNJM09+BFVUSFPfpWt0Yf68raA/yOYUu/AI92+A1TBu/+tFJQQSgUz0SbrSefhhiL+wy/h80IMZHMqs4Q5we7RX4KU7mxeMOuTLzMHnqLQcAVlM0Ss1CClTAr4GMTCkO4mFn9FRvOS0UUM0rqEiXnPAKWzma4PF91p3Xy35xcvDt2R6upfCEpD3bDW3rmqF4jMhvU6HH3D1c821dbijlBWOQbahT/SC5wnGbx01HctSghz9jHJNS5o8bQ9Cop7JhQ5w4h0RsXB5ubO5sbYYK1NtbwgT58fSFJI8S1LI+7Gl0xN/3yuEOW1oe7oYtBXsDp9PUgK5V/pxh80Ai1vOEvjS9xpANTbOIV97n/VIf1PjpePTB642Jr59Wru861/f0WtP1FtN1GEPR3uk23ix237N3X4SxLY7chWwzEXJbH7oPGCLh2ZfK8tuBqxD6kMxz9/9n79ue2ceTP3/evQHnqW463JFpybMfJ1dyWYzsT1+Thi53dvd2akiESkjAhCYYg7Wj++is0HgRIUKJeSSY3U1tbMUUC3Y1Go9FofBpQs21YR8exn7Cw5HrjX2HQasBHRAtO4gn4ZBQqKQEoZTxH+IFRwN/vRyQrZgags3LYJAlfgpPBc+2sk7yQjpqs/LpCbbqQZrOdVWK4lVW8aBqBE6nQVmWXUs2iMjePVQquJdKGwXtzO7q6uHx9Nfpwez761/Xd69H51e1oeHQ2unh5Mbp9fX50cvq3JRbGcC4RLCzZ7UgKN1dv+7oGHS9wGvVxzFLijBqD5HqDdK9og1C5UX3YA8msyqSUuJ598iWMS04fwEDeN1kahTNM03vEaRqqiLddogjJYwJ5B8xARsaUN/N03l5fB0HnQiJtlOxIxOe6gI8ta6vzRna8I/1qazODbMz2sVhrDKqEZz0KuFDnH+7lsQnNeeGohb4JMzMJZZ6KDs7I9NcbqBnmsyCJTnY0PheOgUqnJM9ysSJWEMxvL09QRGGbyCbo8uqDGUY3wxsu5HWYOa/krQpOeUHSUJ0mSdBdiDvKAk89ay0zh1LVoMjIYFVJscwyksMtFJBXfYoMXj07vXj26uji5OTlq8tnl2dXZy/PXh2/fPXy1eDi+dXFOmPCZ3j4zQbl9vX58E8/Ks+vnj5/evn86fDp2dnZ2eXR2dnR6enF0eXz4cnR8PhyeDm8uLh6eXS+5uhUK843GZ+jk1P/CBkZWncKNh+hqlU5UtuZN6dnz16dnp6eD06Or14Nn50Pzq6OXh0NT4+uzl8eX7y8GFwenZ5cDS+fnT07eXn17Pjlq6cXz4ZHF+fPjy7PX3UuTaF4pJyXO3N5Lqs7Wrr4pPD3y/HvJDRH65IC/Rd4ct71SEFLN0apLsCLdz+/nV/KI7APjBXo4ryH3n/8+Tqd5JgXeRlCbPWO4KSHLi9+TuY6ceTy4medx9BdgL/jp7tax9WhEFwtrtLzZb/q3qlwqmfsUeZoZiQXyiaU7Pb2zWHlaCM0w2nEZ/hT80w0OiYn4+FZdDo+OQmfDY+eHZ09f3p0NAyfn47x0fGq+pSyYoQnRSeVaqulf4kLcnhHE2I7y1CyV+GZO14BRymDfCaiJmskprI9Nz31//ePBkfD/kD8724weAH/CwaDwX8615y1+B3D1c+vyLDyjTozO3z+bLANZiWi25aTB2rl6jhDIY5jYS5TdPvuWlnVgsSxA5cvz0ZmjBepqu/XrAyipEc5wrLGlTq4UruqAP1LyNiy2uJNp3BLrfjxlAixZ1RdErJz8tQ1oYbwHx8fA3VjLwjZqgKXpvJbmueGQa4MsRHLUoOczHWFzvcff7506ulsyw7zMpOHNyO5pd7VVTizu1Ld+H0HZy8vn8xIHLPWfUvLbv7o5HT0y8VbsZt/enbsefvq4rLD+/tBEHSf7GVeL0S96yCI6LEqwwJHlXD7Xcq4J22hqo3oS+zhJMyOTk7zzpVnCC/wOAbF78DpmLGY4NTH0Ev5E5rE2GGLTnSwC6Vkygoqtf0RQ15cSDiflDHCqXWnPccph/pWKqaWIpKG+Rwq8xVlmpK480Y2JV+KkQ6vfdWhNDE9WVpH0k2iAN0QObCqmLCVJAn3C8/fnVcV1p/oOKYwnhSnspQV5pxOU2E5+GER8z5wIrx5wUNfttv6Q/BlViTxTzjO0r6msU8jflDbX6la+5X7HrNHOFnmTa0TVB4uLQ1k50nzMtmpwlFeC8SCwql+IX2iinWlMtIlvq1paWc1U6iz32XUUNG2atSwydK3ihq2UbLrdW0HUUN7LNYag+86aqjI/WGihnq0/sxRQ3tMfoyo4bcclW1HDWuj84NEDTuOkL1Z/9NFDRWPO40a3q4UH2zEBaulwsLE/wbxQdX97/jpzrai/gChqvK5rQDh0+fHx8dDPD49eXZyTI6OBs/GQzIcH588Gz89PR5GK8pjGwHCO5qIDVySNeJlKjj0PQQILX43DhCuyvBXDxAqZncbr7rtHJmqmWSPCRA7Sz2zg5AlOzEBu61v+64EnBDnnqJeqTKcc40/Jp6znE5pimO1v/VoQHDUebBVJ7sOMLwDYE/6B4nkJhxWPxNfgHClzeYyFotl1fxNPlSOQ335UedEWY/a86IuK5BR3YgfsxbSmP4g2h5juaXJWTmdsVLPHowSGubMICzn4YwWRGomjmOxsRFb4AdKHqudVZXwryaBRTiyrk6gnHwuidix9isl0dV7H8lY/663T5OcpUWfpFENG68v2PlcklwsPFA+X/FRYTaMcfjJ/nKFfCxB/Q6TXtvBkWXH1X2qc/lEkssr3tQFGXkjtyo8rPbKYyJWHVSwKRHeH3iGpsnqJp+816UFLhbiWA6eBTxZkLyvojrEkmTjSu3xePL8aPL05Nmz8dPjCJ/ipyF5fvQ8GpABOX729LQuXlMq+dsI2XRfE7V+ru9j60v/BqcG7mQkBPMyV7ANcMHHADvz0joKEh60kS9kK6p1oSG+wWAyOH2G8WCMnw+Oxs8sq1DmsW0RPn54s8QafPzwRuc/amhRdUYBQW6Yp6Qgqsw9TLyPH97wHqRBqje1xRIyGOcELmWjiD2mQiUY4uGMJKRnkA8yXMzU9wzpOF6XibbbG6/K2da32PK4V90Nd4/H9lycW84SopBmMcgzwXOZ
rKsC5Nc3gttDIUIhV3mdNp73QCNYWRhUQdOqvMF/rU79RNvyCr+FSSOROKdMI2/cq6M9BSLYUBrPCZ85ZtCR6F2J9m6mkmz1fU6uwmDCOOnOPW6Amg1GLGUe11BUa01QLjE6OQGcc1qoiGdPjGLKCmEK8znkT89gvrnf1xqPCYZLhBnJKYtQUvICGhkLWxfGZUQiD8yC3CPDy2OC9rJ0ulfFOcTne4F41hyhTK2A1qW1aVKBw2x9VG5YXlhgqUIosOWR6vTTvaX/Bcv2asK5/+leblpcCApNdO327aSMt+iAfbO7DdcTeYtfmEC4DEkTMaXVhUgo7F5yUk3YuRUrATDQao9DU3Qv9Fm0dw9nhxB7gQmvAM45yonYHYGrLzbJud47aIfHxS21UW886fauBXhxfPz0UKLz/uPzzw5a708Fy5zR0xPyBxjB/Y9pwiJAiq/sDKg+R5yQ1JFsE/HLKqOQGvTRhKW0YMKdlxaAjWHljsxiMCbC1CjF6Uk8csxtVcBw2Ao4zbIN8SncIChIin4vAUqo2jiC7RLraB2jxWiOuaVrPjPNYvD0HzE3hPacdd5bDGQtJRKttfzs6FeGObe0Zuvncqr52q4iqNFQ7ApC4QYXs1rflm1VAtqrkbMDpDIbIatBx/Hx04blOD5+6hAltlDzXToJ0IFSYoO5CPTKX9S5t48H24/eqylbY+36B6xdcJ4X2QEIuxfA4JcOnfFaUia+hRlqXVSTsTuLdl2mJpe5WtDfuCzMWz2rM8msdFNMixJIKUUkyYqKHiBdvnmvvq4ByDsVH9CYFI+EuCkMxSOTvmptgf7W6GjCBP8Fjfb9QKPJTduulOAWWm+3ibDa7NXWXXkL8v6F1++U9LasW2484S/QN/QX6NtaoG87TCn+qJr3+Cg2BU5wR/+9pCofBO7qFSMcDCVTNQJele4t3JwlD9jsL1Scwa0ioS7ZCv2AEjpQng6AsG1AXPGEEq5WVI0khRIGaDVYhohppLfJOhCFU4Qh30c53LBacys+nKwAAfPD4vV9S6i+v1D6vCh9PzpA358Am+9bw/L9hci3FJHvm4Px/YXDJ52KEZ7qMKLlWqDqaQcHQ7ah3YyqDi1LiALEQ+OcPVpniDa63lwFuviMPSJhvFI43tWnylC+LGSJcA7NXl2dqpeGVL1PXsEnIKYQ5VewEqq3+pDQm5ku0NSumDshqBJdg6hbPME5dYj67oPANTtg6cfI0Y86r2/ZHzSO8eFJMEBP5Gj8L3Rx81GNDHp/i4ZHo6Hc3LzFoXjw7wN0nmUx+RcZ/0qLw9PBSTAMhieGvCe/vr57+6Ynv/mFhJ/YAVLF6Q6HR8EAvWVjGpPD4cnV8PhMifvwdHCs7mkYofNgghMa7yrq9v4WyfbRE70nykk0w0UPRWRMcdpDk5yQMY966JGmEXvkB83LufBmg+4f48jnfUZybAElat8QdiM6P9ek3uZQJqWlrJNUnbfsd/xA6tL6RPKU7MqNb/AgezNky9QD/Ng2Q46D42DQHw6P+lOSkpyGdep/kC1Ay1jrY3prpNsG9991yWjv9GuNrO5PzeeQpAXjPVSOy7QoF81hnD/SxhzebWpgg/iu+jgcBMO6pdwtqbXCogtWTmHdLf/qIVaWUXlW/3xz/q6LTyXec4tzygi/KTx/NjgKhp9RgadP+IFd51NHUTCX4S/MEU2nkDMiXHMi/wntY85ZKG/TyXLOqT4ShP0CbCgE1wZi2Kp7KjtTlZAN+pd67508GQ0E9z4uchKyPBLN0XQaK24LPAWoWThCLSERAS4P6sGzykl/7tO0/xmRNMQZLyWVvKe2Oz7KkHPaaUpxqaZtYFxsjnU5STnLFRLxfwj51EP/ojnhM5x/OoAzS4DCVXi8urJyjicTGjYkQdOU5K2jKptA8iXFXDXAHD3RoTTVqvrN5f+ghcnF7Dmg1KtyuYA9B5MAknL0OZXYiUYRVZql6XF0BcogRTJdWomjwNMp2ALV5PuxvuVhKbfW3sDWcnWX16N/+nXVpNFtezsL+etmVqhUSr0JjigPcwKb7voMU20CBVZ7beNilW9StZt6ckdnV3laYWuzs+AMMHR9KT1FBUSt8tiN9Jv2+m9LFuKvsPN5n0nARskBbJlX4YGVBacRWcyIsfplnJIcj2msSxRq89/4oX0dEMuA01CHID72dI0aEX19cf/BLGCdcCcVkPyOxscpp64cAmHP7YxyYKRoyAXD6Y7BHteA/Sr1RrtEfTO/n0zsGOglbF9EX7cfb68OxD/AzcUxvGgarT7ABR7DSpSjV2reHjhnbxU2wOcSx3M+LXEeBfLfQciSw8+PZDwjcXY4YSPIIIsPP6XsMSbRlIimDx0GRxqXlfBgViT//T/QkCHMFUb17m8H3uwgnZqoj1eap1/7/93TfO39tgL8jgd8fhdAuG5H5lKJIwUesrzyLJ3BqTbpdlITXEYCBIfwgfPDBmjtxT9vb7tKwqL4u90VNaRaq7/aFClMPrVmcbOE4xhWQ7s339ct0yN8IBb+L9iwwwn+DGoe/xQ+kBGcJo4s4vgozAkuSPTfCyiUYbq1bSslci2++pIxLizHxT+vbA5/a4zvdYoSHL6/RfIaHDoKhkfBac9O43HFoRIFP9xcrHALn6RlApuenU4QbUWtExQLtobyBUPTnBy+IfLMjquuItgxOrzkWJmGJ9eXBzpxQlWUz6qsZ/9iieQBdoCu7TNnVYO+3oFqVJ9PNeVaXz26qv7jDBcjykdiCtDoQOl6XcdN6w1dv778zTNG/aPB8Hl/MBgMVoCD2S2y+TnKia4h2mZgHP9ZWRt5gyShBZ3K7Y+RhR4Mo/1RbVzqgvGPSDil/TFNxVMI54VT+g/xj5+NHE+HwxXEKBRvtFPlV7tIliMe4tSvqg3mBSfDwfAsWEUpRPspyYMHkkZsVzfs79xy3Y0FHkhAkoQm7jhJ8The4q7bDLGcBMLz6sDMJGbYW4x9/1Y0I9NhcpxO1dHXIBgIj3s4CAYymAj/1NhTM4ISxgvEyQPJ7Vzzl8LF5KpFJnafwmPjnHCewFkbWO0sZrTQQklIkdOQoycSWh89wFF+df1Epnl/gULlWU4faEymRF3mUqfEBcnlrbaDnqqkUrVqn/mKNky74rNpDs1CGS6ZNQE0HairXiHLSIsT4HG/tKsOqtuPFBbfQcNTPQlOVhtikj7QnAE+V6ejrK801lc2WcsGHadzZC4xgJaoEeqhdUYIDmRpTgCz7DsYooIkGcu/p9G5UxQtGxg4+0lwUUpBC5FGClIPuOg567Ueq3B786KjhHcbK4eN/Dusoy2O1TZb5yfv/nl5UC32YmtMC1zQBxsZ5YHkoJ84/UTTKYSo996wx70e2ntLIlome1Kb917T6WwPhkBs09DDkRhUYz5Ni6AJvB6AlBAMpq8CuqraehoMVGbuHGKIEZnQ1L3IJVqoXnbGyNIieINyxB5TwI2NUIJTPJWxp1fXH27vgvf5tIeu0zBAT+CBMJ7o421fgqSkDFABJ9TaauVTnJpyLY8zJowB5foyZMHQjMQZ2H2IqHMSgnI
KzxbshPC+MpbaJWIITjjCYc64dJwfWR5HLSqaPkRBSnkRTNkDxCz6yhSBujaNgTwc6aaqakh26F2YUfd6GJDUKqQHhkIvgrr8S16lQiCxlrKcFmogUE6mWNaftEzAehJsOPGim9B07ZViXwjkBRrLcpo4DWcsl3/2Q71lVvHIl/IdRzL/G9q+0HdeVDnKMRQ1VEcXOisSplIcq9tyYjAgCOeLHsrTMo2E7Awfjqm5DZfhYqZftl70ECj+uyRZTkIortOHm5fVhxA7hL+oe3AmzPa0ymHW9I2h8CVNyB86G6edPBk4rb2c0Knc2L9ARV4St3UpEadZZoPQyD9GPnVuYd2MD/htsJZMyxwGRXbm46+D6MUI2e8tZAsaXXdMF7YshMsBsCOgKS9wtQFdKiOAKJffIv0topGeFmHMyqiaARfiT70Q5WKa4wgX2D8p3qpfpTcROp/CjrU6SMBRNIIXRrpJ8WZIOJe7FT1HHK7hgyDLmdCGKsG2uiIuf+l/WawbdpKX+kTM1F/guofkWG6YPJ3TBE+Jp2uc0D4eh9Hw6KnXnla9X4sW0PWl2YhLOemhUHr5EzoXKgIvsTiyZ4gmSAguMCIBIS/RMe/LC/XM6kMTWG3SF3djGDLvr9xTh2lT66vr/LF6S3A4oykB49KpM/VBYH3QtS97XzHqYEkXf9W1V6XjXQeuMb+69pOTaeU2L+7DedXbvrZHEQs/ga4qg3Sp//ZML/kb4gWGQ+g4lkg7YI3kb2Je8xnLi5FcEirPSvsBsr++MUYt67UhC3mOB91PHCMilyW71rpfWJbA/J94hdbSlbA4q/cGls6aUCv2WvuyW6frd6cue6Kf0N37y/cv0Gv2KPyaBAPMMSf/aNDieBhosZeB2u05MjZdkhBozRULf6W3r+Vfnkau0wmztVUtC+JzpG2NpaDiuVc91bpxdXFr59BQnTUSkJAH80Thz/+kDoGxqoguNk/Vl7XLGsyAzLRrevvQODcq/ODoy8Q7qSQCR03VsDf7ZTwYlzRudtkcUbN67w3PLoeD53vdyHl/i6AHO/DuJyRkEfHOg0W08CInRTjrTozuRV7JSudGAz+VY5KnpICTEKWHv9rPPO1Wvxtnz/XcqkaRrYWLrWr10VLL6hC9WOfqEs9Y5Dc7K01mSwIZkyVVmoMruio9Nnzdnm5YhD5eXzY7Ev/PMxxuj6mqxWZnLGqY/A070/nezc6Uufz7xobZ+nmU4Cyj6VS9u/f3jrPIolgtJAnOmiTDvS15nvbd0W3R5ic+J1B6hZNiu0Nctdsy0BHJYjZPavvbzTuu2m3pWDiCZFLGW2fZaril6yV+0Lodm2aXdut3+jbvV7arFhhly6vV5cY88LSrfqzWFbOp9a0DVdtopUWAfOnqdqoeAvKFhGVhnYcij+upOMZZUnH7i8qDO7956+dY3/+XMcKCoQecU1Zy8YVKlm0LCFbRkMYueEFw50Z/pe7YtDRpA6Ot0KYNbuBEogqcZCsPVMkb42ThNSE4jy3T4gUadlPXCl9bxXZk2JWmGoM3ZGnEVfGRjyn9gkjGwlmNHw0R6uOkpfPzCh/sYx6rkzMN6wlOK04jhUemGpqnOKGhm3JW96S9WDPtM7ldMnd2Sac6qAyk70tEmRd7VYooYg8kf8xpQWpbLw9k4bo0iSZ6GlN5LiOgfcw5ScaxgofzUGvyUXTl/ZAlS4DWVmCrBgOzHmOmoKFf2Bbhq0jcwjxrmTBLVMCHbwYUWeBmXeiooODW1kYf5JsUjov31okgG3dtXYoWAqhJypqoaZ0prGGVrkMkOjetqCNUAAPCKGYqgwnAkx/JGGV4SpCmWmKTLqbUbPSLot2Mr2Sz6mVx1pxHRZHZdVusQTHos3BCTbmdrd9pSOrguR6OfdENJ/A5Y5EzRG0+1sJxtViVTa7K6QJm7bElOKoKatr0NrYtCMnssKh+UbQTJyFOWUpDHOsuNT8KB5RE6PXd3U0D5wDVsypzkneUbuuRtl86rex8UN1WmcB+Sisya8kBXZXI3qCUfBSyyKHK6wwtlDwc10NjMstGiVyRL6l0VKZdY6oz2dQurWkR1ozdLaTtI6+CR+8EcRJuwL7EBrfIVKaIJhfFdEJQOA9juItB8pxJXCwWhmWek2hFfjwToE3/29V/2Rh0V349Jo4BlrGHv7VSZ0cMMpzjxHGrF4Ybaj/Xx7D2Mw9xTKKRnUAm/hOPaTodTXBYsFxMNfivvkJYs8w/91od6UkM5QkhFiJEV3KS91VKUYXnKNzrQgIKKz4AmFcmm6tSoa5g3YIXHRe3FipvFeyKHSpv3+Vsvv2/ThK5O3XSfTX4C0looQq3+taG1nnRulKvQ2Lt/ve2aLOyUTejTw+Z1aAnYJ+6ZthryxvjvGCsFxG7hGDxn8JqSghOaTqdlLF3/CFkW/t2qWBjnE5LX3yojVvPGt1gdk3vp5aNMc1xAkhGmkaJjeWjoKm6axNRU+AudBgnoEwL6pnj31iUiqxvIb2Wrq3NUEIeWf7pexOZIexbCM3buQmyVdV01tuf1fnY9OTqzi39g64vg0YfHKIcGx793jUKMuGcoH3V9r4EtFX30CDX0k6+rE5nW9pJWaHJrKBxAamVZzjlCiSjIF+KJntW+s+aQvxVvgwXuvXda3UHPTQ1j5Wd3+cVMvI0QPtqT7LfQ/tjHH4SipBGv7Pxfg+RIjxY1w3x3aKtsW1fZbUbq/2wgO9fJEqSeIVOU1zdN3SKRin2QyYvaxowFL3wSTn8cnWHDoWXyA9f0Gj/wGNvotK5UYrWWtohFO3lu75Nq/S/TLwf1Ltt6xq1xco9/TcnH4kno07r0oKBqpXAsYZHqKMSq6zXgVKmcFZROKNxJGvWl2lKHen8SCKGWl4Re1yuWAtEfCGYVhGHKgfINM1NOlBzCdhQnu7+KMNbWlk6msXVjnbWtpQv0H40DjLGi2lO+Oc4gDivsJoFSbIYFyQguTCa+yEOZ0RZT48R4eV4J5ydo0mZQ2kjXo77EX2gtlMAN9UlwlLFQw85ceiDrzP5hYZsb9a366h3xrfN97bZ7p/rC2Z627yAipHfh8sFPtf1pVPdEhUMPc5oOLPOO2XBSo4KVj/+xXn91Pd7YUaS5j1frmB21z6U2GIsxt4uyDCm9NarN75JwGVNUkg2IwnJcTzaxn7gSrcmBtYqZY61RTAJF8tobGSzrDb++upK7evFN4WqIhAGCU0svQkpcEs1CLMS1cmsEKXgNYnewCa1DQmk36ou1S1EjVEOyTFwqdG0aPrgCi86net9dj0CufXJqgJRVg7QcjVbIZtz0Rj+Wv+8hUxf1qqNXQYThHexD7vKtVwiL/T9xiG92ZzuK0tZylhTBztEe74CSybFtyNHNlFuAvD2aNLJwB1I6pSjv8rphsEBhMQl/xlHY3htFLrNFxIH045Zuf+VkwN+JqBUkghVeMMLR84W1eaegC/PZ4tEbtdV6U4YQtcFXO3WGJ5cNOMsgFxegq2YzxGWRxNqRayOe+0iC53YplmDadpxv3J9U+MWF4pJbldmWoEY5iQoygWpAmW+Be
g86w0HqrKT/rw3N3W0tP2eBqshQQcbrQ41zG23obVMF6R0aWTtNVRuiW1blgm8inm7tgSckVwI3eTWkLqjqja5UJ1CeBSCCROkWbJDwvnUVh8/PsMiuS/amivycD4tJSyNuz69lcieExrLio8Fg/xPsU3nJOUUKhk5l5eWjIQzGh53srZdX2SVygRiv1rC1R5Qx3W7bhIcmrZDVIOYfa63putQBSZjs5G+dYUim+zsfNcAnDfdPseUF6QW2lmDp3NVol/LWRaAM40j72W6FRbt9nSvBUS9ri3l5zdvQfjeK4bdt/Br0VL1XdVTaKNhlODfWd6gZDwvOnb2VnxvkixVDo0SgtGfpmp3i4Wuxb6B6IQTubFQEsgWu8dZ0pcE3deT9vmGSu51u/xctVKuaz3ZahSz6VRiuruuUIvt8OzYVyTiusI03YwEu27iylRcQU3EdQgwoZ+YVnE/f4RgyUUTj0vZcCjbxXijKzpqLiRB8iTSODINnxJwpClHFYo9G0PQJNJlvvTBLUf/7r9i+SMWDYl/6doO/+5/IDjuX9+oFELxfILjmKMxDj8BNjma0geSVqg2hS5ml5OEFcQtRrlU1DJA9R2JuoqY/WiijggvaGqfRmtUi8YPtd2ECyFnvd5AvY9IgWnMre2D1atdCeXQrVWLkKdVGQstZT0GlpUxgEnJ+oa/l6k8qpcJmXLQVMBgf0XNUWI0pLS5y347tCD/uwZ7yBK9IbGFonpXVTB0uilOxnRaspLHcwjXWrqn9jRifeIsIXAHTpZvxim6vukhrM8gZYXJlH5BnAmhBwj9X1aq8h8Ix494bm8TeMHUhiPHj2ZeqAPO+0A9uFd19P/mrj4pooVuWShCCfdyC7kBuA9oJqtVqsqr9z0UkYykkSpaIs+RKnQZEGyBaDWYy5etrD6ADr7gYpuwLw2BZQesIXJYvZDltlkKNsKUUb6+eTgWDF7fPJxWQ9qdeud+VTv9zrZCbqFeqHRkP2M31r0rh6WFdNmuzcg+BFsULq8Vpmy1uhuUpbTqUZrmtlKXstVi+J3IFgeuVoZxZc/FFEZ8aJa0GjklrWzIji3WSNykNuLeMlVvS/BqSe5aNbHrlk2KR2FC5cDiEGJWVMzWGY4ncvERkuwhMg3UbTWtYIcSyiQZN+bH0pnbwWtv24n4Snou5LKtQmiNpPoWcTWqWntfWmitXmqtRpYsslqjyg0ettFkPBWnhXa3zgq0hSwNSZ5qFGG30uvm5/Xtu0xfFdkldCNvUdq6leGbiLA1hrxwa7l5sHhBOrU3JtweEe58o64eK960IGNtWFVZRg9DK+SHL2LGMvHKau9twGzaXi/Tw4J7w357LMgKq5vz0aXGq48tu2DvdhiTFXZX4Witur4eZla7D9CNG1W+c5MBaq0U6mHBKa27HQ4aBXE34WVZMV7HWyZfMpJTp3yB5xql76KiW3WlKq5ityhPgThk2c2rxKv6ZUMNoYuQC6Pr39l7ApYtiUqAE9s9vcUdZhy6uZa+dcu7teiYBLj6AdfCW9uODCTnigPkJEa0KWTLOvD1qKx17KfTDE4LwuuuAiGKVLvKhejVJdrM55L3CebFsI+XeDhGMVxI6j+jsmkWDhUC53eudQ1yV1I/9dE2h6t2QPQNJFCjwGhzcRQkUB2js3gUIPKfWJs1yvn3rcSaypV0t4ZmvluLaXDw/YZyGi7bA1bIIVPbbdwlzbKrJaa9lWzXu2rAxi1CAjab5bIImT2ivthqmxIt0qF2P/JeHm2onu8Rp0kWz1FEUlYQSOuBKzdVelJOspxwKF6NES9DSLZgOcJ2qxNM4zInViyT5DwjIaTTaMy5tLCLgUZlSCJv+pkZAtVb9Ut9CCrkwT6AoFT4g+c3b9GVeuQIAx5WBX0l2CLLFVChD25Qt+wf2hZJX4p2Q5wVZa78UFU8pJInYFSpWsoKuUXFBLGFiaIixivB462W3q5uZbVaoGYKQpUMBLJZKfMnLOMsp11AKxRdrUmDr/QhW4jjWJ3RPMp6SyjLaYLzuVDDjBQ5Lpg69K/ANhqUwajSdDr6ROYdyFt0i1e19CuZ1076JVSP0LeSw+083amHHvIlJFmn27mLUj/9gTB1FBCL6Zizx7Q5kAtTzuuoTKvkSN5ptZHBEE4KiXFk1AnNcJaRlEQKpy3Slb+qrwI/WQnh3IXuQG2heuRXMC+1WlqKANVLGw0sKuONhCNbqO5HmJQjh4yW7muu3bLOzdz3NjbDaeTCFKAFUAVdRXotoa1Y7i42IFtII8XldFbA6bFMMlVnvBK9K0cp883emDVz71aYJ+femrEVvLuYvzqet9pcgSV9E32o1xqTeR8t4w92Jh9tciekXj1VNlkVRGoggOx8AlYFfeO5a0kXz0XAARstImqtnO5z2TApoH6DoQFdCWu1fwFJBikrUMjSVDjtBUP/w/fred0GfDYjORRghVYQ5YgXNI51bc1IVUE1uQvkc4ljnZXtcNhD47KQKSdZjEMyYzHgrOUE/oyaFFynMM0Qp0WJNQBIrVVBEdz5K5icUFL0HBVsCtPX1FTg+shG+WB7kK0pQ5VvZWm6PT8YtHpJwfSZmqRwzntx81GWPCQJy+eolJwanJJaLZu2Yx63BoPadvi8PW6fO3V29+7lZ/f6AIoD9misA7WKo5bLQmZVzcrFG+k2tbwPs7LRtZAblPtpnnLV50nBChwHKcuTIAubrlkL4p1O8MhIHrowZEvcR/WB0C02AToBE4lnVvRYZ3VLSGowQ5gXPoBPSHYDZDuhqTJRA+alaqnK2xU9yTKsUIqxUFU9ndZksVdZ6nXwP43TTamEa46S/LgxUEqxVxmrxhDVEvr10IznhcPfooER7Spamog+OCxKT7erLLDQQjWLVSHTSU6IbymtnlTYWMR3bNkAQGnjfYlZV/SJXjSRNJWNyIL+wjGJQ5XmJ8uqMumYvL8N0PsUvaFp+UWoVchSTnnBK/wq02at0ywuRbPhTOnkuJxMSM6hufe3/5ZJZrDthstDNnHidahVn2K5y1ZDJz79lzyX66nvYcWoL39M26xAfSgav28ovHvBaFWNV19bKp/VCh30YFYai28Z+prNXFA4zDKbayhmV+O5SDdbDegSE7rIiHbAE96uId22KW0a07rYGnOiw+C9hW+qAI4YJQqQL0IcvstIWU4m9MsLtPdfEP9ve52GlNM/dmlu4CYHmNwHmtuW0R6zGXYYMRFSzoNmd9un7wPhstLzLSnQLf2DBNIbT+AUj018JLMwLDMqE6Ehy1e98+TD+duDpgaE9bHupAFVPeWYJrSokKBkcz0bLmoOoUB5CiwMNljpbrPaY7LbUHUWqqikyqkDLel259+ii6kJSQL4xjPwrUO/aPC9JEuyKmmWOVzwU/TzmIYkQK30gWe+I/o0gdCHNkcr0CdtuV4rRxMa74zSa3dBXiZNO4ZtAZnpWt613ZSF1bhkSyXetkEDnSXW80OlrmpLSgnvQUs6Dcj6KMgZK1baVm0M01nzSwQBNaekHpFafhHMbDXltRXGCptOHsgFMZQ59Qb3RruBYoNpf
w+wU1WNSzGgUHMfooHWscSNeejmwsNjmk79hxMLiiGp1tYQan2arHJe0BHFTSHkWEWdFcW1mM15HCu8UQljDscg6lV5nQTAR+ESFE5MKwCv2uCjFddyoypKl/qSUwWFLEjo1esobciZJr62zWUIT6c5mQIKknqF99C8ujIzoTkvVIym8v7apdTFcV6IpdPZenrkvETWIDjjaBjXVvETgcxTnHpEbky+FPlGHK6Au7kuk++MT63HSuuKO18Ae4HklPlwo3Ecs3AkE/n+RBwrgiX5sK+udilm31LgvPCCZUum/aBUq7C8kh+wqSbr3bvmuGeiDL2VmKdpycmPMOLCHVGuUTxXN6Fb+f2BBrsj3wXrsDvyJ1gtFsQ28bmaa3zBMqG84ScJ3l4d4QFiOCD+FRBQi0le2wZVOf5pHdJ3GZnrjZfJI4BDrxYOWmikcb2I9W5ovJVXiOEIVHe6GqkxTf1kbk3vbRpjSLSU076VzOaSLX79s6k7/gaKvrJG453o8rpK24GaVdR1A72sUWJt3eSl/QRn7ubt1nrs9Gt+AFY5ImlOwxkUTyhmJtz9N4Q+0TFOsdRS2clIolFZ9xJM54FNhve40vodrQtSYTXnFIPyf7tE5P4aUL5O0c5ALOspDPraNeR/GZE5eNA+onxXmDabEYqOduyicZlGsQxQucVE18lDe6PrXzroIYJzQNOlD0TnEcDGQ91jhyTEwp0MEgPfngfyiTsFMpyuFrx4oORR1hPReqnCbFWpYahsNtKQ9C/Q3j/FN6Irvle7WzSjcdSq8V+9Bsz15RN+UGHkAAY8nLo8MRfmzKxbu8TAjii3ke0l8EYE4JbkCyKpMKRR89rcijeTVwu9tNZFAQqbBVHsWKVn51hfb7dD5J2NfP2JplCUQeOhQNwHqCzHfcDwMonA1QEbcCOLGkA1hp7OIfJVMhB7w432Qg2w/y07hO8nE06Khq1xa6WYIukaF32u0+yAQVsTFwXalkb+VoJU3rVk6pHErtzxeRo2OFupaJRKvqxl+oPePWKOZL1/Mb3naTjLWQoYP5Du7zzxSX68uefe5gp+2t4SfKmziUWzLXmDOXvkIzyZ2GCmbtere6FVDES0jnTreuabJGde4IK0VGFsYmRtW9ZN/6+9h4W9NCRiI2hpd0ztkmqfeQlEi3Oc0RKlaJDTHCDx3908Ix48IquwFqR4RlAWxyk1s38QoFsZhq9Q18ZWVB9zmGWBoDJo583jB2+JNwv+UO/IF7A4K4rsxeGh4jEIWZPjHtrP8XhMi+Tz/kE7RzlxUPS+DVeaCDQmgOkFGaIyg6uN4Rdng4U8H34uSUkg03rfszw3s463OU+h786zdOmM2p3W2YgxOqEYiAegQZbR0EKStjKOs3IcQ41tBICCIaEPrVnnbcndK4jCjNrC+kcr838+Vf6o4Uws8XFMzRLvP/1391p3zg9udryVMbDKnbatV83cSiXB7m7Xsv0/ZFZFjpdFnQyGLt5WTngZb1he+U6WbS/jwlMqMJAVz+3q9GLYxMO+rHcTuQkJHjOD80/N859G3faF9c/9qB9LGDt3C5BbhclhxGHjC8Q1dd7PRPD34O8rMrJWKfYmvw16JLAJSTc8awpjvz63JoN2Ib0xLELDLsqkVJusN3jOygLdzuikUOkpfqs5aQmJb5+6V5CXcJ1mZYEuSYznC+kqxv7TyO3TJa3Ey5iFn4Tq3tGENCizQzSj+lHp6ioR5SzL1txTVCmjVQamak/V5JUekGvofNNtW66Jj8P1PBNU++//X8ekTRIez2Shb9JJCMv9k/8XAAD//0CmkzM=" + return 
"eJzs/Xt3GzeyKIr/n0+Bn2atn6xsskXqZVn3zL1HkZSJ1vFDY8mT2YlniWA3SGLcBDoAWhSz7/7ud6EKQKMfkilbdOxs7Ycjkt1AoVAo1Lv+Qn4+fvv6/PXf/n/kVBIhDWEZN8TMuCYTnjOSccVSky97hBuyoJpMmWCKGpaR8ZKYGSNnJ5ekUPLfLDW97/5CxlSzjEgB398wpbkUZJgMh8kg+e4v5CJnVDNywzU3ZGZMoY+2t6fczMpxksr5NsupNjzdZqkmRhJdTqdMG5LOqJgy+MqOO+Esz3Ty3Xd98oEtjwhL9XeEGG5ydmQf+I6QjOlU8cJwKeAr8qN7h7i3j74jpE8EnbMjsvm/DZ8zbei82PyOEEJydsPyI5JKxeCzYr+VXLHsiBhV4ldmWbAjklGDH2vzbZ5Sw7btmGQxYwLwxG6YMEQqPuXC4i/5Dt4j5Moim2t4KAvvsVujaGrxPFFyXo3QsxPzlOb5kihWKKaZMFxMYSI3YjVd545pWaqUhfnPJ9EL+BuZUU2E9NDmJKCnh7RxQ/OSAdABmEIWZW6nccO6ySZcaQPvN8BSLGX8poKq4AXLuajgeutwjvtFJlIRmuc4gk5wn9gtnRd20zd3BsOD/mC/v7N7NTg8Guwf7e4lh/u7v2xG25zTMct15wbjbsqxJWP4Av+8xu8/sOVCqqxjo09KbeTcPrCNOCkoVzqs4YQKMmaktGfCSEKzjMyZoYSLiVRzagex37s1kcuZLPMMzmEqhaFcEMG03ToEB8jX/s9xnuMeaEIVI9pIiyiqPaQBgDOPoFEm0w9MjQgVGRl9ONQjh44WJv9rgxZFzlOAbuOIbEyk7I+p2uiRDSZu7DeFklmZwu//HSN4zrSmU3YPhg27NR1o/FEqksupQwTQgxvL7b5DB/5kn3Q/94gsDJ/z3wPdWTq54WxhzwQXhMLT9gumAlbsdNqoMjWlxVsup5osuJnJ0hAqKrKvwdAj0syYcuyDpLi1qRQpNUxElG+kBWJOKJmVcyr6itGMjnNGdDmfU7UkMjpx8TGcl7nhRR7Wrgm75doe+RlbVhPOx1ywjHBhJJEiPN3cyJ9Ynkvys1R5Fm2RodP7TkBM6XwqpGLXdCxv2BEZDnb22jv3kmtj1+Pe04HUDZ0SRtOZX2Wdxn6NSQjpamfjXzEp0SkTSCmOrR+HL6ZKlsUR2emgo6sZwzfDLrlj5JgrJXRsNxnZ4MQs7OmxDNTYG27itoKKpcU5tacwz+2565GMGfxDKiLHmqkbuz1IrtKS2UzanZKKGPqBaTJnVJeKze0DbtjwWPN0asJFmpcZIz8wavkArFWTOV0SmmtJVCns225epRO40WChyfduqW5IPbNMcswqfgyUbeGnPNee9hBJqhTCnhOJCLKwRetTbsjFjKmYe89oUTBLgXaxcFLDUoGzWwQIR40TKY2Qxu65X+wROcfpUisJyAkuGs6tPYi9Cr7EkgJxosiYUZNE5/f44hUIJe7mrC/I7Tgtim27FJ6yhFS0EXPfTDKPOmC7IGgQPkFq4ZrY+5WYmZLldEZ+K1lpx9dLbdhck5x/YOT/0MkH2iNvWcaRPgolU6Y1F1O/Ke5xXaYzy6Vfyqk2VM8IroNcArodyvAgApEjCoO4Up2OccnzLPF8ys3SPNFdZ/rOU908SWe3honMXs92qhrKJm7fcY88LTtBBtm1lWiEG8DIcAqpWHaMByeNIsJR/ghD2hNQKHnDM9azAokuWMonPCX4Ngg+XAfxzGEw4jRzZhRPLe0EYfR5cpAMyDM6zw72tnok52P4Gb/+9YDu7LLDyeFkdzDZHwyGY7q7t8f22P5edpi9SMeHO+l4OHieBhDtegzZGewM+oOd/mCf7OweDQdHwwH5j8FgMCDvrk7+FTA8oWVurgFHR2RCc81q28qKGZszRfNrntU3lbnteISN9XMQnlnON+FMIVfg2p2PZ3wCFwvcPnqrucXcSihqDlKfF8xpqqS2G6ENVZZNjktDRkghPBvBMbMHrL1Dh3TPInpSQ0Rz+Y9D0+8E/82KrQ9fdxCjLOdBfgXvLUBeGzMC3Il3EKBbXlZbnv13HQt00iiwzZjRt3ZQE4pP4S2HksWU3zAQR6lwr+HT7ucZy4tJmVveaDmAW2EY2Cwk+dHxacKFNlSkTjxtXDPaTgx3jSUSJyWRSkpiBVXAGcLYXBPBWIaK5WLG01l7qsCwUzm3k1m1KVr3+cTyD3+hwFLxpvFfyYlhguRsYgibF2bZ3sqJlLVdtBu1jl28Whb3bJ+/xOwEhOYLutREG/tvwK0V8fXMkyZuq9Oy8F0rpCUVakS4igNWq2eRxN1EY1Y9ApIJn9Q2vtqxJgHUNn9O05lV9doojsfxeHaMew2o/oe7EurIbsB0kAySQV+lO7F0qmuiaWmkkHNZanIJN/1HxNRjQWj1CgoH5Nnx5RYeTCd0OsBSKQQDQ8C5MEwJZsiFkkam0t/7z84vtoiSJdyGhWITfss0KUXG8J62t6+SuR3McjepyFwqRgQzC6k+EFkwRY1UVo71ujub0XxiX6DEijE5IzSbc8G1sSfzxsvMdqxMzlHApoY4cwQuYj6XokfSnFGVL6sbEHSXAK3MeboEfWHGQGSwC0xWloNEOR8HOfW+qzKXQRirbYW7EnAcQvNcpiAzO4ha2+TEyPB1IHi3i26gZ8eXr7dICYPny+rG0agTBdTjmTivrTsiveH+8OBFbcFSTangvwN7TNrXyOeICaB9XsdYjlidV9tJW5MnIDqruY4lGnKfuNPYgzfRmmC+Fh7+JqWlwZcvT6IzmOa8oSKeVN/coyMeuzftYfP0SLUjQG64PQtI+n6b3BF0sq8HDnU/xaZUZaATWJFfCt2Lnkd9YMzRjMqloDmZ5HJBFEutulyzSFydXLhR8WaqwGzBZr+wj0eQwQHUTARN0D5z+Z+vSUHTD8w801sJzIJGjMKxkNZUaC20ol1tUq/CKpC1mbZwOCXLY8koKjQFYBJyKecsqD2lRvXRMDUnG94EKtVGZTBRbOK5lQNFNBao8ei5n516jzs7ZkG9BfU+QoA7lhYsMfXbXE0Rw4+GCkdEfgJ7e5W6tAhxo1Z6NRcWvH+XAjcA1GxUnL2BumOwCr9CmtaQVrDC/erDifaWwWBPxPG2/TzBAgyHB0U1mmVEszkVhqfA+9mtcVIdu0V5vYdClOcIOsh2RpIbbpfLf2eVzcQulCnQ4DQ3JXXbcT4hS1mqMMeE5rknPn8jWG46lWrZs496oUQbnueECV0qJ4E6s7MVXDKmjSUPi1KLsAnP88DQaFEoWShODcuXD9CXaZYppvW6dCqgdjSOONpyEzr5J7CZ+ZhPS1nqfInUDO8EhrmwaNFyzsDcTnKuwRx5ftGz6jHes1IRai+WW6KlpZOEkP+sMBvkwUo6wnOg6MLD5Ol+lLgvRoiyupQpCDeREJmVaBLGq3GU8GJkQRklCNaoRzJWMJE5MR9ldCkqIMBS43askqKS/3EXONXJ0x0eW7KWhumPiPbR3qPdp/5aDZAf7A9otAuOM3cmHUkg62xv1eFeDT
Ak7DUoHY6H4/hJbc4pk0nKzfJ6TQaCEyuzd+7OK6sjMGdKrIEjheGCCXOdymwdMF0tZD9nxjB7kWSs7tQMs2/qbrhfH3/3EULtXsyaEPw6sryEydpAS2Vm5HjOFE9pB5ClMGp5zbVcF85PcApyfvkGkN6C8OT4TrDWRZoOpM5dPqGCZm1MAa//uGVgyuR1IXm4aOsOLCmm3JQZCh85NfChBcHmf5GNHNyh/ee7ycFw73B30CMbOTUbR2RvP9kf7L8YHpL/3mwB+bgMvmHQ1Ez1vXAR/YTqi0dPjziDDoqUckKmiooyp4qbZSwlLElqpRWQoSNp4MQLAcFchhTOFYqHKbPXn9MkJrmUyt2iPTAPzXglp1fXLYKXk2K21Nz+4b1wqedROgLhtTRRqAH4GDkaUeZw20+Z9KttG5XGUhsp+lna2ptCakPzdZ2yzQsYHtka1VqmvPLHoR/bgVwt9B/OqV/Juc7dElwrwSk4ZuSDkAthtRpK7FJgIqnIL+cXJFoTAdIG4fKGqiVZ8MzKNHA9ulONThr4s42/F3uDvcFD2KxiUy7FOhnYW5jhPv7V//vJXXCtiYM5mDoZ2N9LNmZt+rNy/u+VlPyo16rVt/mckd/B5jepEVwveCLPj18fR891Au8uqu1jNYVrmW7/UDIh9fUxV5EQ9hHC4MVHVhkeqK3j/CLoLf5eRfnp2fnFzZ6l9vOLm4Otuhw1p+k6zvOr45NuYBoGeiFN8JTOqRNE3/54Qp4P9nbAp4xhbSw7ImdWnZCpYYY8A1WY6x457I95JZhbWXcL3ZxONHJRUwtJfi2LgqmUavYvMmO3NGMpn9OcZHzKDfg5rBhlIYVwoTCmAx8ntgxEkFJoPnWBJWzKVEIuyxT82DfuQRdshP4ZhIGGEWfLYsY6uO9g0B8M+vtn8O9uf2e3tlOCmqRJGZ33Yzd1bF4pKjTaTs4v7KqcJQHDEF8fXwWzHHnGkmnibMyWK1fGQoI2KG9+rjk8w6UTWaKIURScEmJKckkzMqY5FSncgROu2ILmOVr+lCzt1djQe+2iC6nMw9Rer/poo3i3Lhxjw47/reADLV4P0AJrq77Atz9J59upw9Hak1VU0bv348LtQcwo4vnsfaQNUyy77tI2H09OtExpxqczpk00qccRzt2DhRQFyzzIuhx7JTXs/4+V5xflvWg4Z6Gy8srGRMrEPZekcr5h2ddG/EXTJY3Rk87VnDHD1Byk2kKxlGsrr4DYRNEqBnE3EDVajnOeEl1OJvw2jAjPPJsZUxxtb+Mj+EQi1XQrIVdqCWxRoqB1y60UiULWeEk0nxf5khj6odpXtKLlVBtguxg6iTKVkIaAMWjB8hxWf/XytIr12UhlUn7YaDPGCBs1qghoXyc1hEmA6IPKMCnt0f6tpDmf8GpL0UeOMWqRCJ/nnlRAXifsNmWFqULJ4LXKD9ki9wR8z5QUVBkemdhJCwJgHhznsv/vfkdpptJrQAEp7Z7YmVMqKhs7qdNVL8JAiC1tLWjMcrnoJvPuM1E/NzFuNxaLRcKoNsl86UZAwsCTQbXZiDzyCIQbZUZ1FRoKawXxI0xTSXMbuhzvJLocD2uHr1cj4go8VCickdfHaFVjbPTwzAlpGTzPwWHLFJcdYS52AatKgkYW17CML8D12GRiL6kbZmd1hOJW/4xdvTzd6qEyFTSpCu8Bacg6et4RB0zAkqynleiQJG0G2Zw3DBsF0dhdAjr4tjkjcMW7mGK1E6uxR/i+RjelZipZL8nE9jv02UqFnlA7OYZnzBl4COTkrmuRCvLy9PgCgjtxxadhqJhWNturY3PK8zUt7p1dAUzglZikDYDlnh0K8jfpk7AL3tTVhQDmKHpDeU7HeYeam4+ZMuSMC22YI7EabsDF+IcRIMy+fgrERa4t/LQdgumjiXF9PkoMnHHbRU6NFbM7CBXhXKNxNd4JnKwNxIzq2boowWEK+I6dB010SjGr37XisaljUIJQIcUyTohBTSUilXeauTjOEayCZ+jLhQ92daMgDKRSTHCvaF6bk4qsQ76CuMIOolpLOO8d0byIso7Nejyz3x/G0S5nVqNEEzxkS3DRXnTE0iiwtDYqlMybTtdHI9xjpSikOAFBwkze/wB2xnryUwPgzV83PvAxFfQa4g03emRDMZCixfTaDohJQvfgrIoOkyUCHoLD/Bd3x4ZhjijBMxZiCGAoUEDERNGQN1YtA+1iGHfsjQMQfUzuzICZkFdVZgLXcYg0FeTsZAc1KHvMJsykM6bB1xKNTrjRLumoAtIe0XquXC3piesQelsHwY2rSuGymRSbSxMCdYksjeYZi2ZqQoYwUeLSbfyCPOmI6lXnJ6qn9eGg1UCQV+Qm9wYcOyzXFagOYQ8JA0rBybG+623zqkIQzgX5VHFwBM9CjpxjXUuS8cmEqdj8Bt4wDplh9sK3DKdvmKDCECZuuJJiXrc7V7R1/PNlmJxnPR94AfRP3rz9GznPMIsNAgHLJhdtS+IHBwfPnz8/PDx88eJFJzrX6eJsI9SzP5pzqu/BZcBhwNHn4RJFyBY2M66LnC5jgSrWizGfvZ+xm1XVYyeh8pyb5XXbO/R4jDqaB70/3AduAadA3qIYWpCBZKpTSFycVYuDl7rPqDb9Yd3b5fMB1nf0zn0eyPmpv2OAhXqG1wSU94c7u3v7B88PXwzoOM3YZNAN8RqpO8AcZ+y0oY7cWvBlO/Hk0SB65XlulINyLxrNTjJnGS/rNkxXEOKLMFo3V8zCuo5y7eBehHd65Ph3e5lX33QkES77bpJVz7Bf/5fhjB4D6L1dde3Iz+qr72Zi8yV5+PpveLZSYOdnh1l5FMCEiV91XB6BLnSPULvQHpmmRWUOlQodpTSXKaOiLT8vdG1ZGECwpkW5+IFHZcI16H0m9JchSg+mz6+L84Ezrq1EX3I988/phsQI6fLVXe4tAVinAS5yv+U9wqZwUVup+UaTl3Q+zmiP/O3kgvzt5IzcVALAcVGQMzHlIhD+P17ZV+z3Lge76/jQoiDMvWb/diD33EpVKXpkQtWUGtYjOUzfPkT4/YrKi8zYteZTQa2iUdNiZMbIZe2Xu9WZqxnTrFkJoabFg14w5oKqJYYphUn16klYmC77EdV6LGXOqOgimh/wJzBw0AJUK46ZZw4WSz4u8qGtMRpVso/ohNER4FNLY2tMr7YHIDJJePHUToxmeawN0JGR25JkXQUDX4qFzKkoJ9TVLBkvLYZ8CYsbJjKpkmhMVmXiK5azG4rO8OPC8sbv31wSKfKOWK9UzhM7J0tuizQplLxdroxbQ025thSL4yzjLoGqTcFwDzBl0CXIHCjdOJ6Uua81MYUQY7UsjJwqWsx4SphSUukqRC8e9YbmPItDJqUiRpXa+PnIS0ZvGClFlCM08cE38Gr1ir+9qvHDsAur1oh0xtIPXSUNzt6+ffP2+t3rq7fvLq/OTq/fvnlztfIelVjWaE0hcJc4fE2cDWwlSANV0DlPlbQ0TE6kKmQt6fvjXkRG52s+x3aKxzzMMJ5U7rS69Fp/hF1BnKQ6u5Xe+rAzfPb3n
/75y+Grw+N/rIxLS5JsFVzew8Y3L41UDK1O8bHoIHWSzuou9H/Y80GNj1W764jgexACi5WQfFJSD+xRVmIKQ9ac2xZR9aI44yUxUubaVR8BLwnUs2DpB7Q44ZluYfdhFw4c/M/Ea/f9iB4iEF7rN+UNUxg6QafUKrEBI/aNcNdbaSy2eXSyLlpD/kf40iqIqQQcEEYcSwiyTfzlPRm84cF6lqbLn2wVWItKPrnyMQ7IAAVxkZekKo9nqS8aJKrWF8lUM5YXkVMGzJcYaReG1s4wKpZWTjQ86FerSFbr9JtUi+dZ3czA53S6Vi0hVt9gspAvgwBZQsOyOlJ0gWbodE2QVZTl4KLThpc8qiF4//RRLcF7qgk2DUIwqyvMV5t3jdtRLboK/w4aL9LsulReHN1KtnSKzJ/rihBawj7WMIz4iD1x19ooRucxJzm1B/Gy+vojFeOiUfzJNvQDwyAfLjB9199Ngi1cqapqfGLBgRoA6YxFNphz0fVK/eEqODv4/aNHg/IGRf88wmUzFcdeYCXNa686n8tE5rmEaohzKgRTR2T0X9GCwZj43/3aV/ZvzUzjWwiJK2jK/nuUBGbIodKd8zVFlRHhwg1e5BmFWqbK37bKScSEah9JVeGRgcQQrUQn5JVUjRx/RyoYFTWRpchwwVyHsrIQcYf29ySV2+NcTrep6HNhQu3BvpF9M2P9YKanhvZx1j7uUh936Vf7toOxkNr8K+zxsSBn+LZmVKWz2h6kUmirHDdrr4xp+gHr1GU8ZRrFl2AcqJMKZADNdS3krPG+y5cipyVD4sBTdGNlTyl67XE1mVsJC0LfkEDsUOzWk6Zi2iju6y7UYks6aZ9pFw0QKhmO3o96ZLRt//ne/vP/2H827D//y/7zf9t//l/7DxmRZ0BWFZlseYhHvRGYvEZ/GSW+dLBmeGTqSIf6EUykmC5FK2XxDmKYljxj20z4gsM4zHYYZjstlWLCbDsM91PFqGF9wFIyM/P8L41faMH7BTWzfkEVnetfYxT+6wF3vjt8K3BcS1yGCnN9z4WyUVkC7FmJCvGZGXI0asgcKlZqJjTz6o1TWd4H+fB9JBd5JpW8F60KliMx5eI2oZA5Yve3UHLOzIyV8ImJDFIxR/HIzKRIZDUKBdDASbngIHIZrCoH32dYrXpmhVaHMaKZiUddsFB4BNnr+w0ovsHT9xsh6si/C08kZIRhCe7bkVMf4lFhxlDICgemmow6+OcoeS9+YEsJmkmDYOMhO66GVHHDFKd2kVaSt9co+s1HATace0Z1RO7xsDEBHr0XhHxPXtlDH1cXHvVH+MtrCakNKAoLMhwMIq690byH4z1eVRMK18djUfYxRpj49NcwfgLyJfwZNANX24QCp4Pzx8U0Rpa7cZL34pUV1e3ImtBcMZotvf+euaRXz3SxAiBdeuUTqal2ZLrueyEX3nbtxhgzbUhhkc1ThumcDp0JseDEQyJkEGPrVbU4OxQMVyP39ihxZe9CuW6rJEHVVKjVEo9rbxQo7RHevZt463dFnVbjMR3ZjsLWxEQLUkvFp++h1njIzyTcqurwyqaR1Xytq1Ds5rEg8oYpi0LgvcuC1RiRo5c49RpvoXyJpMuy2K21kcup3gDi28A6p3ojIT8zwm4LlmLNH3vB0ywjG0bZ87ARRoa39FKYGbP7ulFVQ6KKTEpTqg5HoJ1wNQU/quJTE8wbX98jmEePVoIlWj+agiDzxeTrdbsC9Fj0aBvTzJK6Cg+mt6r+EgYx1UpC9Vx+mpVEvDoHoTOu4A9SpN3QqMYy7SyL5Te2Y21RoamPlZjCe3gS+hkQZ/C/o4hSNAGIoxlksaAUqaHkflwwEisIuVHHvvwSGINEfcX6rgk9GurI9MclrhJWaz0AtBrG9mZX5/uo5/4CvHGJaggzhAtSdD/nsRBWiQn0AdNfZS2pWimslQpKhePt+iw8UkGpMCwUlnoqKPVUUOp/dkGp+GD6DGPXVKW5X1+qqlR8pTyVlnoqLfVUWuqptNRTaamn0lJPpaWeSks9lZZasbRULNd9HfWlIoieikx9BUWmeAGm4ohOPlJZidVKKhWK31jGe/rql62uokpw/QAT/6rqSkEhoyg2xq0UImYq3BhpN8ti4pRBoPnjr3AdlaIeoMx9uXJRtXNPvqKaUVlLz3wqHPVUOOqpcNRT4ainwlFPhaOeCkc9FY56NCCeCkc9CgE+FY56Khz1VDjqqXDUU+Goe3EWPLQ53qM+YunlS/h4fwrBKmnWYELP+VhRxZkm2VLQORpFPEIlzXxff5dZCJ4K9zMEJmKzzrgFueucJ8mGnlGo6lybZ8O1fw+Z9KCgeMF+7APpnUTPDI6nXbRk0I18EsGRh+Z7cooL6OdcfHDzLcmzUZLl+WjL9f/0Bh8pyM9cZHKhq/cvEdw3mDL6bJRo2fXeO8Fv+yCcttbegqUGxjLn464B5zR9c7l6uE89Fz/5hpLdG5A/5b6vL/e9ieqnVPivPhW+uWV/nsz4xsqeEuUfL1G+idqnvPk15c03EP2URn8HnqyCl8yz/TWd7len+zjFg+DRMzpcE0CXPx0PPw2inf2D9cG0s3/waVDtO5/iWqDaH+58ClQ6Y2wVb/knQXV5enZ28TCo1nQl10xmTnFoXlBVt+45LXRX4vWE5wxLlekP7cP8gSnB8t2dxGuVq9TOo2ZdtqEfyzxHiO0krbU3gD85eu+UtveXoHDt7rz/pAWxBFKeDEtD0bI1VDG4eEfiaYihaspMMBPaZbeWeHuw94BV2IuTiuWaFnAeOnHhNC0y6/m0wYxQA0/xnPWhpMijyo8FSyLA1r3aRmzpJyz2gsYBuB9fnB3+GvqVrX91bppPXNlBspu8OBgMkuHzveH+A5bI58U6TczHaFgOJVwKqYwrVX1xhieNHAvioCD9Pnjh4TESwUXsL84/6fWACRdTpgrFhStMCCkyN0wQOjFMEcUQYy4tzJextvJiH9ZZyWmKCh3UY42Z2zKFxP6s5zKKFujChgRBLM1gFK2KB1joMeOyLuMpgQ9TUyswMOGKsSUwCiw3YWaKUdNXzNUX2BkM97YHw22jsIBDf05zq9T0ETl9OyEXUygw0BHllh4cDnbTPfZiZ2do/8hSuv/iYJfSbPcgyyYPIBCp+JQLml/DYVijNySchM/hZpcXx+evr5Kzf549YIlOT1z3utw0n7O+jcCu398en3lLKfz9Jtg88QreuB8BwYgtUKHzRuzXl/DxHiO2a33gounthKevL8lvJYMDCOVJhF4wVR0E+7trf+C0RcbhLIYIUjBrimnOwlhLUiguwVw9ZQbW5YZ1gz4bZUJDTZojeH60RfD+XvpJ4tHBQ+vze9G15MzvJuRC4rQhZVhjYAGtBe04GFCnXTA0MuDehRB5GKcNJb462npIAmltxSvXkGrlklNwmET5x1S4NzBugqYzNxfR2KmTKGZKJSIP3xgNLrpZ/PYK+i9wYc+Gw0uVu+k3APGsmZu1npo6XpKzk8vKLPuWpVJlbizgxcBBYwvmvFoO/ugnF2Rh3zo7uXTDNxM47F5aGsMs
eojehLhkBr/U88Xtc56WybEhcy74vJz33JdhXL8oqJQT0RWW4BhZ4CCDubUMrqtggZ5VHMKQEI+VwsXJwUJlV0Q1KaTWfIwO+Azy9a38F1VH8FWpZESuLUCpJmmpjfRVo5rJsW7NaU7Xlr2M5Z8pxq+HDfEFvaoSTb6gNVznql3A+vx1J+hRC5S12DEB2ogFYgCfjwauHw5Gsf+IT0PCVwsmMu2DEaDAA3Alj5J4QL/21jU/HCT+/zqxsM60r6t6WJqluKgaeQN0UjAFAZARbs7B3AXmRjkhJ6+PX50RKHHiykrJ/MZKXxFz2tzUWCJkFLEYE+WyS+gBA5eoUkwX0qI4uCWiQeBcJuQ88CohjQ89a47p5B8y+q1kOiROj+z1wqKCANG2QBzmHaG3fmuMWSUI66749JBYA6kDN+DfsawbFgwY6NwFb9al6Szm7GwCjKmWdM91SlXGsoT8wpT0xUTmYBadOWc78tAKgeMKazhFR5JzN6GusU/F1azqUfGJPAZos27+YjRj6nqS0+n6nHc+ymGHuNRkyyZxZgIz10rQFyw1taovR+T4uEeuTnrk7WmPvD3ukePTHjk57ZHTNx3G5F833p5u9MjG22MfAHFXbdFH3Rq7JszViN1CVLvkHyd1FEpOFZ0j6aGpzUQUjPHaTGFpi3ggKG9X8KoqA7IF3aFB7wyH9S4IsujIHHz0xbtYBSnQy4cCFFafda6eD1xAwgTKpzWRlZA505pOWRIH8nIN8RkOd46BGe8ew2FQBAbMQBhJPOadOPr7u7O3/1nDUeCJX0xWUE46xHsC1Y6PigU11r3OGxGuwgZo8Y0XjMKNrmZCij6YMqwoGJfBfIYJArs7UH7FQkCGOwdbcby91LU3KiYeJ+hRTZhOaWHPFNWMDAc+sU6TZ+9PT0+3KgH8B5p+IDqneuYUut9KCaUtwshuqIRc0bHukZQqxemUOa3BVanMeVSEZcJYFo8ARSeVSwZ7b3rkvcK33gugP+b8hQ+7XcM+/+HJT08JT19TwlOgiy+c+cRrxgO3wvvSlVrM4htK0FksFt1If8rGQRb4lI3zsGycioC+jHrgtKT7JYvj4+N6XRqvql5/TuL4cctCl+fk/MIKcgw6fY1iy8aoYWLwP468pc/RDp9MeFrmYEAqNeuRMUtpqYP1+YYqzszSq0Yxpc6p0VYljGoBJ+Ts1kDt0QBfVGzOA2pmTDGsDyp0EiFnVMmsUEWYm2DNgrAuqBRqZmwOJSGioVEuwJfgd0Y1h0jmMOIN1yXN+e/MiStWwp3Ijg6lm79uREYTq+9UH4dNxcfLwV9CDfBzdZcSef0GAhhr0K3xUGzGpyJY730wVNZzGLYSKRBe/dpaylJFNaAj6z8EiU35DdP2odhv0IMv4lgyLJYdxs2EDqNMELamA2BVKCoAvDXf2fprQDTml8LXWiyYcut/Jgu0uuZLO4SWMtwoTlfDY7GVkGORQcvjVIpKbW2VBbWH6m4vhLfjWy3OMYMWfQeDb2i5ltb8O2cnH/PvvGKG9mMjtW+F4qzQq3d763ScRwE5iv1WcsUy6Cr0CFE6ZyeXwYsOF1jAL5bFNzIhI5bqxD00wlQ3D0bF/UAkAp5TaoNN68BlneeOhCJK+3nGBO4ZbGCqpI4kNV8Aut93xlHnuLAAQdhrzqczk3f1dY5WA+9HyRc5M9glcaqcx5pm/7ag+qIU6YzNaQP/pJYW00E6w2SQDGLKySc1ynn5I/kJjE8fIaDOJJeXXJS35OyWpSWquC+5+AB//IhFaZ6dvfxxCzryQHXtlYnsC8QL+U63tZghh0yLle54ocOD/uohQ+OlYddSra8P6g9Lw4hmv5XQ+UBO7gb8JTcmZ+RMZJyuHkhelNdrvI9OLt7V+gzfCfy5MGzlaDPg8FyK66xi9PdBHj1Wg85JRSwjC8sMglAT6sZYUDd1Rdzk3LIFapiGN8IB5SZuaKR8KEAGjgoriPmSaxP6AW2jLigElyKVXjlSkt1CJM4Kq57k1BhWeYLrhQs5RpHjcCwjLGfzkDuGIePLgq0OF5q3Ezrma467+kc93MpS1HGURfQDhm1D7MaEpow8O/7hfOuhy1inURR5cd1h2DwXq8K5Rm8pNFhClh8B6eZ9IJhMGLWMi2g+Wt0qh8xqgs/FqGvjtV4a3twY3A6xQ0nwUnqAqwt+ZZD1NR3zNYH68bPlMY6SwpvLh2J8jdePo477bqBVofziTO2BJ81FaD7WvYDDPcK94MKSVgFMsCjT6pNCoXwMVGuvw01t9ePPiXfS5biP0mwYEqzCglEzIyOWTxK/4uT70epHObyUzvgq6SIdTLJWFr8uhc14X/9Wusy6MR3znJsltGhXfFzGKHNwPHh7gcXLYpXA+QeBfjmjQkhB3PAkpXlausjgIKZ9MtDrDAOwxHfp6BFOlfP8PxTGNTo+WyDGBVxXh9AXYb6Wk8lqbc0eBVic7TPA1fz3VTD7kE4CLSBDgWo72cNhXePd2ALVDvVwCG+4MiXNr1dvmfIg+a4FpZuvXqbqUwD+9N3/BGgfuPtTe+V+qSsTJvujr0xc8QOvTPfSA0SMTz0oDmseWYGYHgzrmg90A86HHWko1nIdysqsCUwvYbpSNlW1HEgPgtAnrqPFrAy/YflkjRlRfniil/OxdIlD9hitqFEEA45SstYT/Sx8sXIBIiqiHAlXxAOcC0uIOQuH9x02x5zjccfnXHB+UTBouZQzMmEGu9V5Nw3UCkupRjOXisNq0TLPjWb5JCqOKnD0B2RIrKm0PyATA/IaQd4IYF1nytYAwd1V7DogcEGAHwEjKhLWsVgfSFg/zIamH66hI+CDz8MfVsLsCmtLpNBRz/eh5RoRZ2myyCFBg90GrgDO58aQMr3GYNrPaPb4uREJYb97cfQIFu0Aw1+cS4a1kSMNJpjF/01vaJJTMU1el3l+ISFq/Mw/HnOVG++C8lwlfPGRzuh4omstCl1OFJS+uDV3VCCqmmUDDRrF0xp3qLpn20cJtHFxHe10q9Fgo7Ui9LOrmhcjt6pCOl7KwKvA0ecbE4c6ctSElAYIiRHTagwS+hnLSbQIN54fivp6NpYgocQcsauH2tW9qPWji4DGyJNQdN6N6fOUIEgnLqvew5Z8fpBUCuGkxjEzC8bsTFF/Q1rvhIiTccENdoSxW5VLbdd27Hfi4+jGXuX+yEsF/fCgVUdO5ozqUoHhR4fOt23MRo+B/8PQDyzQcIzmmDwqHM/ZXEK6INN2GD9cVmHa9Z284YGLGTaHsOtSsYRcuj7hrqWzvfpGrpM/Zmk5d7I3C9X7hocjHGeOOUihq6ShpuF0XcmvmbZT6h+nmAU0IcSV+IAOHyruKiZGMnvcNglTuOP0NRG9Rc6NJRcggSpkYEaFx2tKDZtKiNPw44fNtQxjBAjp0yxzzcDtuenDuWHwlZWOXA/1bISR/j7evXZxQDxGpDm4zHO0QLKOoKBSM9UvqNYWmX3MEa1vxpQJc82z6zWXa5viSbGHyK/DxQWhQ1EqX1/JiyMjAC3hWRVdhTECrk25iyJ1HSax+W0ko3Foleu
9Mzf1GlH1Fi3YvURCw6uZFUpSTwT1MrGmaqrqmqyGuAUMS6vUN5fIr9gkhy7EM0ZkaVLprzRqAkjyrmr4rjAToGFzU8dMkOsYVt9MZH718tIzozCiAzhlKmpaa8c9Pw2Zv1OGtcIqxgWPW47FtS6xU27lzK3vjqdU4THvQvRccSdfOqpZjstuYG1En4dFDcIQTleDRI9CVSqMcsmyKgUGexWHYavG+VGTJqyV0GglXSntUIW3Fm6FvcPdXkFlSqYsYTi0OD94FGs8ZkTOuTGs0f21o3/1UfXAqFpW34U+BhRHhI8Ign4RcV4UkWOX+hVdrwSoJAo9qSabcw0DfWSyTDINAbBhWxrzVriO5793Xs3F1E2rsBKYkO35Yw5st9dtQWx3GdlZrv0s13cNXYMFdDckbfd8fMwrvAUpDg/H+Wmbt/r9WlX99rfEem4+rCTo6HwiSwVhVic4p+8hiwUOMOqUh0iNmF1gHJ+L8nZ7YAfywJMZx+btcZmo5jVY6d7IajbGfErGJTQe2oAQnWpEznQ90jzi9rlhygmWjSmO3CU6Iksnl4cINgJFvl3kt3us2tfU8Btuli6pLJRIBfEQ7qTQxsnNaDdl5Cul+FqNNG60qMuxB6spYITxfYSjmxfiyoEbWAgLpgI2fg+tvnXoga0jPkmNpSzYmhCKF2GyHTVZu9I+YmB4vPv+vN65HMwNoYqE6yjPJxh6CsWEI8xFzb99FYNSsyCfZ0zXamU67V6TUkSdwHtEsSlVWR7vPgja8DSxKmNp/5CK2OWB8RdCDVGmlzdMgUAPRXz8lez1ba5rV5crVoIqZSev2DvYO6wjH4W9j/CCu+KyNt1pwEHq97p9ZxtUkYW/nVwjdGoJUkVVHBWjWEhYIJsDrWu8xADjghfQGP9Oms64VddS10Xsf0N7dUPnBbINauKvquaiDtYa/gBaFmLNgtsyhH40pfdzQeZW+9HclBgn2nMmdrOQJEzrDtqYdUSjopTtP6ZxfnetFpP3p6IilbEcEs1RB40DsV3qrnMiNASRmoYI2wKv4t0CexKKnWeEG8clGpDMpeBGViUvqiGsdCirHbMfvQvbSPKBsYKUBcqI8FJ8uOpYTal2VoI6Hq3gjicupXkv3tmG5NROS9gZDA/6g/3+zu7V4PBosH+0u5cc7j//pZ6QYO/mlmv08UscumkatRpEDSOYbgQZolgMy2p6UH/Bma5yObXodtcNNjqkae2eyeW056xyuZxu9eLJ44q/qE4uq9b+0Xm12kJUklbJuA3FOWw6FAKbA8+GMvZWUvNB3zC8VTFrc4NVLdSNmMuszCvSxz4vWOfelxjPpOlVcm48TMdlU9B0xpIIF2F7S7VKK+kOX2LjTS6K0lyHsAgqpKsN4U1tpYkfoPoVz3Pe+QwmnQGNDDsJ59RNXQsvJ5AeF6atUxLyKcS6PfP4mYkMDhAm5pkqEa5W6qOLF3lGA7OLzFtd7J7yVgcaJlapZnDXlVKB2rpNmhcJ0pu9OP33XqwKgNu7BvLo5Bgsc1nd+L5GL9RPVM/Is4KpGS20PXzagB+qKqkH8XiKLtxNZiDumGKuVmSRn0uhjbLLB+ss5CRYybFJ9MOd3b39g+eHLwZdfx3/cHJaW/o6XSrnp3Y13qoV270aMB/Svcn+YJDVIRNT1q7kvbpMchXuBKCLwFWpUvyGBY0uZcIomrsSK0aqloQBsoVv1QHCwKi6cGJZvEGXXlzIl6HEYeI4ZXUT51q2Rq9JU/EEc+aqqPti2ajr2/vaAhTd7+4u13TRaW48F87uZU8X2letGqZ1ObcSg5DErg20nV6QFNzd67O2ZkoKmctprV+KvWrkB58qy/VRDVfkfzUXV33jt3u00p29nwwHw19WDj34wJvM6CvTc31hg09SdNGOjsl2dqC+H6XpBoKKbV5siH+OQak5HjRmpduXncskylGD44Pl1nW9FVC3Bu3sTF5rQX6HYvu05HpGaM6U8YIMnIWaI6JhvnKGpknb8Fl1MMD8rxk2/tRY4QUgqFVJjC44MqMig0yQqxlbQhbZwqrKwkTHVDG7ZvALVV+imAEIUTKvVs1N1cR/xvICg2m0gS79Mwbmv1DjKZVztAERaiBjblrmVIXiU5XqqKxw1SHy5M0eUTWZam2CLM4SlV2DckCwlqak6BJGnfoACgryqrLAmrWOrKD9rFWRYWjUKPJyCpJA25JSZaxSOAnCS88oDx+DKAj371bPnxscedSoyVBTBSuvL3hc7PN3yZk1rHve/yC819H71vJudmuCjcBSrTBchUP2zlH5ncJBTC4hMAQrdNrn/MDPXF0yXeQcC4ByYzW02KhTUGX0lqXk+LB44b5HAMsTqYhi4OK/U023OgE8wVB+rLz/GIfGtZV9QiYWVjsjWKuXZdWxsvqFy5oHQIzi7MZr66Nr3P0R+GVKzaBpDvbdkzdMKZ45YqVRlrBPjPfg9kiRM6uBasbI6EdkV5BlsyyYHnk2PTqzoiVPEUbyljmxueMmu2QFGb4gg8OjnYOj4QB9pidnPx4N/v9/Ge7s/V+XLC3txuEngqWK51TQKVP43TBxjw4H7o9KyLWsTmPrfuz9rI0sCpb5F/C/WqV/HQ4S+79Dkmnz151kmOwkO7owfx3u7O58FyHjIYEazqH0VV+zVpH81FvWrW/kS3VkTEiXWBh4ON6dkbmZ+g2BAIJKe6Y8t/JbMC0VTPlKTOEmFQYsJvbOxoLG6OFpCXOvpXHVzFDQ9QV+odg2DR46L4dntXONfBOLPzZEAHtr+Z4p0T1X3eINxPTsFehslygd8Mo6FC0wAv3YXooiwO9FcoqFM+A6LGTpNVfyLKzN+WGwAiVKKmHQqnoPCqdujWD1qNrqVWGxoS1MsEOgZGFHj5idDmUZ8IqyfITmebzBK23rTZx77jY2ruvwY6mAniq0CFdN191xYESEar1Wztdapi6sBPfhDuHL1Lhw1UTDDl6hYNKIDbKU4WeF4N/ghDiystWoF18xVCyD8AY3DoeCosHVHHnvPlS7o5nQHZeqQ2uNxbgK0OtK3d68DEUsus4ZmtPhVKGg4gvtXC61s8G1re8v5TSyNs9RbKyJGFXlDK+ihixkpzTHoWeh5dY9BRrdYYEr+XKp51ZOnRlTZFtgUcfWXeXYhSp4P3Sjk2MY8Rn2F+lVDSz6bol9f131j0urRIrp1l3tVmrbqBjV60vVfAujk8VsGdem8OFkbSbVNjx3BOPY0QBvVg7iKQjljrVajDoCD1E+tXidMO7PIIL5MAJ4e1TnKW7IwD+ca8q9gngbVaBVV/9sWTX/sosPwV2NXtJkwcYE2ii60laiAU80pD29GRPcXTtW1rNMMCgz4W5ogBfYaG2fEUgkytE4lxCMoblhow6iuYJKXMCRqCClCE7+utj/Ub1fsboJcw3E5iYg796+JDkXH3yNr/sbQnq6bFKdHwWbvUJIG0/jELkQYouM4jjSmHtB6KnVko+MBEegHtqLWjG8XedSgDcTrtzgEQV8tnfFt9VABhEXvduGObb/MhiArXHl7eH6w7WOZMS7pMZJLmlnRP
Vbrj8QGAH0Q8Wl4lhmq8kIteNVRMscMih1VIfznWbOewZLA/+V8/WhLGBPbnIH7NdCqvkKBHbnIjZfgy2O/84yGPYjC+phvKVOKbiAwyIGlmaGg0GH/XJOuWu365qFL2UJ+173KLkbATkJFAbWEUC67kC0QyycPdIqSNSZFGEZiDVXpAekJGwP3PAR+DopK2DvQXldm5e+AAtmqt51pUMAe+NRKFWE8HvPG6ZFteIAeuAGpR/qZczZLU0NgRIzrhi9k4migIA4HMDDVvkwgyeoha0bFqn1D/Ba3YMpqKWLgcRhgvr5qV2Y9zlsfw7lyoOyEEaMy5pHRfPwKe9X8vEVsVLuuZNOnJOxLPzFHYWahp2AwGQ3K3c2hVQKzbWJ5W5HmbGpEUQBK2l19hZwMl5Yz5hZNEODpVEup4mG3xP/e5LKjI0Sz3z919X1Glvzq9QgTI52U7QElZpXGLnahCu2oHlkbjw/vdwK0ai1N4L47ciacKOJXIgwI1Zls/d7VW4tjJvKAgN8715uFKYUFty+RZ7XadpQtUoG8v1+QnRCftRT6EKcY19hRBHoM6ziUu5wFtpz+rsUa6wIeL+SWluSPRAV47A7HBaEhjaXaOFgrssiuWI08zKZu6w9oVcOn+iaxAPoiaOKZ401+jRlBVahCZP6IpdQKp/a4y8FqH7np27yjbNSyYJtH8+1YSqj842o7jYdjxW7QR3XP355tbGFKif56aej+bxiJpzm/qn+YP9oMNjYarDRdkbRV2alMjOuPjHmEcID6waoRijfhi7HfQx+3ICbvockhYGE0d1BKkG+FVAZxeTqHmHC7reOIiQdX80gwEBGhi9cFBTALZTdUhA6nVHH1xZtRqF/wdhFZ1eCiil1qinVKm1EPolwmqqDgLGhk5mXyCQwNy4gsv2GacOnfnV1C88KWoXAkHM3NPoFuOhnrDCz1uh4JTmnX2XsQX+2iHP+XOFSAYonKXKasjv1kzv0kurIf5Z+Ml92aCgwxfb+zvNhxrJxf7I/HvT3doaH/cPnk0F/j6Z7h88HdPdwwu7XXjw9TLjzqrn8vR/953vS946x0Wsj1wtaSLQcspBGp8nYykX16EyXjmZ/hWBVn4AFxahw4X7/f4ROuC7N34ldkcUQDjj4IPwO+Qw3/5mKbFuqarGkFmbWcz0Rgnl6vMQpz70Hiryq3Hy//nj+6l++N5+uctnsJctTprcSfNmlNjpjXyPfC6wkUO+aZYjNxnr8cYzCMJxF80E5YRj8+BmCyeZL6sIyiK9MCKKFH7rTgO8tvdVWaoyXhOaUYIFCY3NHvBU1WBxobYUAqj45iPcwX3z9hy9dASpgzzdULS1tFDk1lsYT8hNTGBcK/TjY7YyWGqzkuas2gHdLnVtbrhAsQTxOeHLVSW9YD1wGUM0665GMK5YaqZb2jkrVsjCxB5FBjVTWIzOeZUz0IP4Y/5UiX/Ych+yRheKmw0K9+euGf3ajRzbwaV/ZepUEjHo/fQw8/4hgck8z/e5e+rTeQd/JBa0IrId1uG9Abv/iYnq9tvZFm41eQyFclU9RPHJmwQ7LR4gE8fH5wECiGnlzKsoJTUN2Jq1M7DdMZFIlNT0wiCGK5ezGKXDHhZUdvn9zCQW1uwruzxM7J0tuizQB8+unonq9aZH3Gy1rCXMEQelG+aTEzgS5nE4t64ezJ6eKFjOe+sIWQbyMR4W8qkboglGlNn4+8pLRG0ZKUalE3NfYx1erV5z4Eo1fybZUk1K4pMD2jkER+Ot3r6/evru8Oju9fvvmzdWnblmJFSrX1DL/EoevOYkgvwX6eqnmwj6rC3ljZYbR+ZoPvZ3iMU8+jCeVO9rOFebPuxNNkuqgx+aahxz4s7//9M9fDl8dHv/jU1Fr6Xcl+eOeG2Hz0kjvWYrPUMe5wPaQERb/Uc9Qvus8ubaSvt2hnCNjBT8bJJlEqUk1mwZ0HqgZNsZLYqTMdeXIumH5EvN2UQtBBtC+hz/n7gKm8Zlo7r550RA65QbKiUV3sE/U9KnIkZDDsL/60odV1juct9kere3FR3jaA/HUlRH2MAkYpHlf+xtF1lWFXgjERmKrvV8XedMo6vmxZPRKrHWaA+bpYq2CbqkdfwutgGEYt4Eo/5YFWCNHczvVyBkuuD0rTJMRrCJyvLocMUz/w1blwTZtH+0Rza3m7YbzNhEPd9zwvIGvWmWOR68/DYO7QB7dCG0IADWJUme0DqIX2dcFpR8/Doh0SnkugXXF3WrjSjWZ4jdRKBl0CnXGs8ig1Vrh9kzO2TbNPebDSu1w1zjM5y62k7hPFShU2NH0ntXWzWvAmP1dV0lhwse1dAZhR1l3RcFUSrXrm10zQoNkkge3Vtz3dVWuxPJJ8udoRGFX8q03o7Br+CYbUgDg/5ObUuST5GttTGFh+5M0p4iW8tU3qIhg/dqbVESgfguNKiJwv6VmFTHY32jDimgJX3nTigjSr71xhQX1a21eEbdzWAG4/8kNLGovfmNNLGqwf0uNLGqAf8XNLGpwfrUNLWpQfhtNLbpB/nobW9Tg/WqbW9Sg/FYaXHQC/fU2uYjbPnyhq/VbbXRRe/EbaHZRg/crbngBcH7jTS/sGr7yxhdx71LDxDo1VfAQhVl6hN2meZl5p2POKPydyXvSnYNJGxzgMxr1hA4Da/LMhwIaqpLp71s9sHOHMWE2qA8lYmN2yOd7tjH9faMH1uwNHGGjI2utcPzXo2+6Nt/4heJzqyhDFBuEUf/t/HTrXkfL5nAwGNbdQVUM3LohjIMTOqFru0cs1SbzbH9N8L063ccp2pPqGR2uadbLn46H90y7s3+wvol39g/umXp/uIoC+IlT7w937pxaZ4ytiwgvL0/Pzi6iqVfgeVysrw7tuR27is73fA49chUr80G8zRO8s3+we7hbP8NzPmfr9L+8On91hqYt7xGNw2lQ+IxPNpHKiqIQZjOpqSeElFDf2kdpLxaLhFNBE6mm25htCBLI9pxlnPbB8BP/ndzOzDz/9fz49XEYUU4mPOU0RzPRv3rOzel9MAn52V4RHWUzufZBgOOc9WrZF1jJNZS5ipYeyrGvSErz9VHSK0tIMdq5IDI1NK+oi5quBjWbg4O9QYOEPjOKoiOIIkQ/UKiaBOEu9cO/RqPC68Zl4xzQoa9UJW740mYYquMCA1oo86JD83qXC7E2xy0G7tsJNkHmVLFh5J5bs6DmEcXKP6yx1I9lnsNa4piaXmMjg9TXEbBRk+yyKBDiYQEb23ftfcG+RBjCycW7egiCoWrKTOj+0R2GsHoMQgGpMQUV64q2AUeOgDKTME1LEOz5NA8Ic3Ie7z6GijXW9VmRqwVLIsDWvdro209c7AWtXJqrLM4Ov2a3YljdTd2F9MCVHSS7yYuDwSAZPt8b7j9giXxerFFp3jxGPdktyjm4oBAjuTjDk0aOBXFQkH4fWnfAY3H9YGJ/aVShjIr9YnwnZ5gvR+jEMIVlswvunAtSuZ4wqcwYlvKsuJmiQofAfI1VoHxxWV8oduHqs1Ix9QUeFA2B/QA9ZtfUo0uVE5SoqYloE64YW
2IO3TiX020sSte3QoblTds7g+He9mC4DSlKXEz7Liqlj8jp2wm5mCZWemvrzIP04HCwm+6xFzs7Q/tHltL9Fwe7lGa7B1k2eQCBeGf3NRyGNQoY4SR8Dje7vDg+f32VnP3z7AFLdBHq616Xm+Zz1rcR2PX72+MzHzwIf78JlaYuMWVqVQQ83DjeYW2yg9hjDapJdALexHGPKCRAHzSXUs012bAfN9okPDzYPdyrAYrX9PWfRBi7QqEDxDHI1l7OIZm4URZhfToo7BsoYs+QCjOuIAXWQbLVor6Qrxqyt9dmiruaAaNXYIV7B1Y4VRUbilJXnl02THQo369iqLvdH7xIaA6n0/AbZG5rN327xKJoXheX8ezy+PVWgnoWKN4hwbMr04qWZoZFjKjIavkKsKXj0rhE76obCzm/8O40pnvk9PUliVdMyDMonszzLKUq01sYa8zmlOfVe23Efp8wrNSapHJlZw7gHtrOqQThXOfV4pHvUt+BAT87eQ10Y4GAZLoIhQG5rdW6jo9g+SM/8emMHGtdKipSRi6hOyk5Of40JJTCrC0+v0IAzEKenWxhk6Xm+t5dfgrwUbVxlq1zI0/jidw+nn7KPp789d1lj7z5q9/Pc5H2yJt3f7WyWVTfoEdOXv/1nj0PR+ez9j6XKc1bFageffP9NJ7fvNxqiU+WPCyn+Adni09ZiVRTKlyJkDWvJp5Kk2dvPuMwn4v0cxdL8+tS8HWJkF1rpjmxM9qlv/uEtTcI/VPWD52Br6W6BvF1feUgw9UJnYihfh/OFy7Oqx65BNHlokXSJzTnE6kEpw9aopDmGtTIFdZ0lwX3qtUpOt4aKNoOUjUopULzjCms8cez1nbtDHYG/cHz/vCADHaPhvtHuy/+YzA4GgwevCqsSr/OZWGZrxWWNHzRHxzCkoZHe4Ojnf1PWBJUXUuvP7DlNc2nltZnq1SN/KSQaj9+MEH4QsGYMw2Q2JFbS317+bB7IVpUWqqbdR0sK2TD+FH8BSMsz+0DqfupWhYJCMbstnD5cV39FHw8LSQIrk2xvzP8VEyw20IK9tCUhEZSEQ4RNjBjYMRubF9Idl9hVQf7+7vPPdZXLe7/Cav/TC0dSnJaHd1pStGu6oKmqLtz0xbvdwaup+WqMGumOM2vayHAj024rhMWTlVVGIUm1o6Ku29BqNscClemy6ifzCTu2QZ7X8yoK+HZIzyOhENToS+NJEHVyq0UYvWlkKoZhk5nFFLZVBu7+/s//vDDi5Pnp2c//Dh4cTh4cTrcOTk5fhi3CGnwa+eA55XePbGIjOuWhFz8iEv8zKrmheinDkjBq3sCbQi4IH+T5CUVU3ICJU1cZNgyIZeMBbvplJtZOQaT6VTmVEy3p3J7nMvx9lQOk+HetlbpNtZE2baIgX+SqfzLy93d5/2Xu/u7LfxjAEf/ofzZKfF/jOaqg+rqwWiuCsPrkmkuxzQPUp5gK7s+Gov8IzTTz1RMPfBfg2baKvHjTEDYiuQO1fTy6q+V6NojL/96SQX50SqdXKcyUl17Vn1JQFF93H3/arTS2so/aSl/tFp610GtbeFnr+wr0EEbC33YWv7M+qTz7q5XLIqyEO2kTk5pUd3u/ZCHOFaGh81V4vyb+3hPIc6/MenLTKZQkFypJbb2wlpntAoAgzIZFtaoF3+okFCvtgrC+JTJ8EpcRM0XPWfYfRRTyFg6AwGx6iVjITu/8NKeVM6PrPq6LIqch7o+q1SxTLlZrqvs2IlnhG3PphRGMVpv+YLVa5kw1+02/48Cz9VC9l2pk7QVZBlm39TdML9eWaqqFrImxL6u1XFzk7UBlsrMyDHI/LQBIIgn11zLdeH6xElA55dvANltweC4E6R1kaIDp3NnT6igjRJD/nh+BJQpk9dxZYE6Z5Ziyk2ZMdCkcmrgQ9vl9F9kI5di44j0n+8mB8O9w91Bj2zk1Gwckb39ZH+w/2J4SP677u5bo/C3+c7yEp8D1YhTogE1PV90Cut1ywmZKirKnNa6SpoZW1reyZBrRs70k7iReBRFwJVrcwpNGzT2Fp7kUiqnG/eCettucoTg5VUmI4qlPeBzeCPW00RI1XgLzChcWAVbzoGNR3y67dIfS22k6GdpbV8KqQ3N13WqNi9geGRfzSLNsBce3FqVPmic2ainHvV6C92axr7hO5SctkuBiaQiv5xfxIoMdnep6vYueMbyJV5YXveBeuTwZxt3L/YGeytbRhWbWmFjjczqLcxwH6/q//2kC6Y1cSsHTyez+nvJxqxOc93dJh7nynRNZ8jvrmNBTGS9IJGcH78+jp7rBNxdRNvHagpXLt3+oWRC6utjrthHunW1s5G8HBe+uFuSs2vAFCMnzllu1NFYBZ7RVRnpRoGzuMFKsrJ8lcl51S/40Tl1LAiE4HT4hAuGPkVz5tobxQ0kax3fBHl5enxhz/kx9qGsKuAh/Lij9RtobdEyzh7K6+a5alHYi9xV3t4O9cK/1PUY4xwASr6r5yvF9PmT/3yPomHpE/MvkTwrioy6H3Gz4No9F2yScRckvDkbQZvYj9hbJ5U3vNlRmKvPT16d7vcg6WyLYB0O5q7+hBxnmQdqEkrxY+CpG2K8hH65KqXah9zXQcSbnXqLqWtsDr3LNCuogppkbmRav6WeaUE/YFsH1ztaz+ju9f5wZysssEr0rO4zzUyo7dteNDwc1aItoTfCTVBjKVEQFGvlGSag5RWGAZIzECX6QetzA3oe+G+668J9EQOB+0Kbi6xK3kIQoRRo8DounepInpkcLe8F6xHFfGfYfLn1AKXuS6dEfvlsyD8mEfKPyYH8StIfA4uTrhyxZ3H+871NTaCDSLOpiesw7M4hNJUX2lARdVY7O7mEd5PvPSe6sytzuwkITAp9Xt0x8zJJo92rVCi6VkEAc0Z1qbDQVNQo1a7VhTPWk4hmVGULqlgvFM6YY3lQ3SOnMv2AURGGcgEqjT3g/6ccMyUYtMGQWai6tsr5vjut6FGEvjeNNr+1+bpKml4f1GOb06JMSk2nq1zB0KIyu7678eUFU1YVhEQn4Om4SxBxEnpJOj9p1cnRPg0NKWsaCzfI3V07P+yGGYlFlynN7bon1Eo/FkO1VpmR1HQEHygxCwnzAK31Yq9rRafQTuKGKTr1gkOlHrvXdS80OBwApMOgNGmE6TqGaVXVKeP6Q6IYzZI4y/ZTXe5GmspB7TN3ybMpLadsC3qi+I7ck9Iq1M/odKrYtNZsFvBO8xxA01uuTHWoreD6iqcyzx9YNguWil1Y1r9WO49h4o9c7pfTK1ADkJOIPXpu7pWLuzSLqiQ0now0PhJ2kM1NfZfOEUaUirxm5ofzN5c1bQRmwnKP7bEroKOZwoig7fhG8R2Vpt+8vnpz+WbVrZgymXxFZnQA589iSq8v5is1pyOQX51JPQbrKzGrW5C+etO6BfLJvP51mtft3jyZ2B/dxG7R+jWa2SO4vg5TuwXoz29uryv7a8L85k9u7FhKiw7VuXEKXpXzp8nCt48fechGYN+zZ0UxUyqhvX0YZFSnhX/EdP0463F2a5SN4y5A
xzrgEdVWQ2i+oEtNSnilB+1rXff74H6YMyq4mE5K6D6MPfXFDVcSSiBFw5/5LXcR8Apjwp22ORozauA+GzWxUHwEC+GBeilzWF/RTKIMtkearotYyKvjk3jagAG7cCGNK7yM9aaAUb798YQ8H+ztWLTrcjqFgqNH5IymMyJTwwx55noR9chhf8yrbGir721hSzcn2Torw0KSX0NU9L/IjN3SjKV8TnPs5KXJlN94WzjsaaXIIJ3jxBQ6MpUCWvDZi9GwKVMJuUSVkt+4B9Fd5Wzlrv1kGHG2LGas4/Lc/HVjMOgPBv39M/h3t7+zu9EjrS/3fCvSu/0mj7N9r+895xBf5dJ64YRHpzs61e8Ev3UmKS+3gOL9W0lzKB8Vxoz0RLDuUZSAnOm+sheV2qIc0gmscKeI3coMmq5YVbe+fUba5xuHyDVDTtgUCvs+hunhLqMDuIRkCRZNmuehDzP3nRB0RL0VC3w0k0NjqQVNP7CV6vyutlg33le3XC7Wt7WKpQxC/fyiv5K1rntvw7r/oPVKnUzonOfrCgd/c0lwfPLMy2yKZTNqeiRjY05Fj0wUY2Od9cgCDWTtAhX4ZAvuMs8fD+o/rGBIy8eAvLpevS1UkXJWpm4jGE0tvl/Jf9Ob1i5/YEqwR8TX/WvA2QLYoOIpunB111uQ7yV7yaA/HO70nae5Cf3jWiG+tr2OKys6lN21uf9sYsZHfXypnfXzufOcMmGk7pFyXApT3neGqVrw1hleYx2czXcaOeTIzTNyFghoPGvYVCr+Oz4hm4vkwshKWK0U0LGSNAM1iymopAq8jTdKI/nHNSMTmedyYUd2Sk29OCp55mNG2NYRybkob3tWbwCMCn5b5TEuWv0kzxGkN5dWI9rchGL96LMDA41Tr1ysRc7RJ8fqfYXtE+OKjIMbOSEXOaMaykCSUoOhxt4/smC+AQGkZeJUZyeXPYvVQslCaka4iexkrkl6WzKHZT7gmlpvvb4Wna/KuoaDZLiXDGvQtqn6cXSHK9c0q6E3/CgVOcllmQVPjncyYRYFuPFdj06oIpTzD4yMzE4yZxkv56PEEtPNvKK2thsp+Ox72HMi+LV8/b04e6NS2MOIXYp7XX8oixUr694laF2yVIpMV0LSjGoyZkwQjEyrb9vuzn59eqsA/VExilASa50hirA6KPm0psW9syuo1ZRK2gBYie2RkzX/sKvcLnhTg97tJbYJoTeU53TcUWf2OB8zZcgZF9qwxj0IuMEAyj9vkGy0yK86XjaC80uHzjaAWGf9Vocp4DsQpAkOFOVCL2NePgGjETIoQaiQYjnnv8eBHYDC8PGdZmConZARrIJnI0sp+MGbqdHAlUoxwb2ieW1OKH1HhZCRFdJVk+8gqrXYsNuk5HYLpmwD8Xjmwj+Mo13OpPJVTBXJ5bQKG6kWXaunbllaGxVK5msrt3gMrbDkBAkSZvLeYfACOXij1IS6LfgDH1NBr2k252KjRzYUK6SyYt+1HdDbgD8e+WlMLfnip6urC/h8d+Tnjz6kPeTF2pcItRwUSpgEdaVUuVdVNIPMDG0iWrLboXK/UsV+K5l+QBqGf2Ess+WnWPI+2kvvMi4Z2wCTwKzNfTk8fH43iK45wp9AYrhyZnrc+Hsx8hPLc0kWUuVZN2bWsG9XEOen79u9ZxZY4M7YBbdDzR/u7XZv5tou/c1j5zxs3vvQF6+G69oll8up9qGmYS/TnDNhMO9AQ+1HqGoOlcSpbxQXnrY7yrMqhwJVJIztIEKKvjZUZFRlCAYirXJej/7Zf4uQ9c9PR2FUe1v+s3/iAOVS2F87Cinv7LK9/YPnfXb4Ytwf7mS7fbq3f9Df2zk4GO4Nn+89IDrWb9KcmZlc20bV9gKnipB5obgV1iQEug+Tg2TgmuZ4C8q05BkUZF1QHUprH1UDbFwF8wbGHc9Le75YHB1tZMjSwciU30qmllav36h5qeWkAgPtJmF2CAcqFEvRCclSWjrOHfqHw+iN+GZcr6eV0klJIFHNab4kGTPOdE/Im9pAEPkytiq8yGohtVwAkDvJIBm0yONvZ1c9cvHm0v77zv4jL6+693zNPZE2X3FX+ThY0SwXad5f0aEKgeOwgVDquJbjMKYaHSbeMBE1vozZai/i28Y/PzrBF/pXYBJ07bzJiZwXVHlz+zwGmYZBF9xUnQui2TY3NYmHdaN6+8uM5YXbbbfLMI1i1GgSsskImXMNIvEUuug5VtQ++HxOp2x7yleu9u9xrNiEKbW2MiVv3fBVxFd84Fs3hS//Nc7lNC5Zut2AXRdSaPbF5RWcdlWBJQbyzyux3IeTu0UWj5svLbM4aD9NaHFA/9HM0YHxeNwx2sJHZI9u1A7+iL98CoOsccMwqhPKHoUrOuRqQ02pO0I9P7+ZdP3c4ETdEZ97g3ri2Hq9HQDXXZ7GIXgzQmycD0SI9d3z2pf3FxwIA8RFB3yhVsVSqazADNEW2JgA/6zPS2r2Ieg3gqq7iwMTRI6RNFwu94QrtqB53iNKltDJLJfUHo7cCnFqK4xaHZPbcEzCWDMqMnCp0RCYkUohgqB27l5Hec+NSYnmYppHw1QoQOD8WJoJLRWEfhBdUEHsirbwTMdw+GiUDlR05ICubgugOafrstQEEsFZMPCj2rHKDtvriIz3u1eJupY0575PI6bwAyo5VKPuEVka94ci2fx3MF+lYFb0YAg67/LfuRdX5RprU2MrfJ2fNpFVI+8KW5evX120zgkh56cdN9zKquAajd7n8V6wuymi3VPSzD4Cf1WtahrzqZfu4z0J2aetXGkwHtobK5fTKdxELJ1RwfXc2UXhSzAJWOijgnZgVKjysy2jq3broznarencuJ5XplZhgDyfbStg+/kjg2fdT6OXOpfTMNGYRVcXFJ8gIwsuPpZ8P6otxL9VtXaVzoELGeNalsoe7doKrRhhF8GyePzvR17QGJeGKOq8xWSEMH8P7gEunBvZKrSIvgdkgUPnqcftH9XohdXssGkRC12JQM9BnKSWqCdKzhvBW+Fg3teCk6zSerM64pGZmyyoFpubBtOOMc02wNcjmYR98dQXpeK0ZajtG6q2czndnpQCGpXpxB+oFThH3HzvUcMegjnEriokgfltqFeVDLhxFBo7BLzRSDsEuaEUaEylVSTYDVOQxWUadfDhNhauwMpUQtUDJG8YBCMo4Hy4eTPJcFfwAC3t25XAvZQlWIKK0sSnKpxpy308MASaNKPgcIlH2v+0FSfsyznzO4msZ7SgSox6ZMSUsv/h8E8lO9C8w6rIlHIWiYilTps2g0dL8YmzXnAid6PbO8/1nEZZy9fuL3UJzCY+WPEoaU61j1rnghvuLX9hBpARnOZBSVpqI+fdAZVSTX0TLGzfmIylNNooWiQ/+L9qyEITIDQYTXLezIzqZEgu3+EuDNlRovhhEzeNplx4lcyRHQSH4uKdNTI2GDaOTGO1ezt3LmWdSaBNMnis1YXvuyoj+fC4UPwspQX2DI6YO+ZDgMcgNfheNVn3K3ZcYAvhSuo4Y4F0kn/TG9qJ9FKk7czgR20QUkO5m84
eDGenbmL5I7TDfYHq+kLow+6DpmQWrOxW3FdsLm9AoifC6mpz+m+pQpieYhCw6PhEfa8g5algSrMMRU0ve3ggR00Di+f48cUzZxoyySA4wWfh+dSb2hO1VjyE6CLnBmv6GFIWdgFg0StyRgqqTC2oE9N/FEVvFVhk3LDeJ4w7EycKUWFJA3pRZDBipYtWp8KN0osPQW0ZfrG91oISl5kUxoRGujS3AseSaHvxYNv61GlnFIsvY5whE6kEUUgqItgCGJrG3WT185QzKiyCGiA3N6d2gKE3C8tgV6qmLHD/ZVzTcc4yoqXFfErhPh4z8PnEOVJjH3oNZjV3MyhmFGeh+PToGnlQx3G+ZAUZviCDw6Odg6PhABPDIQDx1ZJU8tPKXWRCtSu47FdgARLqrN910J3MMGeGQrp7LBG5DPdIkkQZZM5NzFlvOHXDhMBgzRh5++OJJvt7O3t2a3eHB3tJB/zJhKY852aZrMPAthmt0DV1IX7ClpDYDNEL6ztOU6lQXJfRqixN2WXdUcyICn93VxWLwpD23Z3dNrHs7N6LozVetBGmrLzbRzvxyshqrAOI/HnXWgrFpVqtv8TDtrqxzX6eNkF/4hazakiuySH5vkLOfwSRO6nzotCjyL6vgO8Tdluw1MX4BBbtqKdR2urFsMOvv7vfhdYAwMOP0UdPTFA1Vj4xNQXUSWfQewq6l0cMI9a5qiKezYkrTgNYappwz08vt3qxemX1oxbw7mROpUW8u+H9j6PkXtCttgbXidfWLLDacJGaSCm0Wpu9HWSB6lNewZ3KAi1YDQ2tE5TWlnfyhLDh6xa+/2hiCBPWk4xXIgKw2t9BAZF2/gdufgRFa9/PnLLdSD6NLZivo68+Ujk3pI7Wakuie2M+L4UTz9COJW+YcqIkrQpZEhTScJy4NqSuGQd9kuynVKL0o/vARzdss76MlWlvqiSRlbwZlblgXcflGNWmKb9hAlubxLM6g1KhpJGpzJ0u4i0NasyNoopHhEO1q3ngIibEVKPMPOepkpqpG55CQcTSYM0cO9kSFYPqYf1hWUS2JZ7+1rM3FxtL+aFHzMLKcsoBs6hln3FBNDelk9oXYGjCHESRRXEp0KIdYKn6sthbKAuRbNifJSjq2xnThpxfYM923QO/lu7FsS4LrlhoZBPdqZ8RwQVN5bDkXVoGX1GlhqHXjmyce1+SZU5nJ5cb7YNJ+bxGWh2xCy1V9iFxC5sYuIABCyBxQ+gM7MhY2nMDGRWNcMPzCRkhgjGYYgRCxMgi2yrpXIrwvXLFlXpk5A+r+wlFFV7thC7nHTfSwWENAY6DmOX1OuM4IRNBToJ3QUBVML84cn7hCkIjNVFNFizPHZML6/HHryqzVed/URtQYqTM+3QqpDb25vPRmkb6WNLqrE7yevrlS0aVIHMr8FHT1cvQEkjOpzOzHZDX5xkUze4Q+o5mb/5Dv9776T9e/W3/1X9uH87O1T8vfkv3fvn774O/1rYikMYaTLIbp35wf/t7dm0UnUx4mrwXb33nR5aRSts+ei/I+4Cc9+R779N/Lwj53jn18W8uxrIUGX6QpYk+gTNT0Ny9dOs/xSOT70kpgLjfi/fi5xkTZE6Lwh5muDG094HYW81pOXMpuJHKl2Rkt6YXD9nhHKlYGpTM1AQq8Fms3HC26Lka7sFqoMn7Db/gjXhoqcj7Dbf6jeReeD2qpSIFU3zODFMt+OOx/VLuh78GeHNbw0Q1fHQuDrdpo0feb4RNg09h0zbcav22RYhI3ovKDFt7xdlx7H0HswaICExBFWeuTDTXaK6NIYVev1ibryHleE3LLCRsoQa5wsV7hEkStA7by7U2LIJZrSRMXpvRHYqOuXztqnhQP5o37EVAXFX5tlF2bRQobL89v7zQRKp4yH9cvA5Xc8j9TTba1lnAZY2NTKRaUJWx7Ppzql+dX/h0T3RXRsb66CdnTi2UvG0HDg5f7CTDZJjUvQ+cCrrernpQOu7CXxavUZF/5hn5YrFILAyJVNNtlNOsyKC3/fXSR+DaXyS3MzPPtyqd49JdKyC+5K5JoX9Lu82nOZ8Kd6GBAPyamR9zucBMBPjLpQ6FcSGBAUV4H4HetaZ2a+s6ooVYCcV3Gxlfh5I5gqk49oFmmbuBXREAS/leHLnJqXAPx0bg6mxB6Jhgam7p7B8vj18jhf3W56L/G35hKEZMcE1cabKEHOdWPIzSExEe72a30yYc7cXwt/PHA+wRTI3QBitLVLKrhUMzkbk4EOABsGnBrn842EmGvxEmUlroMncSttUYGsFfDXX3F8Y+9MjPXDE9o+pDshUQ/rG4JLuAxK1uTScGcN6OTqpFqrVO98qBR9EK1mjxeOPUd1zMXXFIdy7ngdFi684gRkUUC3NgAxlIF3SqQ1Xn1h+65nL+BmkNP/MJr4HdWfTqPoWnS7nxla4+Rb1x73YoONUvHSqO/7HShZ2y063k7NRDbj1LXoNcvfnyuWeTlX6CnIfdJqA99EgO7PrfNLVae4juCtaEr09LDlmwIRnBQ70OFF66sxoqtFUSAlpIoLQCzSLp9f/gPPExDLUnKwzndGlv/jIresSkRY/w4uagz9N50SPMpMnW14d5kzYQv6aCMy6++c3lOXklM5ajgrGIC8N4sn5psZhY3O0hBiOLVKFZ2iMFnwNCvz50WqBr+PyW79E/ww0aokjcKPC0s4i/ib+7r9NSFDTdbLcEln4aqi32LLWUaOeXqsOQnDFQsXwkLiap9Pz4GFCG0bkfHbFfF+OdCcDec1jEUdcbZIciTCFSzTdYwkEhJRVKdLilguYZKh+1MmiMJKoUqyOAaDkxdrrEV1duNnzyHhrdIws2BiUPVHYujCohNiektm4XCtYL4/rqtV4ermwc3/kTbAVkN2wMUjQjRDTkUoMC0BraYvX44lVIGvquYjuBPiMfBsU82ztcGO7e8EkLfEKoCDlUgHVcpw50oX2sNtKGroT/e/ANq3CjYjiW4mlCXrnYot9KVuLA5OzqJfQLkwJIyJs7CyWhbHJlXwrDhPaCiqHRRUKspZXMPD60yyp+gN+Fxbkpn6ZC+jPtKuqSmUSdrcpzAU9HlMyB6rpFAxRfCmzfcj/ceCgIHw8BIUZWlZ8sfZKRt2oScokpO1TNa+a26jpxno6m/tZI3vGeMEjhsVp5M4WHRIUF4yqEDpBVmbyrPBwQkjyl8jxYOWvh8E+f29Na8beZ7NNa0LcsrsVL+Malttai2r1BHs/84biwbw/iXRLBHnfP6u7iwZD2VLkbqWIQLVm/K1wD3nPnweiRM2fWr+6g01e/9MhPb3vkJZvaJ6wS2UToRTnOeXqNwzCzKmKfOqw9dVh76rD21GHtqcPaU4e1pw5rTx3WnjqsrVbvpNFgrS7nVh7IR7RkeH1/7aaMYFj4Vm0ZvtXOkzHjc+qStJD4p7dmtJf8rZsz/Iq+ZXtGbQ1/GoOGX9UXtGhwkcp5HGH0aRaNqlILxVEbt4XjVi1rBlgxwqAfsWacvvplZUx+WrRhFU1YlfjrvsXX1Haz1nGzDUHA1FMHzgZtPGIHzsfTi0
+qqh/37qVPFIAHYXtcBkycAhTerCX8+KKGUUBvJTZMqlDB4M0MHkaKCfUsr0rlYZa/VFMq+O9NlfB8QoSMC5lAUDVjGcvink8Orvz/Y+9tm9u4kUXh78+vQGmrruxcckTqzZarUufSkhyrVpYUUU72JDpFgTMgidVwwAAYScyt+9+fQjeAwXAomSI5jp1VtjYRyRmgu9FodDf6hQ00YeOJnmPItXsQo9v9qbQQLz0C7Q/fWt+4lx6BLz0CX3oEvvQItP+89Aj8jnoETqRI8niNVcIrzj07wyNKzgyIatt2i/BVCJjkNK03Bcc5y+xk1hVWVudr66U4KtfdLkypEcNbC4i+AwvM6O9lFV+ijmX4mbnrFZfaU4w0nTAVzavM55Kv5E2xe2+cIghl+hIF/5nAf0Apgz9EmjIo5oceO/NXEeA2p8JAyWFV1JYO0rvXSdRfYODFGK47HdNMz7i85+7ftYDmWS04O4taZfGIKQ0JFPCuizSd/f6L3Z1sGGCQwiQZGuAuU8v30SzN6mTcmGZ0iPGcsSb5xJ3xi5U/4L165V/n4sSLvD4zG8OXpqVSTolTT2iYnuwX/K7dZ5q2F5XmcZorPbexWi1N+Nx0zwUvl7V11Lw8dcAZwjtSWxZZBeZ6ueQqNEWWIy5c7k4KT3EdCw/jG9UH92XBt5A0RG8hFzmlcdWEus37rIkawMKqSWC413WWBlMQOhhgCSrrJ8RSLq8GQqJd09Qso5muSj4Ccdc6n8zRzm0l1EVxdpLva+1gN58VTVgNsRChYfYMU7o5EUkzTuL7hdVLj0+N9ohr8Lw4Ljb7e1EkaoQ9PC3djkcNC1NbzbaRGdNMNUgmxjQBR0Uq8mQg8iyR0yrHFa88jd+j28wf5DPff6GSUjiOSw9gmTRyAjRDNJ7ChqW+vFEsxhOaOU+ZkDZqpaRVzkR6hKWVlO+SMWLpBIpOUSlphkrBgKeGqDAO9IR0jkGoLQnGVwYPOueiB6PA5zl11f+CRr1lF+fsobA+cP46907IW85FV5iwJbb19ma3OG2ebrZy7qrn+8qD89l01puxuJr5XXqCX9zAMzR63A38HfuA/y4SYs0O4O/Y+/vi+n1x/S6U8/yt+33Dyhp0yMJT/iL46snDvdANHz/bQRdUmqZY9xxTd92sDr4TXVR+Bzk6234fh3KvFbGgKIiCA0bxP8NRIerYD20BwTFtFm0xFrZrl0GIM1nU0SXjEdcs1rmsSzjYNSlNVVndh7f7vf1y3YB+ztOkZi/cZsfumbmrBmLIQDHrg/FsUWxnxxX+m6B9lC8pZaQZ16T7sYM5AxmmsDOoS+eGmFM/crA7eMPeHiTJfrvfOnj7tt/eZqzVavUP3h7s77/df/Om3YqTRTd4PGLxrcrrOsMO7fAVYjkMwT65Y9I3OahW1Xrb39k+SOjB24MdtrPbOjiI3yRvabIX9w/ig93y5UoweU0YHZVzPaD8WlkKeMjPJyzzZZulGEo6hluPlGbDHHyNwrKUgnDXLclSTvsp22KDAY95ka9OimoBZTsSydlTsajtPD/JEliabEhG4j5EGNoa+BW1+Xu5YrIJCSYNMkxFn6YVuuDX8xBhi9jFCdVz1bsrI/ighNhc+MqUS3nMMlWbDnSKw9t2bYUnNITMbfZAThjViRolQWp7JgBNUZPAEUPTXoox6V4c/Yu46U650lhuONAtlOL9lBUV+NQkeYDqe3ZItfW6Kmc6ExqPmB94O2rVaBHMPSKCKQrOEWUFvL7OdBdUj4LCzW7deIWhwi5vuZJbwPpbhyxNqdwaiq121N6ODmZ7b0OF9trc8B/F2ICMvi0/WXjx4TUY0FO5KlQSXnS4ebxJha/KK4wsM8y06HljFJsFsH5WAwvHMaV21dVzZHt7p/3VjCDncK7qAhDKaO0Ap2+GLIbdD6cT1nA9HfWIlh/Bq6riYgE8Eb6MzDsiJ+MGSSa3wwbpS3bfIJn5YsjGDZLl8PW/qazueTlZ+N6lXk3MLWh5lrCP8nZ0ECr/Zb3/mHyELtjLaP6/or1HLoTUhvXJ8QOLc/zz1cXxa9/u55tSqw8vPpemIZrKIdPe+QtN0ypq9v7uwlpiyfleSwpIBumaME0pzgGbbdoavAmhGp7iKYM+mlVHDRT4FwNNDoWcCFkuNvUFNOvXHj2qSVWNfCamFzTMq/4CZmbsms0nj9qMffRMtPajnehgv9WK2m9223uL4sfHkxFVtbWpLSrogxEzhkL5WAL/4ti2NOxkDgrSbELrXXiMBHAR84uNFndhBwOeDZmcSJ5p0ucZlOWGa2VCB5pJaORuyIW2qJC2nW8sEtYMG8MSWw/Uma0Km8mJOM6lNNo5KqFYYjAewc0XFNnXknqzF6BHj9kXK/Lf399HAy4ZmzIoy99PxXBLjySjuikZtvXc2m61d7da7S0taXzLs2FzTFOjdzSROE0zIc+G0UiP0+qB1Ir337Z24l12sL3dNn8kMd072N+hNNnZT5LBotzhOvD1YBvUncxmCLmKBOtedE7OrqLjfx0vil+9IY8eqXlxj89EbsPL5+uHzrE7beHv2Uu5jaexD3CPXW6xUwCCr56+kF7I8+emmH+dbLazv1KGlqbQMMDWhSuNh/133HCEJ1sBKwb9pX13SLh5vHHTT3hyQ8RAs4woTafK+ZhxKsK1YumA0MyvrsFqwlHMmAfR7nZtDOAaC8Et/MSL6TPDunKINztS0qkt4w5EonIINUhVwyAttfezQ1xRX4k018x1EC9E4YgR5hW3QJR9olMjfPG+HykzkcJoTZCqzTW/KyWEz826Ajuvz7MtpUYbDbLRTM2/c8Wk+W+7FZn/tfdn064M3XpQ6uF5BtCMZ4FlQ+2PIscbZmwIaJjOb+pZHDouc8qVe7VdMQzG5lM/j2+ZJjSj6VRxRURGRuLeDzk26plfE3Jv7GO/+bXANQq2DPkEp4Z/YYz0D1qecuteQoVB5WrCYy5y5ftYVZfgGWprwnqKDzMKfmb2wNUXi2/3hUgZzebR/j3+FLYo5gNCiZ8hrJdf4Rstc7a5JOTmL54Na+wRZCRN4Fpy2omZuMRo1Rw+96hr7IM7Myi8OaZZPqBgl2BmY+F9wIDpsPUjK7rQSpayO9sZpDOZpIz8cN6FoglVlojFODJzsuhhEkcQ1r0sqTXVeX3y7UtXDDGTGn3hjCAo80nutnEqsFNDLKcTLYaSTkY8xmbzqhCU4ah3NOVJWH7I2IgyV9rNZ/S9O0byrKjXapvZuleLV1wcajG+H/aeKpJncL/Akjkt+i4vzy97n8+uLj93r46Pepfn51fLLlkOVUPqqi7TxeFLag8EgEBWr1yrBTqDmWZ0XPOmN1Osc+fDeHCnAykLcLVV3JeiBhkVG704B5634Y9//viv395+etv5ZVnSGv5d6L7hiRNhs6uFZMrWvS/20Jx9YWwuPlNviieo8BavP/aeOziNcg2C1Vh0NEvAeeuHLMUYQHWZUtXl/hT6srkmSuZ8ZekUrwDRK4wCoKrzrHJ2gdBYkczzT14I/eRDrmlaPoPxPtGYI0PKM6VLeiEY2VPs8
ZXpsplbFXu0tBZfkGnPpdN4TLOkl/J1lr5Yc3RVeR0+5Gnq4IZ+88hKoPUZWVCIi9ngOaeq+7msyj6jqiNT0zQtdMZghSDhq6JMrqDMh5o8aUJnQ0m8Ar/oQrJ0EH0FJ+8nGo8MyUuOXiswjk8/POLkfbvfXNzPazDpTzXrCZnUlpz4fmp0G/ZHDpdSYvA48Kdc65SR4yzhdOEj1OAQT/Jejbc7hxefSymOjyJwkmnvoFsMcMmAVXvBJfcyd+HHD1picgNeiLtLYN+80IC7qYrbDHfNz7DfXHEsaxB6/ZynGiNfwbuTRBDsRDNfEHFAb/GIsR4+rM0viuYAC+HPHsC1ugDmg5RqzTKWzEP/1GaM4nAsIQyLiWECFB6ckEP6HNhGcKkU0X7d6YC/lB3ohrs6gT31Hg+wonDWq877k9fLoAKl7mpCAq/gsJreY/vkObAaLq0J1COqKcErvgBQO+8SoLJMy2lY9nZtN92WqMUE66As1lysO7huo/XQ3gCT1XnjPdCFxvkssFWP9nlN4H55vznKn/IsfyDn3WUoX+MRZTnlqVPqOZB+dYG3xO6z13LrOjtwuDWdHa5z4ALAZSywSyudl8DH624rbEg4bmCsh0Xjke/QVVn3wIJnZqGsJVjExlvnpb1ZgMqkZmgsruDKMfSnROX95kyLUYh0y5jR6m9CjKMfbp63vf2L8YhvLydAS7U1ZuJReVP9kVtfRJ/2ecr1lCTcyNx+HpLNwrHUMsMRICaLBEw8C/zuiGaZyIgdnsQ0jW0T1EK1WwnwQUpru90xjNi1vAm7DCZbDs4aL3ErYIaJXs+D0pVY74nBQLG6LmgrAONsK4K8fKQexN8tBKgvQR/mOzwP3hrP0Aq4ZqjloLzjUuc07dlCl+vWCyuQ2vlcYc3VgK4jZvNRiJfghCG4g77S0QqT/dVHK2K8xNFqX3yGOrLsxrGUcwTzTLUUvDVv8hlYn7/NR1SypJfyvoT4rZpAdZopTkf8dHgRAxVJuAoQehYOmqWDGkPo3PBETcd9YSPNzLZ6vlUCdR9qvBV7vIbCvKswNlOqHkUDXpBBM1C4iOgHhSowIwhbK6uwhJkfVQzIPZTELeWsQjICdIlxgGCyNlxVfP58ctQg3akai8wFQJGfPp8cqSKXFbqZukBGmDkHVNOpv8LDzk++9SXc3lWxPhSZ0jKP4fKK2riidGqHCykHNX5jMTZQTSSNNY8h7WnMNR+Gt8UXJ0dEslxBsfF7lqaQyhjcLVJHzdjHBgmoBcfHrEFoLIVSs+U5iGvSYKgnlJ5ztRVvx7t7e8nB4OBg583ewmlzxY3F+rjwL8uL78yEUYW8XgqjmqFdeHsyQx0+r2nZ8wKdruBiiGu8ay/HOxXtjYDBNJNj11wOKjLN7elenMW0j0cxjOkrDhaTuf1uqztBDlYYZ2j+gTuvOYlT7Z03izKR2YrRONmrSZB9OtrDKaqTqpGvF7fuWbsfO+0npt3e269v4u29/Sem3msv4k5Ycuq99vajU6uEsUldU3ePjo8vgqkX4Lu/STmPTXfMoX3u3++KMTOngiKxTTXG3FsbwCOJ4mOezkuAmpVjEyqNMHkJeH1ewOsiEesFZV9CYr9mSKwl/PcbGTsfgZcA2foCZB+h+Euc7DcfJ/vIyv19wmXnI/gSNbu+qNlHKPwSPFtT8Ox8er/E0H6JXC+htH+LUFq7nn+fiNoAoe89sDZA5buMrw3h/w8Osw3I8K1G2wYg/k2CbqsYffOxt1WQv/UQ3CrE30MkbhXq7ykgdw7032lcbhWTbzw8twrwtx6lG0D8rQbrBiC+xOwuS7HvLXR3HgrfUwTvPPi/4UDeeeB+s/G884D9PsJ6n4T8243unQf2NxvkOw/Y7yXW9ynYv92Q3xLUL5G/y1HsewgAngf2NxwHHIL7nYcDB6h8N1HBDuaX4OCX4OC/OjjY8eK3HyNcTxjwc0j0Eii8OLW+arzwM8H6ehHFzwfsK8YcPx+4rxiV/FzgvrW4ZQvcNx2+/JUilBen1oR9jZv0uhsQFMj8h7QiKBD++zYlKHD8u7cnKDB9aVTw0qhgET7527cs8Jj+JzYvqNJhuJDL4ln+1pPC0rb4Ql3/INnIBt86G6/PzPjGsn6uSjaZhb4SSf68hl6+Q0K1UfX27vZzgatAt46WY2ZoR7lNMpkPavuZoIKtuACsj7ZAHDHs/xouqw0prebdbLfa+83WXnN756r19l1r793ObvR2b+e353qWQZYm0fqpfAUDk5OjdbCBhbJGUWrBzeb1hcfZm63nAs319+I+8sYOwDzjWDG8CN830LeI1g/cdRt5TJXnVqAeOQxCHhM+gA6G+p0fchD006WkL8W9YpIopkEEc22BcE6se9bHdoigYmQ6xY7aQfj5ouuRTwzkz+Pzst3LYpElZbk7okb2sozkk2qn653t52qZ90IaDaaXcMliLeQabaU6+cewiQWdeNBnkzpnybM1EmO2RVMes4Wp9PcwiP9zLOG/tQn8H2D7vhi95MXofZpB/vbW7n+8mfst2rceuK9vvfqp/2rb1AHyLVmeXqP8C+3KGRi+BavRg/RN24RPCIO/j8Ho6PPXmYMOgu/H2FucMdZgCTo4JRtypS1VbHvYy/C7x/vDfgDECfZzBWXQ9YZ3AxieQFtGLd4NHtJAwxjqtavD5y6dC2Yh95JrzWz32T5VbH+XsCwWidHsik33QUiPoKwi2CAqj0dm33WZ/sXooMcPkF5zyYY/50xO7XeNclEF6DCrJsjjoghzg5Q5DH27SSc9891N5EtkiInVePu5dnpLMWafaad63zHp8kAMLEHMra8/YHb+5fFPvfcnZ53L/0bMWeLU6IpS+9vP7/POYavzy8/vrzqdTgc+4z8/LqrswBLj6fOlmkpLLfAhll3AnGOzvGaj4Hwu28kT68ITgipCXajzvDdhXewaOQaIgC0Uz4ZB2Jx93jMJTEleGSJ3f2sAsY//ddE5O+p1f3uN/BCGVHkYuC4sL5ExO66d0maxQ5ShnRAY2Iz+6fPp1QnMBWO74dKU9Aso76jkkE+aQnUwHDbLx0zyGHAtONqMefTr+eURMvTxT72fzacS6AH3BczlCwclLOZjmhLJbLQ3GoSvWDQkNxvtjZs5AWCbv28cvruWml5LlvS0nlz3eXY9ntLJJGIP7BnV1IDhqrHR6ynno2mWUJmU1xsPVCtFXB0QNYshssSiWIz4XR0IdPp9ye44rBdYRc4FZ+arHCMf/3n6aVGAb9m0Bng/8jvWxLbcdzYWUwwgsaACbPf8w9Wvncvj68JicyL87Or6EHUXm7Z5fTI2Cs0HnjJyDMGchkHPYVJ1fc8zA6jhu4VNOqpHNaAPdVHM2GHZE7NUDTMc7FCQ3fMW7nplgvhtPocw10esnw+HQV7fFygUwrlOEp0Ftj3M4c74CoMsBnGhLIFUK+tKxVdPttL3ZRUV0+YIHzNbkmpAY3NAU83IhN8JjAqXIs8SQsmEM6hU4uAzcsydXVChBh6AQyAsA2eddMooyVDeKZuSSUrNkzwzJ8zxYdfG95KrEAQ7
NLq/DCRWFowbRGnwoLnTSQygdA5MgbqCPRu5DJSawr60RfkycmOpGN14TDpGQMaSaR/Nbyh0cuEytphy/j/nfcwSJiEovUFEXzF5x2TDpQYUHKFtKHODxClnmW4Q96jZJRnTRomOBkLeU5mwpMcnETkZkKnICZ1MmK0OdHLh5LYWBfR8ctOAJw1I2qgLSDSgGCVDfscyg4KW/I7TNJ02SCbImIJqdj9ins25hskoeDn706LIZjDVu/bBdtSKtqP2nstrWkSVrtGn3ElTPCOoGjGFbCAyQxDpGMtqVliVybF/A2jlpUiu0LyE0nAF/eyohrQjlk4M2yiuc+sZBuqZqTalYQWVSwYZH4W9ZQEjNB0KyfVobPjpFVY0Y5INBLxhGMqITDj0PACL1yyBpIga6WvGxzR97zc3XwUZIvMJf4x7yJMjfB6PDEY+/Hx0phokEWPKMywG0ACbUlnNzH5lmDnlVD2jQgBfJCTaP1TB2srtk4u5yJW9C6q2UlGOv6FK18wiwHfzFsFnOnyBVv48ye0qucPEfX7iJDHP2M0O6UzuIsaVrAMPkM31wRSkbOplJBGOXnRoDCQDgE3HdMlQhKZM6gDbTGDZGUCssJwsk8EUQWKWHQ3vaZwdgCZTALjlwndOBjugkjFXcP9mFGYpUnNoaXOsqYZ71AAGu+DkqLt1ctEtfhhwye5pmhpGZn03ZFAPJXggl6mt5aYahGUJVqhJmLbpt0ZU4BGmGHl1fHT5mihw6PuEMKbjZ0himuuRqItXjbpjduqQZvxPe+AJSSaK5YnIpmO3pRAI2NLwl5GkAutjsaQkPGGtHGd5zgApXuLv0FTraiqbp0Imz7DLYqrZcK0+ufImdhNYslhl0A4VpIEy2zTFnkeOBJ4m5kwqmMPlKM4nRUdrNp4YY+ok0MhOGb1d2Fqt/Tb/CizyykU+LLtdbkeH+Ui+T0V8SyT7I2dKg+Y3yfspj8nRWRczCz9eXV10yRa5Ou1CpUQRi1QtfITUlZ7aQRxPjlBMceWyLu+5HmH+OVGxwKJDRtcdgtrodczCH+PE41zGeRbDtFsLR0GmPGaZquuiJTSb7ExW00Zd6WnJ4EmDuX7GxKEJI/SO8nRuWmRnQuMRI9vRwjF/tV4ysdKVMOAJrj1X8nWxfXF6fvjP3tFZt2c2Qe/qtLsobpLBlUxcF4Kbl24C8vny1Kwe/VIJ8XCt/erOPQ38r4aMZnijueOZah2mWLd5c1ORRMR5ke9dng3ML7MzNzcLfsqELrioYYyFsN4jJSnPbgEfjPFAAFO8nkIS9J0NUhxytq4YKDtVz6MLEmFZdM9v+YQlnEZCDrfMp62lltdoWrVVwjmb4VzFdINMRMrjaQM1E9QI8OLbnbrGrIKd/ayzHxOFx2zcLwqbFQ436wztXViR3/uAWtaidMrzb0T2g59GSB8y4WkER4IqzgQ0koLDACtMfPk4KAvM6rHQbrXw/4vSrt4YuSvYxRget0Uku+NqVnXoM4M18A54QWxDkCpq0Rdw8kEXQOHQROoW3zxhJHXsc2aRXQEZquzNDTimzG8Zod54iEWW2eUZeEUdTR4i2ZBKcLMqBuaJagTP4/r3OV7EojwdpOIe7t9kUlhMH4QkV4cXdtSGLWTmwETYYsbvinAdnnHNaUq6/31GJjS+ZfqVcvUb7aBmwAIWvMRBXvRK1+xMVkCm0wo9/r9CCji6QFQetYODx9HaQYTGOse6FYrZEv1yTDb8eBtGfsCpFgzroMhmAFdYu9/+bK1EK7yNFNeUp6o4LOyICArUms+GboGLKUI8rGukW5oA7WfAwo4YlGUHI/TfeYZMARdZ6EW0b88brCBtJnRlyAGIYLOMGPo4a1If4vBbDoXyXRm6w2iSEMXGNNM8xmulBzhjaUbYA8ZFNkpCnStwoQ3y1Dx2xw26/E9W3DQbRJnUtORjc35Q6ecYGMPZjZmhCHUHCTpC7RWm0jxNCUO3HFZ6As8A2NSBUxYINuBp6mUTnUykmEhONfO9GRYyrheuI7aU4gRcj0efXRjvli5VqKLjPh/mIlfpFLkZ3vFSHu5flU9sT7nSZtVOLhqEOj8cuI7zjD8QJQyfRIT8d0FZmt7TqUJHfPnIpvcOJsf3N5H94gZJVtbRMqNFFVfOSe7aPoCLO+KTGwPKTYRg3TRIwiYMvPlEWJ2BiCzwMJrjdCb0h6qoVOVxmegfW0wIxyE0TYWH0jo0RCbGIldWFCDdi689gFZS2IFedbpnryvleyBymcajwtOEpMTQUTbnhN5r7x/M4hy6Yb7tSgyLxxudBzjNj8P7SYhhysjp6WGJHnPCeBYJHX20guJ7CNiBkjNBLVDYk5YlUERXl+rtbtnFAoz9BciWigZAaHD8srd6yEQUcz2tq6/QIdfT+avzSWRaMppWwRGZ5hnL5tUYWgtMV/eimWI4EtQD8keai4qA2TfVfLjPOouqvmVkaiLwWalhk52sCrSQekQ6EEdD5wCZZ1pOe1yJumh+iFOQk+45EL0C4WHnUbDqYk0L0txVPqQZTaqUAllfMWcq4AyZ6IUF7MsVAEU25DpPUPlIqYYPVe/1/yUbqcg23pHmm51ov737dqfVIBsp1RvvyO5etNfaO2i/Jf9vswJkjR6pzc+KyaZTLma8tZQ48jQIRf8JqpRiQIaSZnlKZdiaTY/YlMRQ/s7o0KVqdFYJ0GUPGJeoHsYsw1sSSJRIBQaJ9ZksKoc5Pb04bhG8tChwi17SBomdjApD8M4ElJI0D6I5Adq3OcXHcNoPmXDYVl01faG0yJpJXFmbiVCapnXtss0LGB7FGlVKxLwc8eZBLjXlyo3SWOi5Nr7CB4eM6dR7tm4zcZ9BVCIxqGDxNkl+O7kgAU4EWBuUyzsqp+SeJ0angePR7mq4HsQ/q/Q72G3tLuyANWSVbMhFVqcAu4QZnpJfzZ8PH4OrJglmYZorwH7OWZ9V+c/o+X+K2X5b6zlWXTKJGd+77L1EcHGbJ52zTvDcXODtQbXVkUM4lunW+5xlQvU6XLLFL2wmX8By/n1/EeJTVKYH/enVycXdruH2k4u7/ddlPWpM4zr286fO4XxgZtzbmbCua9RWcaddfjgkb1q721C3NB8OoZryO3JszAkRa6bJK+t0bJC3zT4vFHOj677GNoVWNbKXkveC/J5PJkzGVLH/ISP2QF0QLfSjU2TI75yXMYykIw58nBjDmvMMuksayarZkMmIdPM4ZkrxO/sgGrOKTah0/fyoH3E0nYzYHOnbajVbrebeMfx7p7m9U1qpjOpohWiQzStJM2XdMZC2FroP+tQcFGedK++Vs/UlubXXisNPkInkd0bcHn367XWwnOVDB0R3KmhC+jSlWQzHXhA0ICSRIjen4Yypa/CciIXSw56VhhUSAJJxv10SoF/rGbbeTEYevL2UZTeTL1hZhhVzFS3ZQ3FAyGz+DJMs6c2zKdfbVHXEhyOmdDCpoxHO3QBEJhOWeJDzvjNF/ZJ/KFK9GkEyAwxn/VBGK9kYCBHZ56JYjDeMkNoIv5jt3YrRFTZEM2FYSxaKN7KYK6OVAHt
R9H2l/NYmQ2LkgMoHA/7gR4RnXo20nrzb2sJH8IlIyOHriFxhkKQWqE498LG/pupPieLjSTolmt4W64q+spQqDcI1pX2WKtScMqEh+A1rJxvsr06PlD9HN2IR5bcbVfEXUKPEFZ7sdXKDnwSY3hsGg9zs5j9ymmLx7CDEz8VdBYp6EXCHQW7sIWYTNCggygpewyCAMqtYdo8IOcmMhkql5oEjnVQgAOFhG6qa/9vfbWyWt17AzMhTm4Ae06zwpJMyXzUCCtjWBKqKUJ+l4n4+m8/fE+V9E9J24/7+PmJU6Wg8tSMgY+DOoEpvFI3IT2zrWBxlRIvy2ogrJu64aQqdbUPl/e1I5f12afM1SkxcgFcqyGypEIyx0cA9lwmiJeWp2TITJrmY01jWILCovqfFpAdofAWpxwYDBt2EzayWUSz2r9jV6dHrBppM3l4q6O6JhqKj4S7aQAgYlnW8EmySqCogZ+f1wwZZs2aVgA++b8kIUvExoVisxGLiEb4v8U2umIzqZZnQS1ckx/pY3iB6gYjBY8cizcjpUefCiKwOYnzkhwp5ZbOKHRtTntaE3GeDAUxQbmZTAsBIzzWXCPnLbh4MwpuqOBDA6fRESFnaZ1KTY54pzSyLlWgDF4l/GQNiLEntHIhI1hZH83iHDxsrY0Np4Mpty0Vwz2FUhLNGF2q4EjhZFYg66y5ZSoHcgXwUcMRJzOspBd1h0hAKqIzQTGTTMf8ziMpGEvqPnxUb5KnZDDeABU/wxhY+GOxuvDIQi2yAazUb6Jclc/QrYwbOY6ovloBZDyvZ1YIpq0Csz7n3l0m07shYlJmtY5+KIc+qSAcijYJIq5JCirS2Cgkdl+EHDAkzuVsG8CZaeB9NBbjlfZrRHk3GPNtokA3JQIvOhj0z4BfzA8LoL1e3MAj/cl89mW7J3NuVYCId/obpEOBxKGKcgzbO91SRWKQpi6FMj/32CrqU2YEhQW0qcjLgWYKbym/xVAyV3du+/46bGxJ1MZ7uGbEubDJiYyZpWmMLp2M3R2VjcuXBf8UHUJSAPXCl1etZjZcnsE3As4RhScq1GZIMyh4p7OF0YwcEEZYIpozeWVUl39LdwV6rNSgRoxaZNKeDlY9xzDKMAkSInY3nSMIV1A2TXAWCWwww/TYTCbO3aCWUiygcX7sHGAYU8ISpOYS1r1TaT4XA2FohY3rLFOG66LwfHkGFSWH41DDkmGnJY+RZKDkxw7XlJFazYcDwj/OUSoDXD8nGXEOoTiEo/G9nQtvQMI7ZthnDoB7FWPGCwn1ZAgN8EqJE9sIyDoLQMLcLVRGqyY15z56L5piEj4b6oCjSOcZwsvOG7bH+gLUo2493D95sJ312MGi13+zS9v7Om37/7fbum8F+iR9runsqaZSO2TB2L5BOQK3Zu4o5L0KLJrszQb5DqrLlF5qm4h6X3/cNDpjZjmGT/GQOWY/erwH5sGUdB/0uLqJSaQolS8BvXeyQzLtrAvBP8NuYKsDg2FinPLY5wqVd5NSd0AOCDuNcaR9+RgLj/j2jWs0bBE1keyxB77WJr6vkHzULeVMoZpjXPjAbA31sQee6OU6WEI+m3W5lJhIJqzWuwHET9SwBU87ImYAT9L1AWeRZyYzgXnZS0an95jfYpkHSSFhzDK7JIVAPE7EbwSI41L1YLMIC+q7fnh/UHiceMpd070ZbjJdmRHIAQpWjZgAwz+KaBxkEZUa1PBgZEMz0Lnm9tJMFU9nmZqFfQuVTG2QE3lhAzs/WmPHOCumAtKnKYY3WQo8VsKN5Nsy5GvlVKzYlbGlzXpB8Ujrq7TknlAGVhOaCrTxl6ZIx5e6fvEgohp+RQmWuKQSM457XpIlSwdPYIjWmGYadKzZHTXDzNVv2n3ZZQqugyMVaA5ywcgqOP4Nr2Y6pqQoRqLwuq+nZ5wS8OFPZF435OfpsSU/wJ3SgmDtMgkmO3QKdDHAQIf0YVLIZ6GZ36COi995pTjclqXrzBalbWo65OTPrWZFfyrWk3YL4wPuSbVFdlUIGa0FSIW6NCUZtrj3TRGTpdNa2CMpXe+lepcZOtB3thnYWxOeXzKzimyesLHzK2UGuAEElWYMoBvdHKMVcPoZNVtjCi+NonmVlGCPInjCMQcsJHQ177xymYEGgftGq2i8NQlUCIkxuKarqhEgFGSJfyA0J7+VtgkiB02MpEMEsscgUT+B2ytDMqEgpz1hYtg/j//8/f6Ri8gR4RLMy3uqxCR0ZysR0vB7m+pwENj7er/ixnWUU0zB53CbHALxFkhZB9wGWfZn/nKOCxxLD3zy5v81MEEvfl0yQl0yQl0yQbyQTBPekK6NaiL2/MB0EQXpJB3lJB3lJB3lJB3lJB3lJB3lJB3lJB3lJB1k0HQT1p28kHQSAeUkH+WbSQSx3fCENwkhl8DkUx5/wGRJzUyGCuiRESwpetWz4zaeGPEqOaEV6fIOpIYubel8xP8TKB/It5YeEBuhLfshLfshLfshLfshLfshLfshLfshLfsjagHjJD1kLA77kh7zkh7zkh7zkh7zkhzxJs1LrYUTdxi1dFd88Hre0YRuXms2WUqX4YOoCzim0fIL+JzSOBRb9hdLiOBfR9EFkYjy9thBeeyXHIPzp5OrymHSurv7X4T+hHfhA0jGDJlPXWSW0yexpg28JkmJgCwdG6nirhUvfaAV9OidH3QY5++nDrw1oSfLaxaJSEovx2MhaC3JUDA3+YkAo0jTWPI5+AIh8T7KwmcyID0dWu/WFw4Uz08wYxbgI0fUGH09orK83XkelqVg8gv0c/RCSoTIpBJUUg97yDNwVoKzSeASFu33nDrhv0hhCh/M0YMHiWIwnKVd4+TIUNEXoinGvN4K+L5kRfsbgwpg5Azq2bl8oJyeTPB6NgwaPT224DJzBc7cVqu4G1+ICiWeJsYGFVET0/81irex8pUYE0NGspK27ur0CC+r7IbnItgqAF27qFOAYeZAWwBZhnoftOfwSROo9gvUcbFeDOvpPikdahUjfTwDTajFKS9PIqU61Ne3YOPEMcN3TI3bdK6a87mE3kuueOTmGGUuue/2pfcxG5svr3rWnG/QkuO51r07+FY5DVExTpiJy/DBBOYrtHN9dZ4T8ALfxXZfP0iBnImONYsxrciruG+QTS3g+bpCPfDjC11rNdgv/6iRjLim0oO6amcirdnP/Nf52dHbivtxrHuy9vs7CockP5NfjC/fAydjnsDXJIVYsf70xu7QGgtUWtf7uS0cuQAIPUdeKBIOJY5EleayLzlT29JxF9OSCPETwPwgkt90oEpKwlN8x6eRlJxumTJLjf64oM8GpENUbX1swe6FweQjsxWXJuUFecWg0ik4nzzliQHwzhtcVwk1GXI3+z6xnfmnCDHjKIgp3p2wRizopElDKxuApXDwY9Rlc3DxlsLBu6Lnpk9AaJ4VWdsz1ZbhlbEK0pPEtcpV5HcaNFm7L9hSm2iYF1hZk7Y0LIEExX0CAjv/SOvGLeJf+1PcMjMhHJtnmpoLkqazJHkY0V+AqcXoWSrpAbzKKkmSEOVEIMdQuHO
cdga64d6yBeb6wLo3Ch9MgLIvldKJZUrjjGXROZg0y4knCsgaRjCb4b5Gl04ZVABrQonhOssbm7xvuWWMq4dPPaLH3xFrGImE93+AqwkzuL5jPfSFSRrN5K/cefwpVeQ55KL6FFlfEBjVUrUItc1YDUuYvY1jWly1v9LrAQeHSp8zE6KSXIgZHrGcJzONW/tFS/nbYsHJMs3xAY4MIBmAQJQb63nAo9tOKgjGZS0VVRLKU3VmrqjOZpIz8cN6FhKF5PvhxZOZk0cMkjiZSPExrWAVNdV6fxPhSI7OYSZuvwgiCMn81BjneY6RiOISIZLOZxVDSyYjHhEkJPTNdRF846h1NeRJGWApJtMyVdvORU0bvGMmzIIvINyiHV4tXXExxMb4f1pwIeRaPWHw7L7v4+PLy/LL3+ezq8nP36viod3l+flXDauZgutUVW9fF4UvJ6xDtCFqunMXZd7Inh0JOhAyjhNeItGZ0XLMUMVOsU5TAeEJaWWGTAp0AmUhh9MyokBx+0GdKkOOfP/7rt7ef3nZ+qYHqZkMspFc9cTBtdrWQTtsONuWcjUbiUfni/xezO4uuj49tUHwP4nX7jKDvCRwC2Gq+1O6tdCUPWZxhthuo/UKkNiGbgmoLRVRYfIsXJyhRqppCTUcoCKgVV2C+boA3YEOuaVrWEowBA6EhdEh5psqd3fs8M5oeOLhKZUXmilhaWqYvyM/1kbDUVHkpe+CDUX9hHHOYgQK/qAkAeYHIoqX314KZGWnNdk6h5Nu8cIq5BpAhPN/ycTWFrL8NhrEcgdZAPjEQkJuxmerGljfhZl8yRW4AiyCT3LzRZ4Qm/wZ5424zoByBebRBFM9iPxzc0WYF3D4wyVBoHVROWJBNv37/g01Exy6YQTxliMbs3lAJXQdizliqCzd/CV4U2gDm4IpYH21ETnzKqw14Q0kkQycCpqk1zDBZmKJRocvWSIzZFk2L9VqJPgaIHk6+Konmbr8jCQYwsyWcHqXRVZGFwhUeU04pKJTcjPzKs0Tcq5mgeIxnKtKKfeA6HodGZoWUj0U6L5dnRe8s0JOlgwisds3AjqqJ7T7ReATZ+sFUbjcdn36Yv6Me3u4393fXhGR/qllPyGR1tnkExfdTYz2xP3JwGovB43idcq1TRo6zhNN1aOIGvXiS96qlU9aG3OHF51L1lEdxO8k0S9eFkz2ce0mlKM2zjtTjBy0puKtAWHk3uY+WNzNuKn+wRuQEgcFyREW4ItegIPVznmosKzSe8NRKzLDp9oDeoqY6pilYE4CKkGpFL7MnDXuYCLnQdeogpVqzbP6N6qn18+FwLCEsZWZO6zYE1Xw6YWsDe8RowmRE+7xXbzPmmdoyhl07gePoParIcD84oDEjrzrvT16vGUsIWagJv48wBUZFPLYn14RGUO1z7cqI0WI1NXsxwMHOu14sWKblNEx8XlumkV2KYoKa1wMjBmreP5sbrYf2hq3/yoc8o6nHR61Fgw8xUj3a5zVh8uVt79brlGf5Aznvrnm9ajyVLes9dTCvCYmvLq3XKwT4eK3HJQ5X/3GpbJLlAnA/HjW12ckIhTvDMJQos5FHmJdA4xGxk1UZqWS53LKp9bwV1ou9s7IFlRTzgVhhzaf+lKi838SQFj8kZEZljOoRuQkxjn64WZuU8WPGI769nPQvhUOVFeMRb6o/cusx7tM+h0gOXxy2VOAuDjMw14aWOXnEZBGHwbMw645olomM2OFJTNM4T224ddGCrB6cBikd1qU7Gc7uWmaHHQ2TrR2FGuPyKxiEUVxrQ8BVI+mJwUCxRfSmteCCs9WHjeJ/LrIuzym2VcHBF3Ixk60dlRo1igomWKd0zQjccalzmvYWD5d6lkZeQcLOV875qgGf5VlrCWTWy1rD1cOzF1c0YLK/WtFAjNeraNgxn6G3LbtJLVEdLT0rrhuVmmXNDBprlTYjKlnSS3lfUslri8dzNgFOR/x05RyMAtd1oadZOqgx984NT9R03Bcppt+ZLbxWA9NAW6cXA+6I/SwNwh7iNE9csELKKPydiPnNN+5dSV64/IFwm1IdAj+wIq9cvKymMhr++boBN0J+TJ8eDqXAvTyzV/0JebUx/HOjAfc+GzjCxuvqfc8kOCGWJ/qwtiCdC8nHVE5tGuvJEXn108nR6ycvTTfbrVZ7HfK3yECqG68w7GouTuu66uRZpV7c+kLJs0p50eKWs2B6m5tTWbXtvf2dtzvrWLcxH7M6L64+nXw6RgeZu9EOI8BsTbJgNYmQRhmDyLBBSeO3CSJkpPVEvdvaur+/jzjNaCTkcAtTTADDrTFLOG2CIyj8O3oY6XH6+0nHl2klxtbhMacpuo3+p2EvnN0NVUR+NSJojNEqRpXKbD4EVy4Qtm+LbPgxx0LpouJ9iLrLTl7HstXHmp+gcOOgxI8i1jQt2HV+JavN1v5uay08uWIIz5wIHh96Y84PkWDzmzWA+pWS/+06hIdncKrecz1yocQ+fqWyPO6EXM8pJu6z2m7yQWuACTZBjZNzk+TWJeYnVK9RiaszcfBDnqbEgBtGlTVm+MLrSnPCiEr6UBIE2jwvjGhrvaz0fKt9jpJpBjGSCk6MQJs8DwODEMsbmBVatUDBI/NxTvGf9v7O23UEv2gqh0z3vh82uwKAkdGMxaGm45Rnt2uxbWvUNIAN4Lh9hfsi4RJqtln4q5l25rE14JTXptBfuaJPRpf/DLq8JIrFueR6Gkbiv+rOKPooPmtS96XSPcXYIlbjY0f0FTYZwkLncFTD1uSZZmnKh5j36zrdSCgZaa/8FB+OdLn0F1dFGuYcXbm13Wq2283W3lX7zbvtvXe7b6JWq/XbimT4RtojrAb/36WfwiIo1rROy3ZZWBGrb6Ytw1rwqGsPLdjHYSUcvtnGDythVaNh89IpYolOESst5ktribW3lnic2N9Sw4l1IFKTbF6wQ8VKGPwntbSYT4URVaNonOzVJMo/He3BzdSKpiEMoUa0XROY3Y+d9jrh3N7brw/S7b39dcK6114k5m5JWPfa2+uDVSWM1XWF1O0eHR9frAPWFRqELFI/KGgxUyoeRMTAH3NPFA9qR9vRTrSi2yqlX9XAh4vZb86+H1N5y7NhpNO6eHLjStLBgMfk1KBMLqTQIhYpRDIY+9hCoCJyyWIxHsOstiQQoZK9q1Yj+3hydYyVy366PD4+swXOPr0/vsQ/L4+PKhXJfh1xvaIbzN2o9OgiUYrrYhk3a1hUxdPDa5Nfn3Mm7GtkdB5efC5nc6Kf2deynp/RuWo65wQqkk1oVld+9IlvAYTTVK77G66rPyTB2wzAJpYfmMG2hsorE/YV6u95GszYxkuQ4IKGHodlUTaT1pzw5HG+K+eiPBPf/WgnOthvtaL2m9323sqI8/GkzvrqHQzrsqjaJBlwEZCLY9zBpJMRCwVpNqEFDTxGAriI+cU253CFdgY8GzI5kTzTWJAEarTesYzQgWaSSIZ0tC3SXXd6Y082Ac/i/JU0U17AKjKi0IItzqVkScO22L7H/i1Q9MLGS0jqq14B9HhwlcuhSBtcQXUpr
GPAJWNTEDVb/VQMt1BZakqmmJF5W9ut9u5Wq70FZQR5NmzaPN8mEqdpJjSH90iP0zld4OL9t62deJcdbG+3zR9JTPcO9ncoTXb2k2SwMtu4lL0ebJwaXWl+16wiJbsXnZOzq+j4X8crI26LNtWNrZ1mFaw3/OEAhdBtmQj4+3zCsHIt6WIxwxXJ8uzues80GrC/nivQvITRsLu7syKKtppvTet+VXbgFuhvKrfgVe0ulX/2cpmOaL6iC0WyAZM11lC+dOOTz5enJOXZrSvq/3TZwCd1WydNbQBhUE12y7+01Wq12ts7K2qE2JVMTiFPPZrTRn99x+W5y4SGWaAgqWYZ+pn7VLH9XcKyWCRG8pe70tjtTRywROAeF5kqfHVdpsEtffwAFsglG/6cMzm13zXKRdKgDYqaiCzxNZ7gVgGueDDw/Cad9Mx3vriTImJiF7Sfa8e6xZh9vCk1R/Mdky6V0cASpGD42mFG0b88/qn3/uSsc/nfiLm3D6uH3m8/v887h63OLz+/v+p0Oh34jP/8uE4OwEyaL5VpXWr9D10QnzELzeqbjYLzubxfT8sLTyf0q2BizLw3YdnsEnpUgGsUz4Zpcbtgn/c8BFOSV2YNur81YC2O/3XROTvqdX97jewSLFwBA9eF4gbx6DCundLWr8Huejgh8LcZ/dPn06sTmAvGdsNBaTA/4h2VHCo6pCwb6hEOm+XgYwZcC4Y3Yx79en55hPx+/FPvZ/OpBHrAnAHv+asd1+K3HBoKrSzIzUZ742ZOGP3m7xuH766lpteSJT2tJ9d9nl2Pp3QyidgDW7l2cJkfqxFH6ykCqmmWUJmU2QGTrKwMckGnapYAyDFrQnJUFAVbJ36dfl+yO7zIg7PX+RXMfJXj5uM/Tz+tCZ9bNq0BnY/8jjWhb5VRiyC0TQwgx62CS/f8w9Wvncvj60JDdMfH2dX1oTF7Mm0rJlyfjOmQYejZMRTSNtx/DpOq63ueGUANU6+JONUgwrVQxweWhsGkWEj8luEJFHbsLi379cr08iJmDt2uj1g/Hw6DDPnVCBiiUVcUEBaXs9pJhb3Wg5CKaZYx2VOaLpQ1+ph1AREyBvDOL1vHR5e2FK9L/MSW69iQ13cxGtOUx1zkKoyfhILSny9Pq7bFinja24BVcDzLbatCcCqrGdW53J3CdrfAYGqwtaDALdfVePrtFWMFagyF3biyFWlKhuJMx/7DqaHKOWAOCgPPSPfq5F9kO2pF4cVC9fbBtl4pugU10c6wX0vNBzTW+MknJFRuK7ALZNPQCB+F3hlNmiQSPxv+wr/45G43+IFP7vbtx5kxxzQOnhvnmj3gn8ZGtn9hkWr84MpN46dcptcZCYf8AeJvmzSGqBF86h6lW9MJleYtm+IvD3utg2ZQzrdy1+LxWI11cpnW3AP0CDt0usZ+Mg0aPEMX6ZQqzeMoFmGv4JOMKAFdrxW4K4xBO6ZTAiY0mLR4wZklW0Ki2wLZI50WWSx0tkmxTWawnS6wSid0lyquStH9OBROj7/BIW4wliiEEBEygBUtt0nKNZM0JScXd/t+TJbFqbA5xDe/38ApePM/N+TVyfHVB3L54dAPuv1mZ/s1whQ+GI8olD2U3gxw8Uw+79Sm+jlw/YgIdkVzLlN+dR6qPf+36Prgqe0j1P3kRTUbX+pMGjMoKKsNDLhI8vBVdXRXHVYxTfiAcE2wkUjDMHMmNGF3TE7NFJBdTGfenxncTYtNyMk4V1jiuu+b0ydzGnCjSgAP9xnZmGTDoCE6ZB1H5rv/jCxow3kDSaHiQF2Md1F0vbcCzN57wHb7x00gzrSYbMws8s0/IDPIECJssu6AXrVgBRAgT9fYZLvOdKCTAbav/nx5ioU+8I7FNl+YihwaQRRyeRqwFhR6L3wIPCM3DvkbyDmDSyddKuUsWSwypSU2GTPnRKnqCHZYLJwyEBDwqP+zLDHf7e7ubOEV0n/98aP9Hj//Q4vJ6qvqBNj3sbKbnzMfQ+FFL2wVRRSD0IuC4p7Sc0QUz3znx7HIuBaSZ0OUfF6zdrpAnxkRaxnKJlxTFbKIbQmWiqENzjavGik+0Cwj/84xWsirsngFQvVotu2I5yjfwdW/5oelyhUVdoBi76yUaQgxzoSuSsClmMuM9sjPq/PdhCoVcNPaCzDY4Z0gtMfzipF0CPg6syJngNajGYADqW+XYmMdODz7gu/LVafM+fUo8Lu7O+u/wzOY/JGz2hofgC4IE9iNmZS6TOIv1uM9D3G/t83qzWygyvn9X3B+o8LIkkLBDGeJzMlGy6ZAJsy7IHVk4doAVSCEPbJ2BDaDoTBfP9f+qUYwGSJb6l1H0H8O3SInuoAHQMcnb+zbM1EdCR/AZaGGjIk+0/csKI8ODYDuBZpf61BS0MJlkiW9es3NK9vHm4Fsd5PCWYQTN4BIkwnzMkjlffxp5qqvpHcHY+HD4MbeGAgRXoluQBp4+MXsUYL6u12MhGkmx5C8NJEs5oqlU7ui2Lgx5beYdTTJ+ymPicoHA/7gR4RnXplD5N3WFj6CT0RCDl9H5EpO3Q3wZCLFAx9TOGi5gtpqfDxJp0TT23KQrFXtzfqntM9S23TS6LBwMN+zFJvHXZ0eqUIOxiLKb+dkv6+t/6jhIxWPWH3pZF0Y/XFRD8f1rG2FUQU37+Yq+gjvIwf/GsjhWLfO7eQnAT+Lz65DZ+4fOU1R77PPgFlpDdgg28239bcVltlDzCaoPY2ErXuKpf1mtpuVFxH4hCgQl9M0sEVnIYBMfe7a9jP3uy0c7lMAwQwzyiPMHNMsE4W+W9qbjYAChatnFqE+S8X9fFExX66UZU9IW/SLUaWj8dSOgJsLpQtVeiOa9UPZUUr2POCK979umoCZVd7fNgzULgmwRkkQFOCVWrlZKgRjbKDXzZxbWlKeFo6NOQKBqhUDSQ3razHpAYJf4UBhg4ENgzLqN7KQpcsrdnV69LqBDjmfXVisSGFegmBuuDZUIGJDSRFsnzlun9l5C/9e8aRZP+CQ7/vcgTPnsSOnWInFDh/4fnVmc6WDamKyz3b49VpFD3utg4imEOloSAbho7UXz7QtTYN5rYB81e2cvY7wfhMDXlwb63ntX2muR0K6qKWwsSFERxgdORbjMfhjnTgq3PhMNcjRWZeEGBPyymb1JjGViXqNHbpKbdZZ9RJ/84fAMl9RE4YV4UrlTEYIfZ0hvW5JcCY8514dnkF1FwMEBJYEhPUkr9Dg2J5Y3Y+dbfKRD0eko1QuaRYz0mXyjsmVaziUSQN51bWTBRO5Xx2+hmD1SnDN5+76UEq40jwb5lyNWFLnoh+FE9k1P1pmzQ9//NxtkPMf3dqfZHGDnH/+EXST4sBpkMOzH5/gD7/5auATKKBQlHCoi1HcNE6Onb6epdUnw0pGAv3C2f368BNySDP+55yaIGvHMZxKkVfnKwiJk2zFfPvHSEDTXp7xui565lGCpsTMaAjyeQmKzGyV9VFFaQotEHsQ+FtfQLk/1M18eIGC8/kj/apBumBwXVQ2xaExu4TM+IqVcQDxTOgeXLstgOmjWaF8DPlB
mGc029YaXXOpyIaQvJQpnoAJBz2h54TsbLearTfN9j5p7bxr773bOfjfrda71oqRPB7XPhuIhRI+l0YWi6EtgGj7oNl6C4i23+223m3vrQ1RtBx6t2zao+nQ7KHRuCZO7rjxvQN0yDIm0SzxZs4tq27iy+46TqwA1TiXs3G2a81vgfGDmvWMsDQ1D8T2pwJZ4smOiQX+sOaq+MlXIK6QJuNKT/a22+ulD3uYiGyxC/3H4vSO7RCFt5tBidKZpfbJFAvgur+3t/PGrUWWsIeZ21AR92xE28wt6VposmJhU/Di8T+9fRlwgJrQGMudcl01frZbu2/XgIliktO0V+pQtXYbOuN/5IzgVC6HDc5Lvw/mn9rgbAE5qDTL4mnhAXPFu9B3AnwyGVGbAdEgPGxoghdxLqNBgHmaGl3K2Ji+KbQfuojoqtB8b+/D+/cHh2+Ojt9/aB28bR0ctbcPDzvrkEKKDzOqc0PMmuXtSVFRdGDIG1DeAxFKn18Z5hGPmSGXCh1DqGoMRJ5BlNtPgpzSbEgO5XSihe3SMY1IlzF/nT7kepT3IUdtKFKaDbeGYqufiv7WULSj9u6WkvFWDANsGcLAv6Kh+Mfpzs6b5unOXvVWFMvpNNdzGlgnyV/jA1DeCeDAmMUVG6BEw1T0aeo12Iyt6LWdQf2vsPFrMfEdSt+CjT8r5pw7DosLPmLkd69+LJT1Bjn9sUsz8sGY71zFInACNIxxF4HJ/zV45Jux70v0WCOCf7WB/5gAKC13Tfh+A9b8DPrrwPA/zzK3VVbqVe6CtsJmUqttVfh2qcClMdXxiCUR1WLM47r1Ibz9w7mCpBpUMSEE0sLjC8Sy7I5LkY3DWFWWJbZBufTxkhBbWSFJnyZNf4e8lAvK0Qce/jrkwctme5xVaDWPSgFlPLWeQSUokFOtybgUnWqr4T9DpB5PKmUsjB2aW9wNZL7Fy3ysBwc7lLG95tt92m7u0v12s99i+81WnMQ77Z2k1e4vVfjIU8LYyV+LGGauVenBU9ZnVDffRq2o1dxubbej1l60vdNstVqt9lJ+DkeLGnP0ZkihbcYezIybJaboCPER2BqShxDOIKreJ+MP+R0EbVsyzhLK/9CDSXoyX7z9xUDSMTNbsSZqhPmzNvHST1nUtMolFqmimhkb9E8sFRinVCk+KOe5aBprHmMpQRaP0M3gL+ptrUmcKTJaop3KjsXjcjVBG4Hhqwv1XT4s1JGA3EfVIMycB1hzQhOeDZltQwUHumRaCleYJ0zKocMhogeLXPUsfDq5ujwmnaur/3X4z9KaQDu8iKac1hUesHFlJLmZ4BVT3lLBNnwDIQmFpCoxIJBZD/lsWuZw5ru02TCZEriaYm2w+BbJSEsX3TbhwLc4Mb8Z7W8akV9HkP4kNMRZcMmSRjj2VOSwSrlihJZoBjWdEGaPSlTJzNz8nWx8okMWU6nJT/D0/gZZvBQFLkZtpwisRHF0PGsJgO4hrf7KJeBJlfY/tVreS7sgoWu00zdmy019v/wOYWkVcn84OVtYVUJq110CC0guS3WwnkH7EmuHPL8a7cNhS8uwGO1L6FQXwTk9qdY0vo3GXEsGnSzhbbUFe2Jr0WUqrDuqooVtuscuHqxT3nrjaQpl7fGMAtXep/3bow8zF4qvPeHwZ1+artM9e22M+T9ylgbtqxS2JXe6vsB1wWJ/rKrvtffa+wdLESZ0K6xZgtSZpncegD2/g8BPQgxTRk5PF+76UNAlFtnALEV9m3tG0y0mJLbMIjZvLW58XE0/vFII9yHUpwgGUDFNmYqus0drVJwJTbp4PWi27pnIWIOcivuSMP3EEp6PGxAEha+1mu2WrZ6djLlET2PXzEZetZv7r/G3o7MT9+Ve82DvdbUg9/GFe+Bk7LrKkiY5ZFJTnr2uiAUDwfPXsP6SxEelZC1nrFAoKOmKphQLaPXqWeROLshDBP8rF11JWMrvmHRmXicbpkyS438u7OIrKAFxoZGNC63fcnu0jH8pPrVclNNziBg8VZRzMuJq9H+WCESa6U6I2bRsEcX0saiUzTkNdc0CuqHn5gSbI5OmadDGWpFbxiZ4ICP3QJ4vlGddWM+exU5ryft5jUUnO1BlTwwQ7WK+AOmO/9L6cYseRP0pmaRUD4QcR+Qjk2xz03BNJrImexjRXEGodWr9wii5AjvW6jDsYYK1l3gWJMu/w/rvdwxTliESOmkUBXcahGVwXcqS4nacQdUt1iAjniQsaxDJaIL/Flk6bdgzuwH1q+akQG3+vuGe3WiQDXz6GSX7ZtYvFgnr+bvlqJQm+Vh9rr4QKaPZvNV6jz9BmyesakH4wKiO/vaaK1dPoBozrmXO1oSI+Ytnw15tBuEmGoT+vt75SMzE7hA122s2o4Qr/2ipE7vtZg7yfEyzfEBjLAoCerMrk2Trg0XBmCys3GDb7GpBOpNJysgP510oQTIvxWMcmTlZ9DCJo4kUD9M1UV5TndcnDZ6uyTtzjQugzF+BQY6pMakYDiE2FeIahpJORjwmTEohVeHZC0eFiL+wBZ6QxFhC2s1HThm9YyTPinqd3Femg1eLV6qXz35YI+HzLB4xYz5VF/D48vL8svf57Oryc/fq+Kh3eX5+taYVxFu5uhqPde3lelZq3JlgiAureE1XaiLxNKKa0XHNEsJMsU4xAeMJaeUAppR64WDLskeFVPCDPlM6HP/88V+/vf30tvPLmihtmH0hHeiJw2Wzq4V0GnCw4eZFcMSjcgbpL2bnUe1U3Mc2H77n6qRAIRL4PsGsp1IF6lJuJ2RUlUr0GlVciNSWcqTW7UxgP6PjA6VF9YRf4zEIAmdFqs8/0yH5kg+5pmn5dDeGBOQV0yHlWVBDzLwBnTCmrjBvuRdFVWTS0tJ8QR6uRjZjRKymo0NNVRjHHEjY82BBtRyqNCErlt5fGhvz9prtjULxNuJnzDTFJq7QcWS+BYK/+fLsMEzsSogbDT2fQLupm7GZyldpN3uOKXIDWBQl+WydbUKTf4Mscd1pFWRGwi2PglAOOxxEnmYF3GGfiqUpm7A7Xpuj5ggGtxWdUMzZdlsh6LN8rxbvoTyLjDNU6sLHjY9Fpgom4IpYl2ZETnw4sK1+gJJFhoY6djBvYCW+oGVthRZbIzFmWzQt1ujZNDET93DCVckyd2sdQUi97cn+BF3KFbTgeHEHeKF4Zq6xwkz9HYyLKcKsfSQ1HmNGBoXUhpjrqkRdwpkJNGTp4Gs0YPtE4xHPWLkJm90px6cfHmnA9na/uXgPtjmI9acQqVZfs5X3U2O52DYEBp9HcTnlWqeMHGcJp8tqxAaleJL3agy0OLz47B2WT67NSaZ926ul8LCHZy8415Y5/o4fNGQZJChwvNvYV7EyM24qfwhG5ASBYQreKJRurGzXz3kK0VS2IRJKvdgcTrYe7YDaGzLbRgv1AFGkOSxDDvYA7cwWIMQgpVqzzH87N2YSh2MJYSmD2CB0tYFajL0/lgd1xGjCZET7vOYud7+Um9sZVuwEjpf3qJ7C1deAxoy86rw/eb0GzCCspSacPsIUGDnz2B5bAXT
D4XUpCND5BWqfB3DbeVeHnGVaTiGUcQH4n1Ud0JK8mKAGugt0wtS7HzY3Wg/tDXCE+QKkDodnVLB7AgvVo31eE/Rf3rpuXU55lj+Q8+4a1qXGk9Ky1VOH5QqAf3XJuvpGth0513WE4XD1HGEK7xEXgTVjgS9sxr3tmlTZ4jxwO58RlAVYyonGI2InqzJJyQq4ZVPXSKRoX1aq5ayYHRoLSydswG0YgMr7TYym8ENCmbmMUT0iNyHG0Q83K0kKP0484ou0yp8jqUsRN2Xlc8Sb6o/cekf7rqdbwhW6BQIqWjjWwQRw9IjJIob1s7DpjmiWiYzY4UlM0zi3LWS9drpOPAYpXaj1yzISw3Bt1zIy7FCYbC1g1xidWIE6DAhaCejJaKp4TNOeGAwUqyvFqwI/zrZeDJ6feO94P+xf+TTcbjJI0V8L+DWe7BXozVBrAfqOS53TtLd4yM2zNN0K4HY+F1qzVhyWZ5slEFidbaAXxdc68GGyv/rAR4xXP/DtOM/QmZbddJaQjn6e5dYBfs3yYgb0lSUGpJ73sPIDr7+iJUxH/HRBaR2uAvxWQUmzdBC04V83Km54oqbjvrBd9M2WXNkoq7vt04dSB6XFOjf9LRoezRB6WFtwx4XkYyqnNtfg5Ii8+unk6PWTF3Wb7VarvazchHm+Ci7ZTE//Ch6rXK/xrCgnsvawYDN20WLGcTverBUMbVMmKquzvbe/83Zn2fUZ8zGr8xLl08mnY3QMudvSMCoI1aBw1YiQRimCaKFBSasmGNVfaqLDaUYh+QUzAeB42xqzhNMmOEbCv6OHkR6nv590zjp+RDEY8JjTFN0o/2O7Cvmbk4j8akTKGKMcjHqT2Rh2rlzgY9/W7PZjjoXSvrBYCXXbj2NZ6TuujwU/GQ4MV4FnRMS60p+7yn2t/d3W0ry3YrjHnGgPH6Zh5L5tUbUsvWs0xs9mZFXQPRmytosT0JUXw7AlG5dQWQZ3mi1/4oj7rLabYTjVYYJNUKfk3FykVcTzertB1ZmT5Ttmh9FFjZn19/rLnDCTko6SBEEZzwsz2VqdZVasUojKHtQp5BlK+kCrOw8DRxCzG5jVDAoxUhvm45weAO39nbfLBkpoKodM974fdroCgJGhjIavpuOUZ7dL24k1agKw3HAcvkKeT7iEAgcW5momU9Am/Ll45LUp0FcjOMUl6M6fQXeWRS5wEDH9qjujWKMIXKN6LZXuqaKP11J1g40QgQKZWYJHKWw1nmmWpnyIuZIYy4cVhH0JDNdgPZBHYXf0OXpqa7vVbLebrb2r9pt323vvdt9ErVbrtyVQHzIRxVxP64r0P3Tl4ao1NURmlNMlon0AZmHUe5bpXlzR5dYC+NW9aNoQvbhiVPjZN9V85M4WLnb5GFo1rcdZKePCTlYFX0g9Ih0o17pEaCliAgUke1yJupbo0NaoPOmewxpVizQuvQoIe117wsI9l3MOaUaXCec1cEPkaMWnVIF5yEQvjIkpWwIiG3KdJyjHUqrhQ1UG/V+ykYps4x1pvtmJ9tu7b3daDbKRUr3xjuzuRXutvYP2W/L/lhRJNRoMm58Vk03n9g5+wkwaR8OGCy73fc+GkmZ5SmWY9wW9lmMqGaafBVrXobNTdbmUCZcEo+ZZppm03XUHqRDSXvo2fPVk353TD+oq2/mLL6xO2iCxr8JZducZOpkH0yk6X3kGNSLGkF4xZMJhW9X9+kJpkTWThVtSlBdwIpSmaV07f/MChkfJTJUSMaeugjQumsOrlP6TMxUq9aDmBY2vx3TqwzF9QzJKDCowkZDkt5OLsBwuIbYxkk01vucJS6d4D+8q6Grh/qwS+WC3tbtwP4AygSUbcpHVKV4vYYanpGvz5yXiwwPga5KvFvC54vXnnPXZkjxtlLo/RVaLtjHCBBZixnfncyGKXLe2k85ZJ3huLob21N7qyCFoK3Trfc4yoXodLtnCnTMLzKG44zjZq0kcfzrag1uVJcwsV3ayXRNo3Y+d9qqwFSUxa4Bue29/Vfj22ovEWy0J3157ezX4VMJYXVce3e7R8fHFsvDxRcDyDz27dknRkK9cuISIgT96nihc0o62o51oCddNSr+qIQwXg9+EHTym8pZnw0indfHbxpWkgwGPyalBk1xIoUUsUrglNzalhWB+JSVCJXtXLW308eTqGMsg/XR5fHxmqyV9en98iX9eHh9VyhvBS0sQyF4B9OgiYWnr4g03a1jpwdPAq29fh0Um7GuktB1efC6ns6Ej1SL7yH3swzL5bBMoaTShWV3Jn1g+DtR0mKZyr9woqrlS7VKjmpg3PYPhmkpATNhXKNTl8Z4xLpdA+4KGZvpz0DQT1Zw54vG8Kwf+PxPH/WgnOthvtaL2m932wmWzS8jy8aTGIKDNDsb9uBrZmJEAtjS5OMYdSToZsVCQZhMaqcNjJICLmF9sGWNfT5lnQyYnkmcaKyNALcY7lhFo2oe9fSbcBngLSQSWXxYJawKexWEpaaa8kFRkRI26EMe5hIKZ2M7uHruQQ1a+vYyX1JfTAejxwCnXZZD25p7qUszAgEvGpiBGtvqpGG6hNtOUTDEjw7a2W+3drVZ7C+qN8WzYtMmNTSRO00xoTt2RHqdV67gV779t7cS77GB7u23+SGK6d7C/Q2mys58kg6VYxeU49WCD1Ohn8rtjFanXveicnF1Fx/9a4sSesMhWg6kbQzvNKphueAEP9VttTjv8fT5htkhlFyudLUEKIVfpivdljZ3AXndFVpfQ2Hd3F27BEaBli3TWtL6zRZj9xJvK1wetIJLKP3u5TEc0X8LXUHeR48tSfeOUZ7eu2/7TNcWe1DGdRLRRZUG5yC3/0lar1Wpv7yyhpUk25ErLKSThRmEU19qPuXOX/gmzQPVBzTJ0qvapYvu7hGWxSIzELo6HD0K67UocsETgnhWZKpxXXabBB3v8ANr/JRv+nDM5td81ypWVYgF7SGSJLx4DbnO4t8Ao4pt00jPf3RTtKMXELmI/145FizH7eJ1njtQ7Jl0emIEliJX3xYeMwn15/FPv/clZ5/K/EXNvj1UPq99+fp93DludX35+f9XpdDrwGf/5cdVVx9SGL9VkXGrND12UlzHDzIpDNwWNhWAxIdLT78LTBp0UmLUw701YKrtsHhXgFMWzYVq4z+3znm9gSvLK0L37WwPof/yvi87ZUa/722tkkWCxChi4LpQsCDSGce2UtpCGwrZPMCHwtBn90+fTqxOYC8Z2w0GdIT/iHZUcUtRTlg31CIe13SwB14LJzZhHv55fHiGPH//U+9l8KoEeMGTAb/7uImExH1fiBckrFg3JzUZ742ZOfPTm7xuH766lpteSJT2tJ9d9nl2Pp3QyidgDW6o4aJkHq+Es66kEqGmWUJmUWQCzXqyscZGIahZp5JIVEBvx2X7Ca8Gp0+9Ldoc3UnB+OtvdzFc5Pj7+8/TTCjjcsmkNKHzkd6wJVf6NCgMxUWIAyUXV/p7nH65+7VweXxcanDsOzq6uD435kWmbGn59MqZDhvFLx1AF13D2OUyqru95ZgA1DLsCQaoRZ2uhiI
82DCMMsfIv9IPO7NEwb3mvV6aRFxlzaHV9xPr5cBikCz+faCHodYWbYEUqq1VU2Gh5JFRMs4zJntJ0oVS8x7R8CMswwHZ+2To+urT1NV02XQ6Vsgd5mrq+QywhY5rymItchQF2UA328+VpVcdfAjfrEl8FrzO0Sswq8DHUzgzV2XIZeFtGHqNnwc5x3TzmdLh+Pjo1xkRuXNmSGSXDzJ+hqDIeTg0lzgFbONB5hv0MtqNWFLrXH+1lUHTYaKK+b7+Wmg9orPGTjy6veOyxoWDT0AUfhSL1TZokEj8bPsK/+ORuN/iBT+727ceZMcc0Dp4b55o94J/GJrV/YYVZ/OBqxeKnXKbXGQmH/AGCMps0hrAFfOoepVXTCYzmLZviLw97rYNmUKOzct/g8Xg+u+QyjZBidZk8RzC6k1C5TAtNbuP+/j5iKVWax1EsNgJF+CQjSowZiakCl4AxJsd0iv1mwJzEG7ws2RISXQPIEum0SEOgpIQacVHqtrw8luqDLizFXSC674bC6dY3OMQNBrCEECJCBjCbVWzgTLlmkqbk5OJu34/JsjgVNjHz5vcbOMlu/ueGvDo5vvpALj8c+kG33+xsv0aYwgeLjuxONXdBND7Jz+ZeOXD9iAh2RZstU345vqk9wbIou+4p7MOT/eRFSQ5fW0kacySoiQtMt0h25lV1dFcWUjFN+IBwTbB6f8MwcCY0YXdMTs0UkL5JZ96fGdxNO2GSi4SMc4W1avsuqYYlaPow1+W4ONbh4T4jG5NsuFG0OoC0zsh89/dNMzXcNpB0GPSZXTuzXQgZ3iKZBcX7AdhW/7gJxJYWk42Zhb35B6R5GOQnVBZJghboZTL5Aek8TdeHcJ25HScDYoA18hCrHuD9g62EPhU5VGUvZO40YCGozFzY7DwjNw75G0gUggsZXarVKlksMqUlduSBJtNhCQZsM1k4QeDC+1G/Ylkavtvd3dnC65X/+uNH+z1+/ocWk+VW0gmn72M1Nz9nPi7Ai1XYEoooBuEEQcdWR9054odnvuPZWGRcC8mzIUo1rxG7s73PjPi0TGSzW6kK2cL20knF0Eb1mleNhB5olpF/5xjq4tVRvDagejRb999z0ZhZBvWv+WGpcpVFHaDYgCZl2NQ8E7oq3ZZiKDPaIz8vx2sTqlTAQWvPZLfDF41m4bhdIsQLgV1n+toMoHo0A2QgxS3JN5aF+9kXXl8uo2POoEcB3t3dWc+dloH+j5zVVpEc9DaYwG60pNRWDX+xXuJ5yPq9alZpZkNUzt3/gnMXlTuWFMpgOEtkTidaVtUzYd4FKSILtwIc4SHskdXzsesChfn6ufZPNYLJENlScyfbthpapU10AQ+Ajk/e2LdnohYSPoCLNA1h9H2m71lQ8xi6a9wLNI+WVS7Q0mSSJb16TcArcMwORwzks5sUzhOcuAGEmUyYlykq7+NPM9dgJb04GAsfBjfwxkCI8IpwA/Jvwy9mjwPUr+0CJEwzOYbMlYlkMVcsndpVxA5mKb/FlJNJ3k95TFQ+GPAHPyI888ocBO+2tvARfCIScvg6Ildy6m5EJxMpHviYwmHJFRSI4uNJOiWa3pajNK3qbdY8pX2W2u5rRt+Ew/Wepdhp6er0SBUyLhZRfjsn7XilhnuGd1Q8YvXlD3Vh9MdFNxyzs/YO3qbfvJuriCO8jxzYS5LAsWid28ZPAj4On0KFTtI/cpqijmafybDjL8jCIKUpTR0ZsBQre4jZBDWdkbDFFbEO2cy2snIhAn8MBYJymgY24SwEkCLNXYN75n631YN9nheYRkbRg5ljmmWi0E1Le7ARUKBws8wi1GepuJ8vEubLj7KMCWmLPimqdDSe2hFwE6EUoUpvRLM+IDtKya4GXPE+1E0TMLDK+9uGgdolQdUobfgCvFIPJEuFYIwN9HiZM0lLytPCwTBn41O1RLCjYXctJj1A6iscFmwwsKE9Rj1GtrG0eMWuTo9eN9AB5lPIilUoTD4Qug3X4wXEZygRgi0zx+UyO2/hTyueNGsGXPF9nylwnjx2nBQrsdjBAt8vx2Cu1kpNjPXZDr+61fKw1zqIaAqReYY0EOJYe6U/288vmNcKv1fdztnrCO8BMbjD9WSd1++Q5nokpIvKCTt/QVSA0W2xfXzqq2UG7nGmGuTorEtCjAl5ZVM0k5jKRL3G9jel3sCseqm9+UNgIS+hwcIqcKVyJiOEuM5QU7cMYWv9V4dnUDLDAAFBFAExPZkreB/bE6j7sbMNjbBJR6lc0ixmpMvkHZNLJdWXyQHJsLWTArNvXx2+hmDpSvDI5+5qaCRcaZ4Nc65GLKlzcY/CiezaHi2ztoc/fu42yPmPbo1PsrhBzj//CDpFcWg0yOHZj0/wgd9Ya+IHyGgvcurrYgg3jZNLp69n6fPJsIyRKL9wdr8aTkIOacb/nFOMYe14hVMp8up8hU1/ki2RGP0Y2jTt5Rmv60JkHvY0JWZGQ4TPS1BhZhusRgmlKfQI60EQan0Bzf4ANvPhRQPO54/fqwbpguFzUWH4Q2P+CJnxJcqNALKZ0D24hloAu0czAvkY8kowP2W2Fyu6vFKRDSHpJVM8AfMJmprOCUnZbjVbb5rtfdLaedfee7dz8L9brXetJSJVPH59NhALJfstjSBWg1oAufZBs/UWkGu/2229295bCTnU1Hu3bNqj6dDsidG4Ji7tuPG9A3HIMibRDPBmxS2rbsrL7rKnS4BenMvZOM+15kjA+EFxa0ZYmpoHYvtTgSDxpMagdX+YclX85EueVsiRcaUne9vt1WnCHiYiW+zC+rG4smM7ROEVZlBDcWZJfXD+Avjt7+3tvHH0zxL2MHMLKOKejcaauR1cmg4rVl4ELxj/09ttwUqrCY2xHiPXVQNju7X7dknoFZOcpr1Si5i126MZ/yNnBKdyeU1wtnken3+qgrMCZJnSLIunhdfIVTVC3wPww2REbRR9g/CwYwFeRrmoeAFmX2r0G2O7+U6mfugiAqlC5729D+/fHxy+OTp+/6F18LZ1cNTePjzsLCtVfJvr2mXmSVH+cGBIGlC76LUdSJNfGeaKjpkhkQqdKagKDESeQSTWT4Kc0mxIDuV0ooUtzz+NSJcxf0U85HqU9yGHaShSmg23hmKrn4r+1lC0o/bulpLxVgwDbBnCwL+iofjH6c7Om+bpzl71NhDrlTSXl+jWyfDX2NPKG9QOjFn8sNtBNExFn6Zek8zYEh7NGXT/Cnt5beayQ+NbsJdnxZZzW2EVtUcM5u7Vj4Wi3CCnP3ZpRj4YU5irWAQGdcMYUBGYz3XxwjdjK5dosCJSf7Wx/NiGLi3rGnH8BizjGZSXxeo/w8q11S3qVbiC3plmUqsBVXhy4YAaF+Qf1Va9ecPovryIibATlpv82CQSqIEImSWkz0b0jgsZZhx4n/5YJCxlVn8hn06uLo9J5+rqfx3+8/qhcxyRX0cQJyw0XJBwKGwxFTncZOWKhWPSyusFiDyJKmkK3dbe3vazqVvjObYxm37/PVEYLi4rNO4kH3iWPJvIk5Rqsx/qusUCSvtJvkdye+Bniof9E
[... base64-encoded compressed asset data elided: several hundred lines of machine-generated, zlib/base64 payload with no human-readable content ...]" } diff --git a/ingest/pipeline/definition.json b/ingest/pipeline/definition.json index 1ad74c901c6..c60c0294cbb 100644 --- a/ingest/pipeline/definition.json +++ b/ingest/pipeline/definition.json @@ -1,103 +1,194 @@ [ -{ - "id": "apm", - "body": { - "description" : "Default enrichment for APM events", - "processors" : [ - { - "pipeline": { - "name": "apm_user_agent" + { + "id": "apm", + "body": { + "description": "Default enrichment for APM events", + "processors": [ + { + "pipeline": { + "name": "apm_ingest_timestamp"
+ } + }, + { + "pipeline": { + "name": "apm_user_agent" + } + }, + { + "pipeline": { + "name": "apm_user_geo" + } + }, + { + "pipeline": { + "name": "apm_remove_span_metadata" + } + }, + { + "pipeline": { + "name": "apm_error_grouping_name", + "if": "ctx.processor?.event == 'error'" + } + }, + { + "pipeline": { + "name": "apm_metrics_dynamic_template", + "if": "ctx.processor?.event == 'metric'" + } } - }, - { - "pipeline": { - "name": "apm_user_geo" + ] + } + }, + { + "id": "apm_data_stream_migration", + "body": { + "description": "Migrate APM events to data streams", + "processors": [ + { + "script": { + "if": "ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'", + "source": "ctx.data_stream = [\"type\": \"traces\", \"dataset\": \"apm\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'error'", + "source": "ctx.data_stream = [\"type\": \"logs\", \"dataset\": \"apm.error\", \"namespace\": \"migrated\"]\n" + } + }, + { + "script": { + "if": "ctx.processor?.event == 'metric'", + "source": "String dataset;\nif (ctx[\"metricset.name\"] != \"app\") {\n dataset = \"apm.internal\";\n} else {\n String serviceName = ctx.service.name;\n serviceName = serviceName.toLowerCase();\n serviceName = /[\\\\\\/*?\"<>| ,#:-]/.matcher(serviceName).replaceAll('_');\n dataset = \"apm.app.\" + serviceName;\n}\nctx.data_stream = [\"type\": \"metrics\", \"dataset\": dataset, \"namespace\": \"migrated\"];\n" + } + }, + { + "set": { + "if": "ctx.data_stream != null", + "field": "_index", + "value": "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}" + } } - }, - { - "pipeline": { - "name": "apm_ingest_timestamp" + ] + } + }, + { + "id": "apm_user_agent", + "body": { + "description": "Add user agent information for APM events", + "processors": [ + { + "user_agent": { + "field": "user_agent.original", + "target_field": "user_agent", + "ignore_missing": true, + "ignore_failure": true + } } - }, - { - "pipeline": { - "name": "apm_remove_span_metadata" - } - } - ] - } -}, -{ - "id": "apm_user_agent", - "body": { - "description" : "Add user agent information for APM events", - "processors" : [ - { - "user_agent" : { - "field": "user_agent.original", - "target_field": "user_agent", - "ignore_missing": true, - "ignore_failure": true - } - } - ] - } -}, -{ - "id": "apm_user_geo", - "body": { - "description" : "Add user geo information for APM events", - "processors" : [ - { - "geoip" : { - "database_file": "GeoLite2-City.mmdb", - "field": "client.ip", - "target_field": "client.geo", - "ignore_missing": true, - "on_failure": [ - { - "remove": { + ] + } + }, + { + "id": "apm_user_geo", + "body": { + "description": "Add user geo information for APM events", + "processors": [ + { + "geoip": { + "database_file": "GeoLite2-City.mmdb", + "field": "client.ip", + "target_field": "client.geo", + "ignore_missing": true, + "on_failure": [ + { + "remove": { "field": "client.ip", "ignore_missing": true, "ignore_failure": true + } } - } - ] + ] + } } - } - ] - } -}, -{ - "id": "apm_ingest_timestamp", - "body": { - "description": "Add an ingest timestamp for APM events", - "processors": [ - { - "set": { - "if": "ctx.processor?.event != 'span'", - "field": "event.ingested", - "value": "{{_ingest.timestamp}}" + ] + } + }, + { + "id": "apm_ingest_timestamp", + "body": { + "description": "Add an ingest timestamp for APM events", + "processors": [ + { + "set": { + "if": "ctx.processor?.event != 'span'", + "field": "event.ingested", + "value": 
"{{_ingest.timestamp}}" + } } - } - ] - } -}, -{ - "id": "apm_remove_span_metadata", - "body": { - "description": "Removes metadata fields available already on the parent transaction, to save storage", - "processors": [ - { - "remove": { - "if": "ctx.processor?.event == 'span'", - "field": ["host", "process", "user", "user_agent", "container", "kubernetes", - "service.node", "service.version", "service.language", "service.runtime", "service.framework"], - "ignore_missing": true, - "ignore_failure": true + ] + } + }, + { + "id": "apm_remove_span_metadata", + "body": { + "description": "Removes metadata fields available already on the parent transaction, to save storage", + "processors": [ + { + "remove": { + "if": "ctx.processor?.event == 'span'", + "field": [ + "host", + "process", + "user", + "user_agent", + "container", + "kubernetes", + "service.node", + "service.version", + "service.language", + "service.runtime", + "service.framework" + ], + "ignore_missing": true, + "ignore_failure": true + } + } + ] + } + }, + { + "id": "apm_error_grouping_name", + "body": { + "description": "Set error.grouping_name for APM error events", + "processors": [ + { + "script": { + "source": "ctx.error.grouping_name = ctx.error.exception[0].message", + "if": "ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0" + } + }, + { + "set": { + "field": "error.grouping_name", + "value": "{{error.log.message}}", + "if": "ctx.error?.log?.message != null" + } + } + ] + } + }, + { + "id": "apm_metrics_dynamic_template", + "body": { + "description": "Set dynamic_templates for application metrics", + "processors": [ + { + "script": { + "if": "ctx._metric_descriptions != null", + "source": "Map dynamic_templates = new HashMap();\nfor (entry in ctx._metric_descriptions.entrySet()) {\n String name = entry.getKey();\n Map description = entry.getValue();\n String metric_type = description.type;\n if (metric_type == \"histogram\") {\n dynamic_templates[name] = \"histogram\";\n }\n}\nctx._dynamic_templates = dynamic_templates;\nctx.remove(\"_metric_descriptions\");\n" + } } - } - ] + ] + } } -} -] +] \ No newline at end of file diff --git a/ingest/pipeline/definition.yml b/ingest/pipeline/definition.yml new file mode 100644 index 00000000000..d18f614b64a --- /dev/null +++ b/ingest/pipeline/definition.yml @@ -0,0 +1,134 @@ +apm: + description: Default enrichment for APM events + processors: + - pipeline: + # apm_ingest_timestamp should always come first, + # ensuring `event.ingested` is set as early as + # possible. + name: apm_ingest_timestamp + - pipeline: + name: apm_user_agent + - pipeline: + name: apm_user_geo + - pipeline: + name: apm_remove_span_metadata + - pipeline: + name: apm_error_grouping_name + if: ctx.processor?.event == 'error' + - pipeline: + name: apm_metrics_dynamic_template + if: ctx.processor?.event == 'metric' + +# apm_data_stream_migration is not used in the main apm pipeline, +# it is installed for migrating legacy indices to data streams, +# e.g. using the Kibana Upgrade Assistant. 
+apm_data_stream_migration:
+  description: Migrate APM events to data streams
+  processors:
+    - script:
+        if: ctx.processor?.event == 'span' || ctx.processor?.event == 'transaction'
+        source: |
+          ctx.data_stream = ["type": "traces", "dataset": "apm", "namespace": "migrated"]
+    - script:
+        if: ctx.processor?.event == 'error'
+        source: |
+          ctx.data_stream = ["type": "logs", "dataset": "apm.error", "namespace": "migrated"]
+    - script:
+        if: ctx.processor?.event == 'metric'
+        source: |
+          String dataset;
+          if (ctx["metricset.name"] != "app") {
+            dataset = "apm.internal";
+          } else {
+            String serviceName = ctx.service.name;
+            serviceName = serviceName.toLowerCase();
+            serviceName = /[\\\/*?"<>| ,#:-]/.matcher(serviceName).replaceAll('_');
+            dataset = "apm.app." + serviceName;
+          }
+          ctx.data_stream = ["type": "metrics", "dataset": dataset, "namespace": "migrated"];
+    - set:
+        if: ctx.data_stream != null
+        field: _index
+        value: "{{data_stream.type}}-{{data_stream.dataset}}-{{data_stream.namespace}}"
+
+apm_user_agent:
+  description: Add user agent information for APM events
+  processors:
+    - user_agent:
+        field: user_agent.original
+        target_field: user_agent
+        ignore_missing: true
+        ignore_failure: true
+
+apm_user_geo:
+  description: Add user geo information for APM events
+  processors:
+    - geoip:
+        database_file: GeoLite2-City.mmdb
+        field: client.ip
+        target_field: client.geo
+        ignore_missing: true
+        on_failure:
+          - remove:
+              field: client.ip
+              ignore_missing: true
+              ignore_failure: true
+
+apm_ingest_timestamp:
+  description: Add an ingest timestamp for APM events
+  processors:
+    - set:
+        if: ctx.processor?.event != 'span'
+        field: event.ingested
+        value: "{{_ingest.timestamp}}"
+
+apm_remove_span_metadata:
+  description: Removes metadata fields available already on the parent transaction, to save storage
+  processors:
+    - remove:
+        if: ctx.processor?.event == 'span'
+        field:
+          - host
+          - process
+          - user
+          - user_agent
+          - container
+          - kubernetes
+          - service.node
+          - service.version
+          - service.language
+          - service.runtime
+          - service.framework
+        ignore_missing: true
+        ignore_failure: true
+
+apm_error_grouping_name:
+  description: Set error.grouping_name for APM error events
+  processors:
+    - script:
+        source: ctx.error.grouping_name = ctx.error.exception[0].message
+        if: ctx.error?.exception?.length != null && ctx.error?.exception?.length > 0
+    - set:
+        field: error.grouping_name
+        value: "{{error.log.message}}"
+        if: ctx.error?.log?.message != null
+
+# TODO(axw) handle unit in metric descriptions.
+# See https://github.com/elastic/elasticsearch/issues/72536
+apm_metrics_dynamic_template:
+  description: Set dynamic_templates for application metrics
+  processors:
+    - script:
+        if: ctx._metric_descriptions != null
+        source: |
+          Map dynamic_templates = new HashMap();
+          for (entry in ctx._metric_descriptions.entrySet()) {
+            String name = entry.getKey();
+            Map description = entry.getValue();
+            String metric_type = description.type;
+            if (metric_type == "histogram") {
+              dynamic_templates[name] = "histogram";
+            }
+          }
+          ctx._dynamic_templates = dynamic_templates;
+          ctx.remove("_metric_descriptions");
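[Editor's note: the Painless script in apm_data_stream_migration above is the densest part of this definition. Purely as an illustration, and not part of the diff, the dataset selection it implements corresponds to the following Go sketch; the metricsDataset helper name is hypothetical.]

// Illustrative only: a Go transliteration of the Painless metric-routing
// script above. The regular expression uses the same character class the
// script replaces with underscores.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// invalidDatasetChars matches the characters the script strips from
// service names before embedding them in a dataset name.
var invalidDatasetChars = regexp.MustCompile(`[\\/*?"<>| ,#:-]`)

// metricsDataset mirrors the script: internal metricsets are routed to
// "apm.internal"; application metrics (metricset.name == "app") go to a
// per-service dataset with disallowed characters replaced by underscores.
func metricsDataset(metricsetName, serviceName string) string {
	if metricsetName != "app" {
		return "apm.internal"
	}
	sanitized := invalidDatasetChars.ReplaceAllString(strings.ToLower(serviceName), "_")
	return "apm.app." + sanitized
}

func main() {
	fmt.Println(metricsDataset("app", "My Service/1")) // apm.app.my_service_1
	fmt.Println(metricsDataset("span_breakdown", "x")) // apm.internal
}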
diff --git a/ingest/pipeline/generate.go b/ingest/pipeline/generate.go
new file mode 100644
index 00000000000..9f3e670ede2
--- /dev/null
+++ b/ingest/pipeline/generate.go
@@ -0,0 +1,131 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build ignore
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+
+	"gopkg.in/yaml.v3"
+)
+
+type pipelineDefinition struct {
+	ID   interface{} `json:"id"`
+	Body interface{} `json:"body"`
+}
+
+func main() {
+	var doc yaml.Node
+	fin, err := os.Open("definition.yml")
+	if err != nil {
+		log.Fatal(err)
+	}
+	yamlDecoder := yaml.NewDecoder(fin)
+	if err := yamlDecoder.Decode(&doc); err != nil {
+		log.Fatal(err)
+	}
+
+	// Convert the document structure into the one expected by libbeat.
+	// e.g. convert {a: 1, b: 2, ...} to [{id: a, body: 1}, {id: b, body: 2}, ...]
+	if n := len(doc.Content); n != 1 {
+		log.Fatalf("expected 1 document, got %d", n)
+	}
+	mappingNode := doc.Content[0]
+	sequenceNode := &yaml.Node{Kind: yaml.SequenceNode, Content: make([]*yaml.Node, len(mappingNode.Content)/2)}
+	for i := 0; i < len(mappingNode.Content); i += 2 {
+		idNode := mappingNode.Content[i]
+		bodyNode := mappingNode.Content[i+1]
+		sequenceNode.Content[i/2] = &yaml.Node{
+			Kind: yaml.MappingNode,
+			Content: []*yaml.Node{
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "id"},
+				idNode,
+				{Kind: yaml.ScalarNode, Tag: "!!str", Value: "body"},
+				bodyNode,
+			},
+		}
+	}
+	doc.Content[0] = sequenceNode
+
+	var buf bytes.Buffer
+	if err := encodeJSON(&buf, &doc); err != nil {
+		log.Fatal(err)
+	}
+	var indented bytes.Buffer
+	if err := json.Indent(&indented, buf.Bytes(), "", " "); err != nil {
+		log.Fatal(err)
+	}
+	if err := ioutil.WriteFile("definition.json", indented.Bytes(), 0644); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func encodeJSON(buf *bytes.Buffer, node *yaml.Node) error {
+	switch node.Kind {
+	case yaml.DocumentNode:
+		return encodeJSON(buf, node.Content[0])
+	case yaml.SequenceNode:
+		buf.WriteByte('[')
+		for i, node := range node.Content {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if err := encodeJSON(buf, node); err != nil {
+				return err
+			}
+		}
+		buf.WriteByte(']')
+		return nil
+	case yaml.MappingNode:
+		buf.WriteByte('{')
+		for i := 0; i < len(node.Content); i += 2 {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if err := encodeJSON(buf, node.Content[i]); err != nil {
+				return err
+			}
+			buf.WriteByte(':')
+			if err := encodeJSON(buf, node.Content[i+1]); err != nil {
+				return err
+			}
+		}
+		buf.WriteByte('}')
+		return nil
+	case yaml.ScalarNode:
+		switch node.Tag {
+		case "!!str":
+			enc := json.NewEncoder(buf)
+			enc.SetEscapeHTML(false)
+			return enc.Encode(node.Value)
+		case "!!bool", "!!int":
+			buf.WriteString(node.Value)
+			return nil
+		default:
+			return fmt.Errorf("unexpected tag %q at %d:%d", node.Tag, node.Line, node.Column)
+		}
+	default:
+		return fmt.Errorf("unexpected kind %d at %d:%d", node.Kind, node.Line, node.Column)
+	}
+}
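[Editor's note: as the `//go:generate go run generate.go` directive added to register.go below wires this generator into the standard toolchain, definition.yml is evidently the hand-maintained source and definition.json the generated output (hence the reordered, reindented JSON above). After editing the YAML, one would presumably regenerate with the usual invocation, e.g. go generate ./ingest/pipeline.]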
diff --git a/ingest/pipeline/register.go b/ingest/pipeline/register.go
index cb3c365bd1e..72ef26bc909 100644
--- a/ingest/pipeline/register.go
+++ b/ingest/pipeline/register.go
@@ -15,6 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
+//go:generate go run generate.go
+
 package pipeline
 
 import (
diff --git a/ingest/pipeline/register_test.go b/ingest/pipeline/register_test.go
index 91e042a345b..c8e285e3fff 100644
--- a/ingest/pipeline/register_test.go
+++ b/ingest/pipeline/register_test.go
@@ -19,22 +19,22 @@ package pipeline
 import (
 	"fmt"
+	"path/filepath"
 	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/elastic/apm-server/tests/loader"
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/esleg/eslegclient"
 )
 
 func TestRegisterPipelines(t *testing.T) {
-	esClients, err := eslegclient.NewClients(getFakeESConfig(9200))
+	esClients, err := eslegclient.NewClients(getFakeESConfig(9200), "apm-server")
 	require.NoError(t, err)
 	esClient := &esClients[0]
 
-	path, err := loader.FindFile("..", "ingest", "pipeline", "definition.json")
+	path, err := filepath.Abs("definition.json")
 	require.NoError(t, err)
 
 	// pipeline loading goes wrong
@@ -43,13 +43,13 @@ func TestRegisterPipelines(t *testing.T) {
 	assertContainsErrMsg(t, err.Error(), []string{"cannot find the file", "no such file or directory"})
 
 	// pipeline definition empty
-	emptyPath, err := loader.FindFile("..", "testdata", "ingest", "pipeline", "empty.json")
+	emptyPath, err := filepath.Abs(filepath.FromSlash("../../testdata/ingest/pipeline/empty.json"))
 	require.NoError(t, err)
 	err = RegisterPipelines(esClient, true, emptyPath)
 	assert.NoError(t, err)
 
 	// invalid esClient
-	invalidClients, err := eslegclient.NewClients(getFakeESConfig(1234))
+	invalidClients, err := eslegclient.NewClients(getFakeESConfig(1234), "apm-server")
 	require.NoError(t, err)
 	err = RegisterPipelines(&invalidClients[0], true, path)
 	assert.Error(t, err)
diff --git a/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go
new file mode 100644
index 00000000000..e8602b95b1b
--- /dev/null
+++ b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpreceiver
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/model/otlpgrpc"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
+)
+
+// RegisterTraceReceiver registers the trace receiver with a gRPC server.
+func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server) error {
+	receiver := trace.New(config.NewID("otlp"), consumer)
+	otlpgrpc.RegisterTracesServer(serverGRPC, receiver)
+	return nil
+}
+
+// RegisterMetricsReceiver registers the metrics receiver with a gRPC server.
+func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server) error {
+	receiver := metrics.New(config.NewID("otlp"), consumer)
+	otlpgrpc.RegisterMetricsServer(serverGRPC, receiver)
+	return nil
+}
+
+// RegisterLogsReceiver registers the logs receiver with a gRPC server.
+func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server) error {
+	receiver := logs.New(config.NewID("otlp"), consumer)
+	otlpgrpc.RegisterLogsServer(serverGRPC, receiver)
+	return nil
+}
diff --git a/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go b/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go
new file mode 100644
index 00000000000..23dbabfbd49
--- /dev/null
+++ b/internal/.otel_collector_mixin/service/defaultcomponents/defaults.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package defaultcomponents composes the default set of components used by the otel service
+package defaultcomponents
+
+import (
+	"go.opentelemetry.io/collector/component"
+)
+
+// Components returns the default set of components used by the
+// OpenTelemetry collector.
+func Components() (component.Factories, error) {
+	return component.Factories{}, nil
+}
diff --git a/internal/glog/glog.go b/internal/glog/glog.go
new file mode 100644
index 00000000000..c7f62eecf96
--- /dev/null
+++ b/internal/glog/glog.go
@@ -0,0 +1,28 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package glog
+
+import "log"
+
+func Fatal(args ...interface{}) {
+	log.Fatal(args...)
+}
+
+func Fatalf(format string, args ...interface{}) {
+	log.Fatalf(format, args...)
+}
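[Editor's note: the internal/glog package above is a minimal stub of github.com/golang/glog that forwards to the standard library logger. Its go.mod below declares module github.com/golang/glog, which only takes effect through a replace directive in the consuming module, presumably something along the lines of:

replace github.com/golang/glog => ./internal/glog

so that transitive dependencies logging through glog are routed to the standard logger instead. The directive shown here is an assumption for illustration; it is not part of this diff.]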
diff --git a/internal/glog/go.mod b/internal/glog/go.mod
new file mode 100644
index 00000000000..92f85f6b65e
--- /dev/null
+++ b/internal/glog/go.mod
@@ -0,0 +1,3 @@
+module github.com/golang/glog
+
+go 1.16
diff --git a/testing/environments/docker/elasticsearch_kerberos/init.sh b/internal/glog/go.sum
similarity index 100%
rename from testing/environments/docker/elasticsearch_kerberos/init.sh
rename to internal/glog/go.sum
diff --git a/internal/otel_collector/CHANGELOG.md b/internal/otel_collector/CHANGELOG.md
new file mode 100644
index 00000000000..4750d33d4b6
--- /dev/null
+++ b/internal/otel_collector/CHANGELOG.md
@@ -0,0 +1,1216 @@
+# Changelog
+
+## Unreleased
+
+## v0.34.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Artifacts are no longer published in this repository, check [here](https://github.com/open-telemetry/opentelemetry-collector-releases) (#3941)
+- Remove deprecated `tracetranslator.AttributeValueToString` and `tracetranslator.AttributeMapToMap` (#3873)
+- Change semantic conventions for status (code, msg) as per specifications (#3872)
+- Add `pdata.NewTimestampFromTime`, deprecate `pdata.TimestampFromTime` (#3868)
+- Add `pdata.NewAttributeMapFromMap`, deprecate `pdata.AttributeMap.InitFromMap` (#3936)
+- Move `fileexporter` to contrib (#3474)
+- Move `jaegerexporter` to contrib (#3474)
+- Move `kafkaexporter` to contrib (#3474)
+- Move `opencensusexporter` to contrib (#3474)
+- Move `prometheusexporter` to contrib (#3474)
+- Move `prometheusremotewriteexporter` to contrib (#3474)
+- Move `zipkinexporter` to contrib (#3474)
+- Move `attributeprocessor` to contrib (#3474)
+- Move `filterprocessor` to contrib (#3474)
+- Move `probabilisticsamplerprocessor` to contrib (#3474)
+- Move `resourceprocessor` to contrib (#3474)
+- Move `spanprocessor` to contrib (#3474)
+- Move `hostmetricsreceiver` to contrib (#3474)
+- Move `jaegerreceiver` to contrib (#3474)
+- Move `kafkareceiver` to contrib (#3474)
+- Move `opencensusreceiver` to contrib (#3474)
+- Move `prometheusreceiver` to contrib (#3474)
+- Move `zipkinreceiver` to contrib (#3474)
+- Move `bearertokenauthextension` to contrib (#3474)
+- Move `healthcheckextension` to contrib (#3474)
+- Move `oidcauthextension` to contrib (#3474)
+- Move `pprofextension` to contrib (#3474)
+- Move `translator/internaldata` to contrib (#3474)
+- Move `translator/trace/jaeger` to contrib (#3474)
+- Move `translator/trace/zipkin` to contrib (#3474)
+- Move `testbed` to contrib (#3474)
+- Move `exporter/exporterhelper/resource_to_telemetry` to contrib (#3474)
+- Move `processor/processorhelper/attraction` to contrib (#3474)
+- Move `translator/conventions` to `model/semconv` (#3901)
+
+## v0.33.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Rename `configloader` interface to `configunmarshaler` (#3774)
+- Remove `LabelsMap` from all the metrics points (#3706)
+- Update generated K8S attribute labels to fix capitalization (#3823)
+
+## 💡 Enhancements 💡
+
+- Collector now has full support for metrics proto v0.9.0.
+
+## v0.32.0 Beta
+
+This release is marked as "bad" since the metrics pipelines will produce bad data.
+
+- See https://github.com/open-telemetry/opentelemetry-collector/issues/3824
+
+## 🛑 Breaking changes 🛑
+
+- Rename `CustomUnmarshable` interface to `Unmarshallable` (#3774)
+
+## 💡 Enhancements 💡
+
+- Change default OTLP/HTTP port number from 55681 to 4318 (#3743)
+- Update OTLP proto to v0.9.0 (#3740)
+  - Remove `SetValue`/`Value` func for `NumberDataPoint`/`Exemplar` (#3730)
+  - Remove `IntGauge`/`IntSum` from pdata (#3731)
+  - Remove `IntDataPoint` from pdata (#3735)
+  - Add support for `Bytes` attribute type (#3756)
+  - Add `SchemaUrl` field (#3759)
+  - Add `Attributes` to `NumberDataPoint`, `HistogramDataPoint`, `SummaryDataPoint` (#3761)
+- `conventions` translator: Replace with conventions generated from spec v1.5.0 (#3494)
+- `prometheus` receiver: Add `ToMetricPdata` method (#3695)
+- Make configsource `Watchable` an optional interface (#3792)
+- `obsreport` exporter: Change to accept `ExporterCreateSettings` (#3789)
+
+## 🧰 Bug fixes 🧰
+
+- `configgrpc`: Use chained interceptors in the gRPC server (#3744)
+- `prometheus` receiver: Use actual interval startTimeMs for cumulative types (#3694)
+- `jaeger` translator: Fix bug that could generate empty proto spans (#3808)
+
+## v0.31.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Remove Resize() from pdata slice APIs (#3675)
+- Remove the ballast allocation when `mem-ballast-size-mib` is set in command line (#3626)
+  - Use [`ballast extension`](./extension/ballastextension/README.md) to set memory ballast instead.
+- Rename `DoubleDataPoint` to `NumberDataPoint` (#3633)
+- Remove `IntHistogram` (#3676)
+
+## 💡 Enhancements 💡
+
+- Update to OTLP 0.8.0:
+  - Translate `IntHistogram` to `Histogram` in `otlp_wrappers` (#3676)
+  - Translate `IntGauge` to `Gauge` in `otlp_wrappers` (#3619)
+  - Translate `IntSum` to `Sum` in `otlp_wrappers` (#3621)
+  - Update `NumberDataPoint` to support `DoubleVal` and `IntVal` (#3689)
+  - Update `Exemplar` to use `oneOfPrimitiveValue` (#3699)
+  - Remove `IntExemplar` and `IntExemplarSlice` from `pdata` (#3705)
+  - Mark `IntGauge`/`IntSum`/`IntDataPoint` as deprecated (#3707)
+  - Remove `IntGauge`/`IntSum` from `batchprocessor` (#3718)
+  - `prometheusremotewrite` exporter: Convert to new Number metrics (#3714)
+  - `prometheus` receiver: Convert to new Number metrics (#3716)
+  - `prometheus` exporter: Convert to new Number metrics (#3709)
+  - `hostmetrics` receiver: Convert to new Number metrics (#3710)
+  - `opencensus`: Convert to new Number metrics (#3708)
+  - `scraperhelper` receiver: Convert to new Number metrics (#3717)
+  - `testbed`: Convert to new Number metrics (#3719)
+  - `exporterhelper`: Convert `resourcetolabel` to new Number metrics (#3723)
+- `configauth`: Prepare auth API to return a context (#3618)
+- `pdata`:
+  - Implement `Equal()` for map-valued `AttributeValues` (#3612)
+  - Add `[Type]Slice.Sort(func)` to sort slices (#3671)
+- `memorylimiter`:
+  - Add validation on ballast size between `memorylimiter` and `ballastextension` (#3532)
+  - Access Ballast extension via `Host.GetExtensions` (#3634)
+- `prometheusremotewrite` exporter: Add a WAL implementation without wiring up (#3597)
+- `prometheus` receiver: Add `metricGroup.toDistributionPoint` pdata conversion (#3667)
+- Use `ComponentID` as identifier instead of config (#3696)
+- `zpages`: Move config validation from factory to `Validate` (#3697)
+- Enable `tracez` z-pages from otel-go, disable opencensus (#3698)
+- Convert temporality and monotonicity for deprecated sums (#3729)
+
+## 🧰 Bug fixes 🧰
+
+- `otlpexporter`: Allow endpoint to be configured with a scheme of `http` or `https` (#3575)
+- Handle errors when reloading the collector service (#3615)
+- Do not report fatal error when `cmux.ErrServerClosed` (#3703)
+- Fix bool attribute equality in `pdata` (#3688)
+
+## v0.30.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Rename `pdata.DoubleSum` to `pdata.Sum` (#3583)
+- Rename `pdata.DoubleGauge` to `pdata.Gauge` (#3599)
+- Migrated `pdata` to a dedicated package (#3483)
+- Change Marshaler/Unmarshaler to be consistent with other interfaces (#3502)
+- Remove consumer/simple package (#3438)
+- Remove unnecessary interfaces from pdata (#3506)
+- zipkinv1 implement directly Unmarshaler interface (#3504)
+- zipkinv2 implement directly Marshaler/Unmarshaler interface (#3505)
+- Change exporterhelper to accept ExporterCreateSettings instead of just logger (#3569)
+- Deprecate Resize() from pdata slice APIs (#3573)
+- Use Func pattern in processorhelper, consistent with others (#3570)
+
+## 💡 Enhancements 💡
+
+- Update OTLP to v0.8.0 (#3572)
+- Migrate from OpenCensus to OpenTelemetry for internal tracing (#3567)
+- Move internal/pdatagrpc to model/otlpgrpc (#3507)
+- Move internal/otlp to model/otlp (#3508)
+- Create http Server via Config, enable cors and decompression (#3513)
+- Allow users to set min and max TLS versions (#3591)
+- Support setting ballast size in percentage of total Mem in ballast extension (#3456)
+- Publish go.opentelemetry.io/collector/model as a separate module (#3530)
+- Pass a TracerProvider via construct settings to all the components (#3592)
+- Make graceful shutdown optional (#3577)
+
+## 🧰 Bug fixes 🧰
+
+- `scraperhelper`: Include the scraper name in log messages (#3487)
+- `scraperhelper`: fix case when returned pdata is empty (#3520)
+- Record the correct number of points not metrics in Kafka receiver (#3553)
+- Validate the Prometheus configuration (#3589)
+
+## v0.29.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Rename `service.Application` to `service.Collector` (#3268)
+- Provide case sensitivity in config yaml mappings by using Koanf instead of Viper (#3337)
+- Move zipkin constants to an internal package (#3431)
+- Disallow renaming metrics using metric relabel configs (#3410)
+- Move cgroup and iruntime utils from memory_limiter to internal folder (#3448)
+- Move model pdata interfaces to pdata, expose them publicly (#3455)
+
+## 💡 Enhancements 💡
+
+- Change obsreport helpers for scraper to use the same pattern as Processor/Exporter (#3327)
+- Convert `otlptext` to implement Marshaler interfaces (#3366)
+- Add encoder/decoder and marshaler/unmarshaler for OTLP protobuf (#3401)
+- Use the new marshaler/unmarshaler in `kafka` exporter (#3403)
+- Convert `zipkinv2` to to/from translator interfaces (#3409)
+- `zipkinv1`: Move to translator and encoders interfaces (#3419)
+- Use the new marshaler/unmarshaler in `kafka` receiver (#3402)
+- Change `otlp` receiver to use the new unmarshaler, avoid grpc-gateway dependency (#3406)
+- Use the new Marshaler in the `otlphttp` exporter (#3433)
+- Add grpc response struct for all signals instead of returning interface in `otlp` receiver/exporter (#3437)
+- `zipkinv2`: Add encoders, decoders, marshalers (#3426)
+- `scrapererror` receiver: Return concrete error type (#3360)
+- `kafka` receiver: Add metrics support (#3452)
+- `prometheus` receiver:
+  - Add store to track stale metrics (#3414)
+  - Add `up` and `scrape_xxxx` internal metrics (#3116)
+
+## 🧰 Bug fixes 🧰
+
+- `prometheus` receiver:
+  - Reject datapoints with duplicate label keys (#3408)
+  - Scrapers are not stopped when receiver is shutdown (#3450)
+- `prometheusremotewrite` exporter: Adjust default retry settings (#3416)
+- `hostmetrics` receiver: Fix missing startTimestamp for `processes` scraper (#3461)
+
+## v0.28.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Remove unused logstest package (#3222)
+- Introduce `AppSettings` instead of `Parameters` (#3163)
+- Remove unused testutil.TempSocketName (#3291)
+- Move BigEndian helper functions in `tracetranslator` to an internal package. (#3298)
+- Rename `configtest.LoadConfigFile` to `configtest.LoadConfigAndValidate` (#3306)
+- Replace `ExtensionCreateParams` with `ExtensionCreateSettings` (#3294)
+- Replace `ProcessorCreateParams` with `ProcessorCreateSettings`. (#3181)
+- Replace `ExporterCreateParams` with `ExporterCreateSettings` (#3164)
+- Replace `ReceiverCreateParams` with `ReceiverCreateSettings`. (#3167)
+- Change `batchprocessor` logic to limit data points rather than metrics (#3141)
+- Rename `PrwExporter` to `PRWExporter` and `NewPrwExporter` to `NewPRWExporter` (#3246)
+- Avoid exposing OpenCensus reference in public APIs (#3253)
+- Move `config.Parser` to `configparser.Parser` (#3304)
+- Remove deprecated funcs inside the obsreceiver (#3314)
+- Remove `obsreport.GRPCServerWithObservabilityEnabled`, enable observability in config (#3315)
+- Remove `obsreport.ProcessorMetricViews`, use `BuildProcessorCustomMetricName` where needed (#3316)
+- Remove "Receive" from `obsreport.Receiver` funcs (#3326)
+- Remove "Export" from `obsreport.Exporter` funcs (#3333)
+- Hide unnecessary public struct `obsreport.StartReceiveOptions` (#3353)
+- Avoid exposing internal implementation public in OC/OTEL receivers (#3355)
+- Updated configgrpc `ToDialOptions` and confighttp `ToClient` apis to take extensions configuration map (#3340)
+- Remove `GenerateSequentialTraceID` and `GenerateSequentialSpanID` functions in testbed (#3390)
+- Change "grpc" to "GRPC" in configauth function/type names (#3285)
+
+## 💡 Enhancements 💡
+
+- Add `doc.go` files to the consumer package and its subpackages (#3270)
+- Improve documentation of consumer package and subpackages (#3269, #3361)
+- Automate triggering of doc-update on release (#3234)
+- Enable Dependabot for Github Actions (#3312)
+- Remove the proto dependency in `goldendataset` for traces (#3322)
+- Add telemetry for dropped data due to exporter sending queue overflow (#3328)
+- Add initial implementation of `pdatagrpc` (#3231)
+- Change receiver obsreport helpers pattern to match the Processor/Exporter (#3227)
+- Add model translation and encoding interfaces (#3200)
+- Add otlpjson as a serializer implementation (#3238)
+- `prometheus` receiver:
+  - Add `createNodeAndResourcePdata` for Prometheus->OTLP pdata (#3139)
+  - Direct metricfamily Prometheus->OTLP (#3145)
+- Add `componenttest.NewNop*CreateSettings` to simplify tests (#3375)
+- Add support for markdown generation (#3100)
+- Refactor components for the Client Authentication Extensions (#3287)
+
+## 🧰 Bug fixes 🧰
+
+- Use dedicated `zapcore.Core` for Windows service (#3147)
+- Hook up start and shutdown functions in fileexporter (#3260)
+- Fix oc to pdata translation for sum non-monotonic cumulative (#3272)
+- Fix `timeseriesSignature` in prometheus receiver (#3310)
+
+## v0.27.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Change `Marshal` signatures in kafkaexporter's Marshalers to directly convert pdata to `sarama.ProducerMessage` (#3162)
+- Remove `tracetranslator.DetermineValueType`, only used internally by Zipkin (#3114)
+- Remove OpenCensus conventions, should not be used (#3113)
+- Remove Zipkin specific translation constants, move to internal (#3112)
+- Remove `tracetranslator.TagHTTPStatusCode`, use `conventions.AttributeHTTPStatusCode` (#3111)
+- Remove OpenCensus status constants and transformation (#3110)
+- Remove `tracetranslator.AttributeArrayToSlice`, not used in core or contrib (#3109)
+- Remove `internaldata.MetricsData`, same APIs as for traces (#3156)
+- Rename `config.IDFromString` to `NewIDFromString`, remove `MustIDFromString` (#3177)
+- Move consumerfanout package to internal (#3207)
+- Canonicalize enum names in pdata. Fix usage of uppercase names (#3208)
+
+## 💡 Enhancements 💡
+
+- Use `config.ComponentID` for obsreport receiver/scraper (#3098)
+- Add initial implementation of the consumerhelper (#3146)
+- Add Collector version to Prometheus Remote Write Exporter user-agent header (#3094)
+- Refactor processorhelper to use consumerhelper, split by signal type (#3180)
+- Use consumerhelper for exporterhelper, add WithCapabilities (#3186)
+- Set capabilities for all core exporters, remove unnecessary funcs (#3190)
+- Add an internal sharedcomponent to be shared by receivers with shared resources (#3198)
+- Allow users to configure the Prometheus remote write queue (#3046)
+- Mark internaldata traces translation as deprecated for external usage (#3176)
+
+## 🧰 Bug fixes 🧰
+
+- Fix Prometheus receiver metric start time and reset determination logic. (#3047)
+  - The receiver will no longer drop the first sample for `counter`, `summary`, and `histogram` metrics.
+- The Prometheus remote write exporter will no longer force `counter` metrics to have a `_total` suffix. (#2993)
+- Remove locking from jaeger receiver start and stop processes (#3070)
+- Fix batch processor metrics reorder, improve performance (#3034)
+- Fix batch processor traces reorder, improve performance (#3107)
+- Fix batch processor logs reorder, improve performance (#3125)
+- Avoid one unnecessary allocation in grpc OTLP exporter (#3122)
+- `batch` processor: Validate that batch config max size is greater than send size (#3126)
+- Add capabilities to consumer, remove from processor (#2770)
+- Remove internal protos usage in Prometheusremotewrite exporter (#3184)
+- `prometheus` receiver: Honor Prometheus external labels (#3127)
+- Validate that remote write queue settings are not negative (#3213)
+
+## v0.26.0 Beta
+
+## 🛑 Breaking changes 🛑
+
+- Change `With*Unmarshallers` signatures in Kafka exporter/receiver (#2973)
+- Rename `marshall` to `marshal` in all the occurrences (#2977)
+- Remove `componenterror.ErrAlreadyStarted` and `componenterror.ErrAlreadyStopped`, components should not protect against this, Service will start/stop once.
+- Rename `ApplicationStartInfo` to `BuildInfo` +- Rename `ApplicationStartInfo.ExeName` to `BuildInfo.Command` +- Rename `ApplicationStartInfo.LongName` to `BuildInfo.Description` + +## 💡 Enhancements 💡 + +- `kafka` exporter: Add logs support (#2943) +- Add AppendEmpty and deprecate Append for slices (#2970) +- Update mdatagen to create factories of init instead of new (#2978) +- `zipkin` receiver: Reduce the judgment of zipkin v1 version (#2990) +- Custom authenticator logic to accept a `component.Host` which will extract the authenticator to use based on a new authenticator name property (#2767) +- `prometheusremotewrite` exporter: Add `resource_to_telemetry_conversion` config option (#3031) +- `logging` exporter: Extract OTLP text logging (#3082) +- Format timestamps as strings instead of int in otlptext output (#3088) +- Add darwin arm64 build (#3090) + +## 🧰 Bug fixes 🧰 + +- Fix Jaeger receiver to honor TLS Settings (#2866) +- `zipkin` translator: Handle missing starttime case for zipkin json v2 format spans (#2506) +- `prometheus` exporter: Fix OTEL resource label drops (#2899) +- `prometheusremotewrite` exporter: +  - Enable the queue internally (#2974) +  - Don't drop instance and job labels (#2979) +- `jaeger` receiver: Wait for server goroutines exit on shutdown (#2985) +- `logging` exporter: Ignore invalid handle on close (#2994) +- Fix service zpages (#2996) +- `batch` processor: Fix to avoid reordering and send max size (#3029) + + +## v0.25.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename ForEach (in pdata) to Range to be consistent with sync.Map (#2931) +- Rename `componenthelper.Start` to `componenthelper.StartFunc` (#2880) +- Rename `componenthelper.Stop` to `componenthelper.StopFunc` (#2880) +- Remove `exporterhelper.WithCustomUnmarshaler`, `processorhelper.WithCustomUnmarshaler`, `receiverhelper.WithCustomUnmarshaler`, `extensionhelper.WithCustomUnmarshaler`, implement `config.CustomUnmarshaler` interface instead (#2867) +- Remove `component.CustomUnmarshaler`, implement `config.CustomUnmarshaler` interface instead (#2867) +- Remove `testutil.HostPortFromAddr`, users can write their own parsing helper (#2919) +- Remove `configparser.DecodeTypeAndName`, use `config.IDFromString` (#2869) +- Remove `config.NewViper`, users should use `config.NewParser` (#2917) +- Remove `testutil.WaitFor`, use `testify.Eventually` helper if needed (#2920) +- Remove testutil.WaitForPort, users can use testify.Eventually (#2926) +- Rename `processorhelper.NewTraceProcessor` to `processorhelper.NewTracesProcessor` (#2935) +- Rename `exporterhelper.NewTraceExporter` to `exporterhelper.NewTracesExporter` (#2937) +- Remove InitEmptyWithCapacity, add EnsureCapacity and Clear (#2845) +- Rename traces methods/objects to include Traces in Kafka receiver (#2966) + +## 💡 Enhancements 💡 + +- Add `validatable` interface with `Validate()` to all `config.` (#2898) +  - add the empty `Validate()` implementation for all component configs +- **Experimental**: Add a config source manager that wraps the interaction with config sources (#2857, #2903, #2948) +- `kafka` exporter: Key jaeger messages on traceid (#2855) +- `scraperhelper`: Don't try to count metrics if scraper returns an error (#2902) +- Extract ConfigFactory in a ParserProvider interface (#2868) +- `prometheus` exporter: Allows Summary metrics to be exported to Prometheus (#2900) +- `prometheus` receiver: Optimize `dpgSignature` function (#2945) +- `kafka` receiver: Add logs support (#2944) + +## 🧰 Bug fixes 🧰 + +- `prometheus` receiver: +  - Treat
Summary and Histogram metrics without "_sum" counter as valid metric (#2812) +  - Add `job` and `instance` as well-known labels (#2897) +- `prometheusremotewrite` exporter: +  - Sort Sample by Timestamp to avoid out of order errors (#2941) +  - Remove incompatible queued retry (#2951) +- `kafka` receiver: Fix data race with batchprocessor (#2957) +- `jaeger` receiver: Jaeger agent should not report ErrServerClosed (#2965) + +## v0.24.0 Beta + +## 🛑 Breaking changes 🛑 + +- Remove legacy internal metrics for memorylimiter processor, `spans_dropped` and `trace_batches_dropped` (#2841) +  - For `spans_dropped` use `processor/refused_spans` with `processor=memorylimiter` +- Rename pdata.*.[Start|End]Time to pdata.*.[Start|End]Timestamp (#2847) +- Rename pdata.DoubleExemplar to pdata.Exemplar (#2804) +- Rename pdata.DoubleHistogram to pdata.Histogram (#2797) +- Rename pdata.DoubleSummary to pdata.Summary (#2774) +- Refactor `consumererror` package (#2768) +  - Remove `PartialError` type in favor of signal-specific types +  - Rename `CombineErrors()` to `Combine()` +- Refactor `componenthelper` package (#2778) +  - Remove `ComponentSettings` and `DefaultComponentSettings()` +  - Rename `NewComponent()` to `New()` +- obsReport.NewExporter accepts a settings struct (#2668) +- Remove ErrorWaitingHost from `componenttest` (#2582) +- Move `config.Load` to `configparser.Load` (#2796) +- Remove `configtest.NewViperFromYamlFile()`, use `config.Parser.NewParserFromFile()` (#2806) +- Remove `config.ViperSubExact()`, use `config.Parser.Sub()` (#2806) +- Update LoadReceiver signature to remove unused params (#2823) +- Move `configerror.ErrDataTypeIsNotSupported` to `componenterror.ErrDataTypeIsNotSupported` (#2886) +- Rename `CreateTraceExporter` type to `CreateTracesExporter` in `exporterhelper` (#2779) +- Move `fluentbit` extension to contrib (#2795) +- Move `configmodels` to `config` (#2808) +- Move `fluentforward` receiver to contrib (#2723) + +## 💡 Enhancements 💡 + +- `batch` processor: Support max batch size for logs (#2736) +- Use `Endpoint` for health check extension (#2782) +- Use `confignet.TCPAddr` for `pprof` and `zpages` extensions (#2829) +- Deprecate `consumertest.New[${SIGNAL}]Nop` in favor of `consumertest.NewNop` (#2878) +- Deprecate `consumertest.New[${SIGNAL}]Err` in favor of `consumertest.NewErr` (#2878) +- Add watcher to values retrieved via config sources (#2803) +- Updates for cloud semantic conventions (#2809) +  - `cloud.infrastructure_service` -> `cloud.platform` +  - `cloud.zone` -> `cloud.availability_zone` +- Add systemd environment file for deb/rpm packages (#2822) +- Add validate interface in `configmodels` to force each component to do configuration validation (#2802, #2856) +- Add `aws.ecs.task.revision` to semantic conventions list (#2816) +- Set unprivileged user to container image (#2838) +- Add New funcs for extension, exporter, processor config settings (#2872) +- Report metric about current size of the exporter retry queue (#2858) +- Allow adding new signals in `ProcessorFactory` by forcing everyone to embed `BaseProcessorFactory` (#2885) + +## 🧰 Bug fixes 🧰 + +- `pdata.TracesFromOtlpProtoBytes`: Fixes to handle backwards compatibility changes in proto (#2798) +- `jaeger` receiver: Escape user input used in output (#2815) +- `prometheus` exporter: Ensure same time is used for updated time (#2745) +- `prometheusremotewrite` exporter: Close HTTP response body (#2875) + +## v0.23.0 Beta + +## 🛑 Breaking changes 🛑 + +- Move fanout consumers to fanoutconsumer package (#2615) +- Rename
ExporterObsReport to Exporter (#2658) +- Rename ProcessorObsReport to Processor (#2657) +- Remove ValidateConfig and add Validate on the Config struct (#2665) +- Rename pdata Size to OtlpProtoSize (#2726) +- Rename [Traces|Metrics|Logs]Consumer to [Traces|Metrics|Logs] (#2761) +- Remove public access for `componenttest.Example*` components: +  - Users of these structs for testing configs should use the newly added `componenttest.Nop*` (update all component names in the config `example*` -> `nop` and use `componenttest.NopComponents()`). +  - Users of these structs for sink-like behavior should use `consumertest.*Sink`. + +## 💡 Enhancements 💡 + +- `hostmetrics` receiver: List labels along with respective metrics in metadata (#2662) +- `exporter` helper: Remove obsreport.ExporterContext, always add exporter name as a tag to the metrics (#2682) +- `jaeger` exporter: Change to not use internal data (#2698) +- `kafka` receiver: Change to not use internal data (#2697) +- `zipkin` receiver: Change to not use internal data (#2699) +- `kafka` exporter: Change to not use internal data (#2696) +- Ensure that extensions can be created and started multiple times (#2679) +- Use otlp request in logs wrapper, hide members in the wrapper (#2692) +- Add MetricsWrapper to disallow access to internal representation (#2693) +- Add TracesWrapper to disallow access to internal representation (#2721) +- Allow multiple OTLP receivers to be created (#2743) + +## 🧰 Bug fixes 🧰 + +- `prometheus` exporter: Fix to work with standard labels that follow the naming convention of using periods instead of underscores (#2707) +- Propagate name and transport for `prometheus` receiver and exporter (#2680) +- `zipkin` receiver: Ensure shutdown correctness (#2765) + +## v0.22.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename ServiceExtension to just Extension (#2581) +- Remove `consumerdata.TraceData` (#2551) +- Move `consumerdata.MetricsData` to `internaldata.MetricsData` (#2512) +- Remove custom OpenCensus semantic conventions that have an equivalent in otel (#2552) +- Move ScrapeErrors and PartialScrapeError to `scrapererror` (#2580) +- Remove support for deprecated unmarshaler `CustomUnmarshaler`, only `Unmarshal` is supported (#2591) +- Remove deprecated componenterror.CombineErrors (#2598) +- Rename `pdata.TimestampUnixNanos` to `pdata.Timestamp` (#2549) + +## 💡 Enhancements 💡 + +- `prometheus` exporter: Re-implement on top of `github.com/prometheus/client_golang/prometheus` and add `metric_expiration` option +- `logging` exporter: Add support for AttributeMap (#2609) +- Add semantic conventions for instrumentation library (#2602) + +## 🧰 Bug fixes 🧰 + +- `otlp` receiver: Fix `Shutdown()` bug (#2564) +- `batch` processor: Fix Shutdown behavior (#2537) +- `logging` exporter: Fix handling the loop for empty attributes (#2610) +- `prometheusremotewrite` exporter: Fix counter name check (#2613) + +## v0.21.0 Beta + +## 🛑 Breaking changes 🛑 + +- Remove deprecated function `IsValid` from trace/span ID (#2522) +- Remove accessors for deprecated status code (#2521) + +## 💡 Enhancements 💡 + +- `otlphttp` exporter: Add `compression` option for gzip encoding of outgoing http requests (#2502) +- Add `ScrapeErrors` struct to `consumererror` to simplify errors usage (#2414) +- Add `cors_allowed_headers` option to `confighttp` (#2454) +- Add SASL/SCRAM authentication mechanism on `kafka` receiver and exporter (#2503) + +## 🧰 Bug fixes 🧰 + +- `otlp` receiver: Sets the correct deprecated status code before sending data to the pipeline
(#2521) +- Fix `IsPermanent` to account for wrapped errors (#2455) +- `otlp` exporter: Preserve original error messages (#2459) + +## v0.20.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename `samplingprocessor/probabilisticsamplerprocessor` to `probabilisticsamplerprocessor` (#2392) + +## 💡 Enhancements 💡 + +- `hostmetrics` receiver: Refactor to use metrics metadata utilities (#2405, #2406, #2421) +- Add k8s.node semantic conventions (#2425) + +## v0.19.0 Beta + +## 🛑 Breaking changes 🛑 +- Remove deprecated `queued_retry` processor +- Remove deprecated configs from `resource` processor: `type` (set "opencensus.type" key in "attributes.upsert" map instead) and `labels` (use "attributes.upsert" instead). + +## 💡 Enhancements 💡 + +- `hostmetrics` receiver: Refactor load metrics to use generated metrics (#2375) +- Add uptime to the servicez debug page (#2385) +- Add new semantic conventions for AWS (#2365) + +## 🧰 Bug fixes 🧰 + +- `jaeger` exporter: Improve connection state logging (#2239) +- `pdatagen`: Fix slice of values generated code (#2403) +- `filterset` processor: Avoid always returning a nil error in strict filterset (#2399) + +## v0.18.0 Beta + +## 🛑 Breaking changes 🛑 +- Rename host metrics according to metrics spec and rename `swap` scraper to `paging` (#2311) + +## 💡 Enhancements 💡 + +- Add check for `NO_WINDOWS_SERVICE` environment variable to force interactive mode on Windows (#2272) +- `hostmetrics` receiver: Add `disk/weighted_io_time` metric (Linux only) (#2312) +- `opencensus` exporter: Add queue-retry (#2307) +- `filter` processor: Filter metrics using resource attributes (#2251) + +## 🧰 Bug fixes 🧰 + +- `fluentforward` receiver: Fix string conversions (#2314) +- Fix zipkinv2 translation error tag handling (#2253) + +## v0.17.0 Beta + +## 💡 Enhancements 💡 + +- Default config environment variable expansion (#2231) +- `prometheusremotewrite` exporter: Add batched exports (#2249) +- `memorylimiter` processor: Introduce soft and hard limits (#2250) + +## 🧰 Bug fixes 🧰 + +- Fix nits in pdata usage (#2235) +- Convert status to not be a pointer in the Span (#2242) +- Report the error from `pprof.StartCPUProfile` (#2263) +- Rename `service.Application.SignalTestComplete` to `Shutdown` (#2277) + +## v0.16.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename Push functions to be consistent across signals in `exporterhelper` (#2203) + +## 💡 Enhancements 💡 + +- Change default OTLP/gRPC port number to 4317, also continue receiving on legacy port + 55680 during transition period (#2104). +- `kafka` exporter: Add support for exporting metrics as otlp Protobuf.
(#1966) +- Move scraper helpers to their own `scraperhelper` package (#2185) +- Add `componenthelper` package to help build components (#2186) +- Remove usage of custom init/stop in `scraper` and use start/shutdown from `component` (#2193) +- Add more trace annotations, so zpages are more useful to determine failures (#2206) +- Add support to skip TLS verification (#2202) +- Expose non-nullable metric types (#2208) +- Expose non-nullable elements from slices of pointers (#2200) + +## 🧰 Bug fixes 🧰 + +- Change InstrumentationLibrary to be non-nullable (#2196) +- Add support for slices to non-pointers, use non-nullable AnyValue (#2192) +- Fix `--set` flag to work with `{}` in configs (#2162) + +## v0.15.0 Beta + +## 🛑 Breaking changes 🛑 + +- Remove legacy metrics, they were marked as legacy for ~12 months #2105 + +## 💡 Enhancements 💡 + +- Implement conversion between OpenCensus and OpenTelemetry Summary Metric (#2048) +- Add ability to generate non-nullable messages (#2005) +- Implement Summary Metric in Prometheus RemoteWrite Exporter (#2083) +- Add `resource_to_telemetry_conversion` to exporter helper, expose exporter settings (#2060) +- Add `CustomRoundTripper` function to httpclientconfig (#2085) +- Allow for more logging options to be passed to `service` (#2132) +- Add config parameters for `jaeger` receiver (#2068) +- Map unset status code for `jaeger` translator as per spec (#2134) +- Add more trace annotations to the queue-retry logic (#2136) +- Add config settings for component telemetry (#2148) +- Use net.SplitHostPort for IPv6 support in `prometheus` receiver (#2154) +- Add --log-format command line option (defaults to "console") #2177. + +## 🧰 Bug fixes 🧰 + +- `logging` exporter: Add Logging for Summary Datapoint (#2084) +- `hostmetrics` receiver: use correct TCP state labels on Unix systems (#2087) +- Fix otlp_log receiver wrong use of trace measurement (#2117) +- Fix "process/memory/rss" metric units (#2112) +- Fix "process/cpu_seconds" metrics (#2113) +- Add check for nil logger in exporterhelper functions (#2141) +- `prometheus` receiver: +  - Upgrade Prometheus version to fix race condition (#2121) +  - Fix the scraper/discover manager coordination (#2089) +  - Fix panic when adjusting buckets (#2168) + +## v0.14.0 Beta + +## 🚀 New components 🚀 + +- `otlphttp` exporter which implements OTLP over HTTP protocol.
+ +## 🛑 Breaking changes 🛑 + +- Rename consumer.TraceConsumer to consumer.TracesConsumer #1974 +- Rename component.TraceReceiver to component.TracesReceiver #1975 +- Rename component.TraceProcessor to component.TracesProcessor #1976 +- Rename component.TraceExporter to component.TracesExporter #1975 +- Deprecate NopExporter, add NopConsumer (#1972) +- Deprecate SinkExporter, add SinkConsumer (#1973) +- Move `tailsampling` processor to contrib (#2012) +- Remove NewAttributeValueSlice (#2028) and mark NewAttributeValue as deprecated (#2022) +- Remove pdata.StringValue (#2021) +- Remove pdata.InitFromAttributeMap, use CopyTo if needed (#2042) +- Remove SetMapVal and SetArrayVal for pdata.AttributeValue (#2039) + +## 💡 Enhancements 💡 + +- `zipkin` exporter: Add queue retry to zipkin (#1971) +- `prometheus` exporter: Add `send_timestamps` option (#1951) +- `filter` processor: Add `expr` pdata.Metric filtering support (#1940, #1996) +- `attribute` processor: Add log support (#1934) +- `logging` exporter: Add index for histogram buckets count (#2009) +- `otlphttp` exporter: Add correct handling of server error responses (#2016) +- `prometheusremotewrite` exporter: +  - Add user agent header to outgoing http request (#2000) +  - Convert histograms to cumulative (#2049) +  - Return permanent errors (#2053) +  - Add external labels (#2044) +- `hostmetrics` receiver: Use scraper controller (#1949) +- Change Span/Trace ID to be byte array (#2001) +- Add `simple` metrics helper to facilitate building pdata.Metrics in receivers (#1540) +- Improve diagnostic logging for exporters (#2020) +- Add obsreport to receiverhelper scrapers (#1961) +- Update OTLP to 0.6.0 and use the new Span Status code (#2031) +- Add support of partial requests for logs and metrics to the exporterhelper (#2059) + +## 🧰 Bug fixes 🧰 + +- `logging` exporter: Added array serialization (#1994) +- `zipkin` receiver: Allow receiver to parse string tags (#1893) +- `batch` processor: Fix shutdown race (#1967) +- Guard for nil data points (#2055) + +## v0.13.0 Beta + +## 🛑 Breaking changes 🛑 + +- Host metric `system.disk.time` renamed to `system.disk.operation_time` (#1887) +- Use consumer for sender interface, remove unnecessary receiver address from Runner (#1941) +- Enable sending queue by default in all exporters configured to use it (#1924) +- Removed `groupbytraceprocessor` (#1891) +- Remove ability to configure collection interval per scraper (#1947) + +## 💡 Enhancements 💡 + +- Host Metrics receiver now reports both `system.disk.io_time` and `system.disk.operation_time` (#1887) +- Match spans against the instrumentation library and resource attributes (#928) +- Add `receiverhelper` for creating flexible "scraper" metrics receiver (#1886, #1890, #1945, #1946) +- Migrate `tailsampling` processor to new OTLP-based internal data model and add Composite Sampler (#1894) +- Metadata Generator: Change Metrics fields to implement an interface with new methods (#1912) +- Add unmarshalling for `pdata.Traces` (#1948) +- Add debug-level message on error for `jaeger` exporter (#1964) + +## 🧰 Bug fixes 🧰 + +- Fix bug where the service does not correctly start/stop the log exporters (#1943) +- Fix Queued Retry Unusable without Batch Processor (#1813, #1930) +- `prometheus` receiver: Log error message when `process_start_time_seconds` gauge is missing (#1921) +- Fix zero time bug in jaeger conversion to internal traces (#1957) +- Fix panic in otlp traces to zipkin (#1963) +- Fix OTLP/HTTP receiver's path to be /v1/traces (#1979) + +## v0.12.0
Beta + +## 🚀 New components 🚀 + +- `configauth` package with the auth settings that can be used by receivers (#1807, #1808, #1809, #1810) +- `perfcounters` package that uses perflib for host metrics receiver (#1835, #1836, #1868, #1869, #1870) + +## 💡 Enhancements 💡 + +- Remove `queued_retry` and enable `otlp` metrics receiver in default config (#1823, #1838) +- Add `limit_percentage` and `spike_limit_percentage` options to `memorylimiter` processor (#1622) +- `hostmetrics` receiver: +  - Collect additional labels from partitions in the filesystems scraper (#1858) +  - Add filters for mount point and filesystem type (#1866) +- Add cloud.provider semantic conventions (#1865) +- `attribute` processor: Add log support (#1783) +- Deprecate OpenCensus-based internal data structures (#1843) +- Introduce SpanID data type, not yet used in Protobuf messages (#1854, #1855) +- Enable `otlp` trace by default in the released docker image (#1883) +- `tailsampling` processor: Combine batches of spans into a single batch (#1864) +- `filter` processor: Update to use pdata (#1885) +- Allow MSI upgrades (#1914) + +## 🧰 Bug fixes 🧰 + +- `prometheus` receiver: Print a more informative message about 'up' metric value (#1826) +- Use custom data type and custom JSON serialization for traceid (#1840) +- Skip creation of redundant nil resource in translation from OC if there are no combined metrics (#1803) +- `tailsampling` processor: Only send to next consumer once (#1735) +- Report Windows pagefile usage in bytes (#1837) +- Fix issue where Prometheus SD config cannot be parsed (#1877) + +## v0.11.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename service.Start() to Run() since it's a blocking call +- Fix slice Append to accept the element by value in pdata +- Change CreateTraceProcessor and CreateMetricsProcessor to use the same parameter order as receivers/logs processor and exporters. +- Prevent accidental use of LogsToOtlp and LogsFromOtlp and the OTLP data structs (#1703) +- Remove SetType from configmodels, ensure all registered factories set the type in config (#1798) +- Move process telemetry to service/internal (#1794) + +## 💡 Enhancements 💡 + +- Add map and array attribute value type support (#1656) +- Add authentication support to kafka (#1632) +- Implement InstrumentationLibrary translation to jaeger (#1645) +- Add public functions to export pdata to ExportXServicesRequest Protobuf bytes (#1741) +- Expose telemetry level in the configtelemetry (#1796) +- Add configauth package (#1807) +- Add config to docker image (#1792) + +## 🧰 Bug fixes 🧰 + +- Use zap int argument for int values instead of conversion (#1779) +- Add support for gzip encoded payload in OTLP/HTTP receiver (#1581) +- Return proto status for OTLP receiver when failed (#1788) + +## v0.10.0 Beta + +## 🛑 Breaking changes 🛑 + +- **Update OTLP to v0.5.0, incompatible metrics protocol.** +- Remove support for propagating summary metrics in OtelCollector. +  - This is a temporary change, and will affect mostly OpenCensus users who use metrics.
+ +## 💡 Enhancements 💡 +- Support zipkin proto in `kafka` receiver (#1646) +- Prometheus Remote Write Exporter supporting Cortex (#1577, #1643) +- Add deployment environment semantic convention (#1722) +- Add logs support to `batch` and `resource` processors (#1723, #1729) + +## 🧰 Bug fixes 🧰 +- Identify config error when expected map is another value type (#1641) +- Fix Kafka receiver closing ready channel multiple times (#1696) +- Fix a panic issue while processing Zipkin spans with an empty service name (#1742) +- Zipkin Receiver: Always set the endtime (#1750) + +## v0.9.0 Beta + +## 🛑 Breaking changes 🛑 + +- **Remove old base factories**: +  - `ReceiverFactoryBase` (#1583) +  - `ProcessorFactoryBase` (#1596) +  - `ExporterFactoryBase` (#1630) +- Remove logs factories and merge with normal factories (#1569) +- Remove `reconnection_delay` from OpenCensus exporter (#1516) +- Remove `ConsumerOld` interfaces (#1631) + +## 🚀 New components 🚀 +- `prometheusremotewrite` exporter: Send metrics data in Prometheus TimeSeries format to Cortex or any Prometheus (#1544) +- `kafka` receiver: Receive traces from Kafka (#1410) + +## 💡 Enhancements 💡 +- `kafka` exporter: Enable queueing, retry, timeout (#1455) +- Add `Headers` field in HTTPClientSettings (#1552) +- Change OpenCensus receiver (#1556) and exporter (#1571) to the new interfaces +- Add semantic attribute for `telemetry.auto.version` (#1578) +- Add uptime and RSS memory self-observability metrics (#1549) +- Support conversion for OpenCensus `SameProcessAsParentSpan` (#1629) +- Access application version in components (#1559) +- Make Kafka payload encoding configurable (#1584) + +## 🧰 Bug fixes 🧰 +- Stop further processing if `filterprocessor` filters all data (#1500) +- `processscraper`: Use same scrape time for all data points coming from same process (#1539) +- Ensure that time conversion for 0 returns nil timestamps or Time where IsZero returns true (#1550) +- Fix multiple exporters panic (#1563) +- Allow `attribute` processor for external use (#1574) +- Do not duplicate filesystem metrics for devices with many mount points (#1617) + +## v0.8.0 Beta + +## 🚀 New components 🚀 + +- `groupbytrace` processor that waits for a trace to be completed (#1362) + +## 💡 Enhancements 💡 + +- Migrate `zipkin` receiver/exporter to the new interfaces (#1484) +- Migrate `prometheus` receiver/exporter to the new interfaces (#1477, #1515) +- Add new FactoryUnmarshaler support to all components, deprecate old way (#1468) +- Update `fileexporter` to write data in OTLP (#1488) +- Add extension factory helper (#1485) +- Host scrapers: Use same scrape time for all data points coming from same source (#1473) +- Make logs SeverityNumber publicly available (#1496) +- Add recently included conventions for k8s and container resources (#1519) +- Add new config StartTimeMetricRegex to `prometheus` receiver (#1511) +- Convert Zipkin receiver and exporter to use OTLP (#1446) + +## 🧰 Bug fixes 🧰 + +- Infer OpenCensus resource type based on OpenTelemetry's semantic conventions (#1462) +- Fix log adapter in `prometheus` receiver (#1493) +- Avoid frequent errors for process telemetry on Windows (#1487) + +## v0.7.0 Beta + +## 🚀 New components 🚀 + +- Receivers +  - `fluentforward` runs a TCP server that accepts events via the [Fluent Forward protocol](https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1) (#1173) +- Exporters +  - `kafka` exports traces to Kafka (#1439) +- Extensions +  - **Experimental** `fluentbit` facilitates running a FluentBit subprocess
of the collector (#1381) + +## 💡 Enhancements 💡 + +- Updated `golang/protobuf` from v1.3.5 to v1.4.2 (#1308) +- Updated `opencensus-proto` from v0.2.1 to v0.3.0 (#1308) +- Added round_robin `balancer_name` as an option to gRPC client settings (#1353) +- `hostmetrics` receiver +  - Switch to using perf counters to get disk io metrics on Windows (#1340) +  - Add device filter for file system (#1379) and disk (#1378) scrapers +  - Record process physical & virtual memory stats separately (#1403) +  - Scrape system.disk.time on Windows (#1408) +  - Add disk.pending_operations metric (#1428) +  - Add network interface label to network metrics (#1377) +- Add `exporterhelper` (#1351) and `processorhelper` (#1359) factories +- Update OTLP to latest version (#1384) +- Disable timeout, retry on failure and sending queue for `logging` exporter (#1400) +- Add support for retry and sending queue for `jaeger` exporter (#1401) +- Add batch size bytes metric to `batch` processor (#1270) +- `otlp` receiver: Add Log Support (#1444) +- Allow to configure read/write buffer sizes for http Client (#1447) +- Update DB conventions to latest and add exception conventions (#1452) + +## 🧰 Bug fixes 🧰 + +- Fix `resource` processor for old metrics (#1412) +- `jaeger` receiver: Do not try to stop if failed to start. Collector service will do that (#1434) + +## v0.6.0 Beta + +## 🛑 Breaking changes 🛑 + +- Renamed the metrics generated by `hostmetrics` receiver to match the (currently still pending) OpenTelemetry system metric conventions (#1261) (#1269) +- Removed `vmmetrics` receiver (#1282) +- Removed `cpu` scraper `report_per_cpu` config option (#1326) + +## 💡 Enhancements 💡 + +- Added disk merged (#1267) and process count (#1268) metrics to `hostmetrics` +- Log metric data points in `logging` exporter (#1258) +- Changed the `batch` processor to not ignore the errors returned by the exporters (#1259) +- Build and publish MSI (#1153) and DEB/RPM packages (#1278, #1335) +- Added batch size metric to `batch` processor (#1241) +- Added log support for `memorylimiter` processor (#1291) and `logging` exporter (#1298) +- Always add tags for `observability`, other metrics may use them (#1312) +- Added metrics support (#1313) and allow partial retries in `queued_retry` processor (#1297) +- Update `resource` processor: introduce `attributes` config parameter to specify actions on attributes similar to `attributes` processor, old config interface is deprecated (#1315) +- Update memory state labels for non-Linux OSs (#1325) +- Ensure tcp connection value is provided for all states, even when count is 0 (#1329) +- Set `batch` processor channel size to num cpus (#1330) +- Add `send_batch_max_size` config parameter to `batch` processor enforcing hard limit on batch size (#1310) +- Add support for including a per-RPC authentication to gRPC settings (#1250) + +## 🧰 Bug fixes 🧰 + +- Fixed OTLP waitForReady, not set from config (#1254) +- Fixed all translation diffs between OTLP and Jaeger (#1222) +- Disabled `process` scraper for any non-Linux/Windows OS (#1328) + +## v0.5.0 Beta + +## 🛑 Breaking changes 🛑 + +- **Update OTLP to v0.4.0 (#1142)**: Collector will be incompatible with any other sender or receiver of OTLP protocol +of different versions +- Make "--new-metrics" command line flag the default (#1148) +- Change `endpoint` to `url` in Zipkin exporter config (#1186) +- Change `tls_credentials` to `tls_settings` in Jaeger receiver config (#1233) +- OTLP receiver config change for `protocols` to support mTLS (#1223) +- Remove
`export_resource_labels` flag from Zipkin exporter (#1163) + +## 🚀 New components 🚀 + +- Receivers +  - Added process scraper to the `hostmetrics` receiver (#1047) + +## 💡 Enhancements 💡 + +- otlpexporter: send configured headers in request (#1130) +- Enable Collector to be run as a Windows service (#1120) +- Add config for HttpServer (#1196) +- Allow cors in HTTPServerSettings (#1211) +- Add a generic grpc server settings config, cleanup client config (#1183) +- Rely on gRPC to batch and loadbalance between connections instead of custom logic (#1212) +- Allow to tune the read/write buffers for gRPC clients (#1213) +- Allow to tune the read/write buffers for gRPC server (#1218) + +## 🧰 Bug fixes 🧰 + +- Handle overlapping metrics from different jobs in prometheus exporter (#1096) +- Fix handling of SpanKind INTERNAL in OTLP OC translation (#1143) +- Unify zipkin v1 and v2 annotation/tag parsing logic (#1002) +- mTLS: Add support to configure client CA and enforce ClientAuth (#1185) +- Fixed untyped Prometheus receiver bug (#1194) +- Do not embed ProtocolServerSettings in gRPC (#1210) +- Add Context to the missing CreateMetricsReceiver method (#1216) + +## v0.4.0 Beta + +Released 2020-06-16 + +## 🛑 Breaking changes 🛑 + +- `isEnabled` configuration option removed (#909) +- `thrift_tchannel` protocol moved from `jaeger` receiver to `jaeger_legacy` in contrib (#636) + +## ⚠️ Major changes ⚠️ + +- Switch from `localhost` to `0.0.0.0` by default for all receivers (#1006) +- Internal API Changes (only impacts contributors) +  - Add context to `Start` and `Stop` methods in the component (#790) +  - Rename `AttributeValue` and `AttributeMap` method names (#781) (other breaking changes in the internal trace data types) +  - Change entire repo to use the new vanityurl go.opentelemetry.io/collector (#977) + +## 🚀 New components 🚀 + +- Receivers +  - `hostmetrics` receiver with CPU (#862), disk (#921), load (#974), filesystem (#926), memory (#911), network (#930), and virtual memory (#989) support +- Processors +  - `batch` for batching received metrics (#1060) +  - `filter` for filtering (dropping) received metrics (#1001) + +## 💡 Enhancements 💡 + +- `otlp` receiver implements HTTP X-Protobuf (#1021) +- Exporters: Support mTLS in gRPC exporters (#927) +- Extensions: Add `zpages` for service (servicez, pipelinez, extensions) (#894) + +## 🧰 Bug fixes 🧰 + +- Add missing logging for metrics at `debug` level (#1108) +- Fix setting internal status code in `jaeger` receivers (#1105) +- `zipkin` export fails on span without timestamp when used with `queued_retry` (#1068) +- Fix `zipkin` receiver status code conversion (#996) +- Remove extra send/receive annotations when using `zipkin` v1 (#960) +- Fix resource attribute mutation bug when exporting in `jaeger` proto (#907) +- Fix metric/spans count, add tests for nil entries in the slices (#787) + + +## 🧩 Components 🧩 + +### Traces + +| Receivers | Processors | Exporters | +|:----------:|:-----------:|:----------:| +| Jaeger | Attributes | File | +| OpenCensus | Batch | Jaeger | +| OTLP | Memory Limiter | Logging | +| Zipkin | Queued Retry | OpenCensus | +| | Resource | OTLP | +| | Sampling | Zipkin | +| | Span | | + +### Metrics + +| Receivers | Processors | Exporters | +|:----------:|:-----------:|:----------:| +| HostMetrics | Batch | File | +| OpenCensus | Filter | Logging | +| OTLP | Memory Limiter | OpenCensus | +| Prometheus | | OTLP | +| VM Metrics | | Prometheus | + +### Extensions + +- Health Check +- Performance Profiler +- zPages + + +## v0.3.0 Beta + 
+Released 2020-03-30 + +### Breaking changes + +- Make prometheus receiver config loading strict. #697 +Prometheus receiver will now fail fast if the config contains unused keys in it. + +### Changes and fixes + +- Enable best-effort serve by default in the Prometheus Exporter (https://github.com/orijtech/prometheus-go-metrics-exporter/pull/6) +- Fix null pointer exception in the logging exporter #743 +- Remove unnecessary condition to have at least one processor #744 + +### Components + +| Receivers / Exporters | Processors | Extensions | +|:---------------------:|:-----------:|:-----------:| +| Jaeger | Attributes | Health Check | +| OpenCensus | Batch | Performance Profiler | +| OpenTelemetry | Memory Limiter | zPages | +| Zipkin | Queued Retry | | +| | Resource | | +| | Sampling | | +| | Span | | + + +## v0.2.8 Alpha + +Alpha v0.2.8 of OpenTelemetry Collector. + +- Implemented OTLP receiver and exporter. +- Added ability to pass config to the service programmatically (useful for custom builds). +- Improved own metrics / observability. +- Refactored component and factory interface definitions (breaking change #683) + + +## v0.2.7 Alpha + +Alpha v0.2.7 of OpenTelemetry Collector. + +- Improved error handling on shutdown +- Partial implementation of new metrics (new obsreport package) +- Include resource labels for Zipkin exporter +- New `HASH` action to attribute processor + + + +## v0.2.6 Alpha + +Alpha v0.2.6 of OpenTelemetry Collector. +- Update metrics prefix to `otelcol` and expose command line argument to modify the prefix value. +- Extend Span processor to have include/exclude span logic. +- Batch dropped span now emits zero when no spans are dropped. + + +## v0.2.5 Alpha + +Alpha v0.2.5 of OpenTelemetry Collector. + +- Regexp-based filtering of spans based on service names. +- Ability to choose strict or regexp matching for include/exclude filters. + + +## v0.2.4 Alpha + +Alpha v0.2.4 of OpenTelemetry Collector. + +- Regexp-based filtering of span names. +- Ability to extract attributes from span names and rename the span. +- File exporter for debugging. +- Span processor is now enabled by default. + + +## v0.2.3 Alpha + +Alpha v0.2.3 of OpenTelemetry Collector. + +Changes: +21a70d6 Add a memory limiter processor (#498) +9778b16 Refactor Jaeger Receiver config (#490) +ec4ad0c Remove workers from OpenCensus receiver implementation (#497) +4e01fa3 Update k8s config to use opentelemetry docker image and configuration (#459) + + +## v0.2.2 Alpha + +Alpha v0.2.2 of OpenTelemetry Collector. + +Main changes visible to users since previous release: + +- Improved Testbed and added more E2E tests. +- Made component interfaces more uniform (this is a breaking change). + +Note: v0.2.1 never existed and is skipped since it was tainted in some dependencies. + + +## v0.2.0 Alpha + +Alpha v0.2 of OpenTelemetry Collector.
+ +Docker image: omnition/opentelemetry-collector:v0.2.0 (we are working on getting this under an OpenTelemetry org) + +Main changes visible to users since previous release: + +* Rename from `service` to `collector`, the binary is now named `otelcol` + +* Configuration reorganized and using strict mode + +* Concurrency issues for pipelines transforming data addressed + +Commits: + +```terminal +0e505d5 Refactor config: pipelines now under service (#376) +402b80c Add Capabilities to Processor and use for Fanout cloning decision (#374) +b27d824 Use strict mode to read config (#375) +d769eb5 Fix concurrency handling when data is fanned out (#367) +dc6b290 Rename all github paths from opentelemtry-service to opentelemetry-collector (#371) +d038801 Rename otelsvc to otelcol (#365) +c264e0e Add Include/Exclude logic for Attributes Processor (#363) +8ce427a Pin a commit for Prometheus dependency in go.mod (#364) +2393774 Bump Jaeger version to 1.14.0 (latest) (#349) +63362d5 Update testbed modules (#360) +c0e2a27 Change dashes to underscores to separate words in config files (#357) +7609eaa Rename OpenTelemetry Service to Collector in docs and comments (#354) +bc5b299 Add common gRPC configuration settings (#340) +b38505c Remove network access popups on macos (#348) +f7727d1 Fixed loop variable pointer bug in jaeger translator (#341) +958beed Ensure that ConsumeMetricsData() is not passed empty metrics in the Prometheus receiver (#345) +0be295f Change log statement in Prometheus receiver from info to debug. (#344) +d205393 Add Owais to codeowners (#339) +8fa6afe Translate OC resource labels to Jaeger process tags (#325) +``` + + +## v0.0.2 Alpha + +Alpha release of OpenTelemetry Service. + +Docker image: omnition/opentelemetry-service:v0.0.2 (we are working on getting this under an OpenTelemetry org) + +Main changes visible to users since previous release: + +```terminal +8fa6afe Translate OC resource labels to Jaeger process tags (#325) +047b0f3 Allow environment variables in config (#334) +96c24a3 Add exclude/include spans option to attributes processor (#311) +4db0414 Allow metric processors to be specified in pipelines (#332) +c277569 Add observability instrumentation for Prometheus receiver (#327) +f47aa79 Add common configuration for receiver tls (#288) +a493765 Refactor extensions to new config format (#310) +41a7afa Add Span Processor logic +97a71b3 Use full name for the metrics and spans created for observability (#316) +fed4ed2 Add support to record metrics for metricsexporter (#315) +5edca32 Add include_filter configuration to prometheus receiver (#298) +0068d0a Passthrough CORS allowed origins (#260) +``` + + +## v0.0.1 Alpha + +This is the first alpha release of OpenTelemetry Service. 
+ +Docker image: omnition/opentelemetry-service:v0.0.1 + + +[v0.3.0]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.10...v0.3.0 +[v0.2.10]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.8...v0.2.10 +[v0.2.8]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.7...v0.2.8 +[v0.2.7]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.6...v0.2.7 +[v0.2.6]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.5...v0.2.6 +[v0.2.5]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.4...v0.2.5 +[v0.2.4]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.3...v0.2.4 +[v0.2.3]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.2...v0.2.3 +[v0.2.2]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.2.0...v0.2.2 +[v0.2.0]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.0.2...v0.2.0 +[v0.0.2]: https://github.com/open-telemetry/opentelemetry-collector/compare/v0.0.1...v0.0.2 +[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector/tree/v0.0.1 diff --git a/internal/otel_collector/CONTRIBUTING.md b/internal/otel_collector/CONTRIBUTING.md new file mode 100644 index 00000000000..d280771364e --- /dev/null +++ b/internal/otel_collector/CONTRIBUTING.md @@ -0,0 +1,311 @@ +# Contributing Guide + +We'd love your help! Please join our weekly [SIG meeting](https://github.com/open-telemetry/community#special-interest-groups). + +## How to structure PRs to get expedient reviews? + +We recommend that any PR (unless it is trivial) be smaller than 500 lines (excluding go mod/sum changes) to help reviewers do a thorough and reasonably fast review. + +### When adding a new component + +Consider submitting separate PRs (more details about adding new components [here](#adding-new-components)): + +* The first PR should include the overall structure of the new component: +  * Readme, configuration, and factory implementation, usually using the helper factory structs. +  * This PR is usually trivial to review, so the size limit does not apply to it. +* The second PR should include the concrete implementation of the component. If the size of this PR is larger than the recommended size, consider splitting it into multiple PRs. +* The last PR should enable the new component and add it to the `otelcontribcol` binary by updating the `components.go` file. The component must be enabled only after sufficient testing and only when there is enough confidence in the stability and quality of the component. +* Once a new component has been added to the executable, please add the component to the [OpenTelemetry.io registry](https://github.com/open-telemetry/opentelemetry.io#adding-a-project-to-the-opentelemetry-registry). + +### Refactoring Work + +Any refactoring work must be split into its own PR that does not include any behavior changes. It is important to do this to avoid hidden changes in large and trivial refactoring PRs. + +## Report a bug or request a feature + +Reporting bugs is an important contribution.
Please make sure to include: + +* Expected and actual behavior +* OpenTelemetry version you are running +* If possible, steps to reproduce + +## How to contribute + +### Before you start + +Please read the project contribution [guide](https://github.com/open-telemetry/community/blob/main/CONTRIBUTING.md) for general practices for the OpenTelemetry project. + +Select a good issue from the links below (ordered by difficulty/complexity): + +* [Good First Issue](https://github.com/open-telemetry/opentelemetry-collector/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) +* [Up for Grabs](https://github.com/open-telemetry/opentelemetry-collector/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3Aup-for-grabs+) +* [Help Wanted](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) + +Comment on the issue that you want to work on so we can assign it to you and clarify anything related to it. + +If you would like to work on something that is not listed as an issue (e.g. a new feature or enhancement), please first read our [vision](docs/vision.md) and [roadmap](docs/roadmap.md) to make sure your proposal aligns with the goals of the Collector, then create an issue and describe your proposal. It is best to do this in advance so that maintainers can decide if the proposal is a good fit for this repository. This will help avoid situations where you spend significant time on something that maintainers decide does not belong in this repo. + +Follow the instructions below to create your PR. + +### Fork + +In the interest of keeping this repository clean and manageable, you should work from a fork. To create a fork, click the 'Fork' button at the top of the repository, then clone the fork locally using `git clone git@github.com:USERNAME/opentelemetry-collector.git`. + +You should also add this repository as an "upstream" repo to your local copy, in order to keep it up to date. You can add this as a remote like so: + +`git remote add upstream https://github.com/open-telemetry/opentelemetry-collector.git` + +Verify that the upstream exists: + +`git remote -v` + +To update your fork, fetch the upstream repo's branches and commits, then merge your `main` with upstream's `main`: + +``` +git fetch upstream +git checkout main +git merge upstream/main +``` + +Remember to always work in a branch of your local copy, as you might otherwise have to contend with conflicts in `main`. + +Please also see the [GitHub workflow](https://github.com/open-telemetry/community/blob/main/CONTRIBUTING.md#github-workflow) section of the general project contributing guide. + +## Required Tools + +Working with the project sources requires the following tools: + +1. [git](https://git-scm.com/) +2. [go](https://golang.org/) (version 1.17 and up) +3. [make](https://www.gnu.org/software/make/) +4. [docker](https://www.docker.com/) + +## Repository Setup + +Fork the repo, then check out the upstream repo into your GOPATH: + +``` +$ git clone git@github.com:open-telemetry/opentelemetry-collector.git +``` + +Add your fork as an origin: + +```shell +$ cd opentelemetry-collector +$ git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opentelemetry-collector.git +``` + +Run tests, fmt and lint: + +```shell +$ make install-tools # Only first time.
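+# The default target below runs fmt, lint and the tests.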
+$ make +``` + +*Note:* the default build target requires tools that are installed at `$(go env GOPATH)/bin`; ensure that `$(go env GOPATH)/bin` is included in your `PATH`. + +## Creating a PR + +Check out a new branch, make modifications, build locally, and push the branch to your fork to open a new PR: + +```shell +$ git checkout -b feature +# edit +$ make +$ make fmt +$ git commit +$ git push fork feature +``` + +## General Notes + +This project uses Go 1.17.* and CircleCI. + +CircleCI uses the Makefile with the `ci` target; it is recommended to run it before submitting your PR. It runs `gofmt -s` (simplify) and `golint`. + +The dependencies are managed with `go mod`; if you work with the sources under your `$GOPATH` you need to set the environment variable `GO111MODULE=on`. + +## Coding Guidelines + +Although the OpenTelemetry project as a whole is still in the Alpha stage, we consider OpenTelemetry Collector to be close to production quality and the quality bar for contributions is set accordingly. Contributions must have readable code written with maintainability in mind (if in doubt check [Effective Go](https://golang.org/doc/effective_go.html) for coding advice). The code must adhere to the following robustness principles that are important for software that runs autonomously and continuously without direct interaction with a human (such as this Collector). + +### Startup Error Handling + +Verify configuration during startup and fail fast if the configuration is invalid. This will bring the attention of a human to the problem, as it is more typical for humans to notice problems when the process is starting as opposed to problems that may arise sometime (potentially a long time) after process startup. Monitoring systems are likely to automatically flag processes that exit with failure during startup, making it easier to notice the problem. The Collector should print a reasonable log message to explain the problem and exit with a non-zero code. It is acceptable to crash the process during startup if there is no good way to exit cleanly, but do your best to log the problem and exit with a meaningful process exit code. + +### Propagate Errors to the Caller + +Do not crash or exit outside the `main()` function, e.g. via `log.Fatal` or `os.Exit`, even during startup. Instead, return detailed errors to be handled appropriately by the caller. The code in packages other than `main` may be imported and used by third-party applications, and they should have full control over error handling and process termination.
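+For illustration only, a minimal sketch of this principle; `buildPipeline` and its
+error text are hypothetical and not part of the Collector API:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+)
+
+// buildPipeline is a hypothetical library function: it reports problems to
+// its caller instead of terminating the process itself.
+func buildPipeline(cfgPath string) error {
+	if cfgPath == "" {
+		return fmt.Errorf("build pipeline: config path must not be empty")
+	}
+	// ... construct receivers, processors and exporters here ...
+	return nil
+}
+
+func main() {
+	// Only main decides how the error is handled and which exit code to use.
+	if err := buildPipeline(""); err != nil {
+		log.Fatalf("startup failed: %v", err) // exits with a non-zero code
+	}
+}
+```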
+### Do not Crash after Startup + +Do not crash or exit the Collector process after the startup sequence is finished. A running Collector typically contains data that is received but not yet exported further (e.g. it is stored in the queues and other processors). Crashing or exiting the Collector process will result in losing this data, since typically the receiver has already acknowledged the receipt of this data and the senders of the data will not send that data again. + +### Bad Input Handling + +Do not crash on bad input in receivers or elsewhere in the pipeline. [Crash-only software](https://en.wikipedia.org/wiki/Crash-only_software) is valid in certain cases; however, this is not a correct approach for the Collector (except during startup, see above). The reason is that many senders from which the Collector receives data have built-in automatic retries of the _same_ data if no acknowledgment is received from the Collector. If you crash on bad input, chances are high that after the Collector is restarted it will see the same data in the input and will crash again. This will likely result in an infinite crash loop if you have automatic retries in place. + +Typically, bad input detected in a receiver should be reported back to the sender. If it is detected elsewhere in the pipeline it may be too late to send a response to the sender (particularly in processors which are not synchronously processing data). In either case it is recommended to keep a metric that counts bad input data. + +### Error Handling and Retries + +Be rigorous in error handling. Don't ignore errors. Think carefully about each error and decide if it is a fatal problem or a transient problem that may go away when retried. Fatal errors should be logged or recorded in an internal metric to provide visibility to users of the Collector. For transient errors, come up with a retry strategy and implement it. Typically you will want to implement retries with some sort of exponential back-off strategy. For connection or sending retries, use jitter for back-off intervals to avoid overwhelming your destination when the network is restored or the destination is recovered. [Exponential Backoff](https://github.com/cenkalti/backoff) is a good library that provides all this functionality.
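+As a hedged sketch of the retry strategy described above, using the library linked
+in the previous paragraph (`sendBatch` is a hypothetical stand-in for a transient
+network operation, not a Collector API):
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// sendBatch stands in for a connection or send attempt that may fail transiently.
+func sendBatch() error {
+	return errors.New("connection refused") // pretend the destination is down
+}
+
+func main() {
+	b := backoff.NewExponentialBackOff()
+	b.InitialInterval = 500 * time.Millisecond
+	b.MaxInterval = 30 * time.Second  // cap the wait between attempts
+	b.MaxElapsedTime = 2 * time.Minute // give up eventually
+	// The default RandomizationFactor already adds jitter to each interval,
+	// so many retrying senders do not overwhelm a recovering destination.
+
+	if err := backoff.Retry(sendBatch, b); err != nil {
+		log.Printf("giving up after retries: %v", err) // fatal: record and report
+	}
+}
+```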
+### Logging + +Log your component startup and shutdown, including successful outcomes (but don't overdo it; keep the number of success messages to a minimum). This can help to understand the context of failures if they occur elsewhere after your code is successfully executed. + +Use logging carefully for events that can happen frequently, to avoid flooding the logs. Avoid outputting logs per received or processed data item, since this can amount to a very large number of log entries (the Collector is designed to process many thousands of spans and metrics per second). For such high-frequency events, instead of logging consider adding an internal metric and incrementing it when the event happens. + +Make log messages human readable and also include data that is needed for easier understanding of what happened and in what context. + +### Observability + +Out of the box, your users should be able to observe the state of your component. The Collector exposes an OpenMetrics endpoint at `http://localhost:8888/metrics` where your data will land. + +When using the regular helpers, you should have some metrics added around key events automatically. For instance, exporters should have `otelcol_exporter_sent_spans` tracked without your exporter doing anything. + +### Resource Usage + +Limit usage of CPU, RAM or other resources that the code can use. Do not write code that consumes resources in an uncontrolled manner. For example, if you have a queue that can contain unprocessed messages, always limit the size of the queue unless you have other ways to guarantee that the queue will be consumed faster than items are added to it. + +Performance-test the code both for normal use-cases under acceptable load and for abnormal use-cases when the load exceeds the acceptable level many times over. Ensure that your code performs predictably under abnormal use. For example, if the code needs to process received data and cannot keep up with the receiving rate, it is not acceptable to keep allocating more memory for received data until the Collector runs out of memory. Instead, have protections for these situations, e.g. when hitting resource limits drop the data and record the fact that it was dropped in a metric that is exposed to users.
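+As an illustrative sketch only (not the Collector's actual queue implementation),
+a size-limited queue that drops on overflow and counts the drops in a counter
+that could back such a metric:
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// boundedQueue never grows past its fixed capacity; overflow is dropped
+// and counted so the drops can be exposed to users as a metric.
+type boundedQueue struct {
+	items   chan []byte
+	dropped uint64
+}
+
+func newBoundedQueue(capacity int) *boundedQueue {
+	return &boundedQueue{items: make(chan []byte, capacity)}
+}
+
+// enqueue never blocks: when the queue is full it records the drop and
+// returns false instead of allocating more memory.
+func (q *boundedQueue) enqueue(item []byte) bool {
+	select {
+	case q.items <- item:
+		return true
+	default:
+		atomic.AddUint64(&q.dropped, 1)
+		return false
+	}
+}
+
+func main() {
+	q := newBoundedQueue(2)
+	for i := 0; i < 3; i++ {
+		fmt.Println("accepted:", q.enqueue([]byte("span")))
+	}
+	fmt.Println("dropped:", atomic.LoadUint64(&q.dropped))
+}
+```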
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/internal/otel_collector/Makefile b/internal/otel_collector/Makefile new file mode 100644 index 00000000000..f2a3b198590 --- /dev/null +++ b/internal/otel_collector/Makefile @@ -0,0 +1,390 @@ +include ./Makefile.Common + +# This is the code that we want to run lint, etc. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './cmd/schemagen/*' \ + -not -path './internal/tools/*' \ + -not -path './examples/demo/app/*' \ + -not -path './model/internal/data/protogen/*' \ + -not -path './service/internal/zpages/tmplgen/*' \ + -type f | sort) + +# All source code and documents. Used in spell check. +ALL_DOC := $(shell find . \( -name "*.md" -o -name "*.yaml" \) \ + -type f | sort) + +# ALL_MODULES includes ./* dirs (excludes . dir) +ALL_MODULES := $(shell find . 
-type f -name "go.mod" -exec dirname {} \; | sort | egrep '^./' ) + +CMD?= +TOOLS_MOD_DIR := ./internal/tools + +GOOS=$(shell go env GOOS) +GOARCH=$(shell go env GOARCH) + +BUILD_INFO_IMPORT_PATH=go.opentelemetry.io/collector/internal/version +VERSION=$(shell git describe --always --match "v[0-9]*" HEAD) +BUILD_INFO=-ldflags "-X $(BUILD_INFO_IMPORT_PATH).Version=$(VERSION)" + +RUN_CONFIG?=examples/local/otel-config.yaml +CONTRIB_PATH=$(CURDIR)/../opentelemetry-collector-contrib +COMP_REL_PATH=service/defaultcomponents/defaults.go +MOD_NAME=go.opentelemetry.io/collector + +GO_ACC=go-acc +ADDLICENSE=addlicense +MISSPELL=misspell -error +MISSPELL_CORRECTION=misspell -w + +# Function to execute a command. Note the empty line before endef to make sure each command +# gets executed separately instead of concatenated with previous one. +# Accepts command to execute as first parameter. +define exec-command +$(1) + +endef + +.DEFAULT_GOAL := all + +.PHONY: version +version: + @echo ${VERSION} + +.PHONY: all +all: checklicense checkdoc misspell goimpi golint gotest otelcol + +all-modules: + @echo $(ALL_MODULES) | tr ' ' '\n' | sort + +.PHONY: gomoddownload +gomoddownload: + @$(MAKE) for-all CMD="go mod download" + +.PHONY: gotestinstall +gotestinstall: + @$(MAKE) for-all CMD="make test GOTEST_OPT=\"-i\"" + +.PHONY: gotest +gotest: + @$(MAKE) for-all CMD="make test" + +.PHONY: gobenchmark +gobenchmark: + @$(MAKE) for-all CMD="make benchmark" + +.PHONY: gotest-with-cover +gotest-with-cover: + @echo pre-compiling tests + @time $(GOTEST) -i ./... + $(GO_ACC) ./... + go tool cover -html=coverage.txt -o coverage.html + +.PHONY: golint +golint: + @$(MAKE) for-all CMD="make lint" + +.PHONY: goimpi +goimpi: + @$(MAKE) for-all CMD="make impi" + +.PHONY: gofmt +gofmt: + @$(MAKE) for-all CMD="make fmt" + +.PHONY: gotidy +gotidy: + $(MAKE) for-all CMD="rm -fr go.sum" + $(MAKE) for-all CMD="go mod tidy -go=1.16" + $(MAKE) for-all CMD="go mod tidy -go=1.17" + +.PHONY: addlicense +addlicense: + @ADDLICENSEOUT=`$(ADDLICENSE) -y "" -c "The OpenTelemetry Authors" $(ALL_SRC) 2>&1`; \ + if [ "$$ADDLICENSEOUT" ]; then \ + echo "$(ADDLICENSE) FAILED => add License errors:\n"; \ + echo "$$ADDLICENSEOUT\n"; \ + exit 1; \ + else \ + echo "Add License finished successfully"; \ + fi + +.PHONY: checklicense +checklicense: + @ADDLICENSEOUT=`$(ADDLICENSE) -check $(ALL_SRC) 2>&1`; \ + if [ "$$ADDLICENSEOUT" ]; then \ + echo "$(ADDLICENSE) FAILED => add License errors:\n"; \ + echo "$$ADDLICENSEOUT\n"; \ + echo "Use 'make addlicense' to fix this."; \ + exit 1; \ + else \ + echo "Check License finished successfully"; \ + fi + +.PHONY: misspell +misspell: + $(MISSPELL) $(ALL_DOC) + +.PHONY: misspell-correction +misspell-correction: + $(MISSPELL_CORRECTION) $(ALL_DOC) + +.PHONY: install-tools +install-tools: + cd $(TOOLS_MOD_DIR) && go install github.com/client9/misspell/cmd/misspell + cd $(TOOLS_MOD_DIR) && go install github.com/golangci/golangci-lint/cmd/golangci-lint + cd $(TOOLS_MOD_DIR) && go install github.com/google/addlicense + cd $(TOOLS_MOD_DIR) && go install github.com/jstemmer/go-junit-report + cd $(TOOLS_MOD_DIR) && go install github.com/mjibson/esc + cd $(TOOLS_MOD_DIR) && go install github.com/ory/go-acc + cd $(TOOLS_MOD_DIR) && go install github.com/pavius/impi/cmd/impi + cd $(TOOLS_MOD_DIR) && go install github.com/tcnksm/ghr + cd $(TOOLS_MOD_DIR) && go install go.opentelemetry.io/build-tools/semconvgen + cd $(TOOLS_MOD_DIR) && go install 
go.opentelemetry.io/build-tools/checkdoc + cd $(TOOLS_MOD_DIR) && go install golang.org/x/exp/cmd/apidiff + cd $(TOOLS_MOD_DIR) && go install golang.org/x/tools/cmd/goimports + +.PHONY: otelcol +otelcol: + go generate ./... + $(MAKE) build-binary-internal + +.PHONY: run +run: + GO111MODULE=on go run --race ./cmd/otelcol/... --config ${RUN_CONFIG} ${RUN_ARGS} + +.PHONY: docker-component # Not intended to be used directly +docker-component: check-component + GOOS=linux $(MAKE) $(COMPONENT) + cp ./bin/$(COMPONENT)_linux_amd64 ./cmd/$(COMPONENT)/$(COMPONENT) + docker build -t $(COMPONENT) ./cmd/$(COMPONENT)/ + rm ./cmd/$(COMPONENT)/$(COMPONENT) + +.PHONY: for-all +for-all: + @echo "running $${CMD} in root" + @$${CMD} + @set -e; for dir in $(ALL_MODULES); do \ + (cd "$${dir}" && \ + echo "running $${CMD} in $${dir}" && \ + $${CMD} ); \ + done + +.PHONY: check-component +check-component: +ifndef COMPONENT + $(error COMPONENT variable was not defined) +endif + +.PHONY: add-tag +add-tag: + @[ "${TAG}" ] || ( echo ">> env var TAG is not set"; exit 1 ) + @echo "Adding tag ${TAG}" + @git tag -a ${TAG} -s -m "Version ${TAG}" + @set -e; for dir in $(ALL_MODULES); do \ + (echo Adding tag "$${dir:2}/$${TAG}" && \ + git tag -a "$${dir:2}/$${TAG}" -s -m "Version ${dir:2}/${TAG}" ); \ + done + +.PHONY: push-tag +push-tag: + @[ "${TAG}" ] || ( echo ">> env var TAG is not set"; exit 1 ) + @echo "Pushing tag ${TAG}" + @git push upstream ${TAG} + @set -e; for dir in $(ALL_MODULES); do \ + (echo Pushing tag "$${dir:2}/$${TAG}" && \ + git push upstream "$${dir:2}/$${TAG}"); \ + done + +.PHONY: delete-tag +delete-tag: + @[ "${TAG}" ] || ( echo ">> env var TAG is not set"; exit 1 ) + @echo "Deleting tag ${TAG}" + @git tag -d ${TAG} + @set -e; for dir in $(ALL_MODULES); do \ + (echo Deleting tag "$${dir:2}/$${TAG}" && \ + git tag -d "$${dir:2}/$${TAG}" ); \ + done + +.PHONY: docker-otelcol +docker-otelcol: + COMPONENT=otelcol $(MAKE) docker-component + +# build collector binaries with different OS and Architecture +.PHONY: binaries-all-sys +binaries-all-sys: binaries-darwin_amd64 binaries-darwin_arm64 binaries-linux_amd64 binaries-linux_arm64 binaries-windows_amd64 + +.PHONY: binaries-darwin_amd64 +binaries-darwin_amd64: + GOOS=darwin GOARCH=amd64 $(MAKE) build-binary-internal + +.PHONY: binaries-darwin_arm64 +binaries-darwin_arm64: + GOOS=darwin GOARCH=arm64 $(MAKE) build-binary-internal + +.PHONY: binaries-linux_amd64 +binaries-linux_amd64: + GOOS=linux GOARCH=amd64 $(MAKE) build-binary-internal + +.PHONY: binaries-linux_arm64 +binaries-linux_arm64: + GOOS=linux GOARCH=arm64 $(MAKE) build-binary-internal + +.PHONY: binaries-windows_amd64 +binaries-windows_amd64: + GOOS=windows GOARCH=amd64 EXTENSION=.exe $(MAKE) build-binary-internal + +.PHONY: build-binary-internal +build-binary-internal: + GO111MODULE=on CGO_ENABLED=0 go build -trimpath -o ./bin/otelcol_$(GOOS)_$(GOARCH)$(EXTENSION) $(BUILD_INFO) ./cmd/otelcol + +.PHONY: deb-rpm-package +%-package: ARCH ?= amd64 +%-package: + $(MAKE) binaries-linux_$(ARCH) + docker build -t otelcol-fpm internal/buildscripts/packaging/fpm + docker run --rm -v $(CURDIR):/repo -e PACKAGE=$* -e VERSION=$(VERSION) -e ARCH=$(ARCH) otelcol-fpm + +.PHONY: genmdata +genmdata: + $(MAKE) for-all CMD="go generate ./..." 
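+
+# The genmdata target above fans out through for-all. The same pattern works
+# ad hoc from the command line, e.g. (a sketch):
+#   make for-all CMD="go mod tidy"
+# This runs the given command at the repository root and then in every module
+# listed in ALL_MODULES.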
+ +DEPENDABOT_PATH=".github/dependabot.yml" +.PHONY: internal-gendependabot +internal-gendependabot: + @echo "Add rule for \"${PACKAGE}\" in \"${DIR}\""; + @echo " - package-ecosystem: \"${PACKAGE}\"" >> ${DEPENDABOT_PATH}; + @echo " directory: \"${DIR}\"" >> ${DEPENDABOT_PATH}; + @echo " schedule:" >> ${DEPENDABOT_PATH}; + @echo " interval: \"weekly\"" >> ${DEPENDABOT_PATH}; + +.PHONY: gendependabot +gendependabot: + @echo "Recreating ${DEPENDABOT_PATH} file" + @echo "# File generated by \"make gendependabot\"; DO NOT EDIT." > ${DEPENDABOT_PATH} + @echo "" >> ${DEPENDABOT_PATH} + @echo "version: 2" >> ${DEPENDABOT_PATH} + @echo "updates:" >> ${DEPENDABOT_PATH} + $(MAKE) internal-gendependabot DIR="/" PACKAGE="github-actions" + $(MAKE) internal-gendependabot DIR="/" PACKAGE="docker" + $(MAKE) internal-gendependabot DIR="/" PACKAGE="gomod" + @set -e; for dir in $(ALL_MODULES); do \ + $(MAKE) internal-gendependabot DIR=$${dir:1} PACKAGE="gomod"; \ + done + +# Definitions for ProtoBuf generation. + +# The source directory for OTLP ProtoBufs. +OPENTELEMETRY_PROTO_SRC_DIR=model/internal/opentelemetry-proto + +# Find all .proto files. +OPENTELEMETRY_PROTO_FILES := $(subst $(OPENTELEMETRY_PROTO_SRC_DIR)/,,$(wildcard $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/*/v1/*.proto $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/collector/*/v1/*.proto)) + +# Target directory to write generated files to. +PROTO_TARGET_GEN_DIR=model/internal/data/protogen + +# Go package name to use for generated files. +PROTO_PACKAGE=go.opentelemetry.io/collector/$(PROTO_TARGET_GEN_DIR) + +# Intermediate directory used during generation. +PROTO_INTERMEDIATE_DIR=model/internal/.patched-otlp-proto + +DOCKER_PROTOBUF ?= otel/build-protobuf:0.4.1 +PROTOC := docker run --rm -u ${shell id -u} -v${PWD}:${PWD} -w${PWD}/$(PROTO_INTERMEDIATE_DIR) ${DOCKER_PROTOBUF} --proto_path=${PWD} +PROTO_INCLUDES := -I/usr/include/github.com/gogo/protobuf -I./ + +# Generate OTLP Protobuf Go files. This will place generated files in PROTO_TARGET_GEN_DIR. +genproto: + git submodule update --init + # Call a sub-make to ensure OPENTELEMETRY_PROTO_FILES is populated after the submodule + # files are present. + $(MAKE) genproto_sub + $(MAKE) fmt + +genproto_sub: + @echo Generating code for the following files: + @$(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,echo $(file))) + + @echo Delete intermediate directory. + @rm -rf $(PROTO_INTERMEDIATE_DIR) + + @echo Copy .proto file to intermediate directory. + mkdir -p $(PROTO_INTERMEDIATE_DIR)/opentelemetry + cp -R $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/* $(PROTO_INTERMEDIATE_DIR)/opentelemetry + + # Patch proto files. See proto_patch.sed for patching rules. + @echo Modify them in the intermediate directory. + $(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,sed -f proto_patch.sed $(OPENTELEMETRY_PROTO_SRC_DIR)/$(file) > $(PROTO_INTERMEDIATE_DIR)/$(file))) + + @echo Generate Go code from .proto files in intermediate directory. + $(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,$(PROTOC) $(PROTO_INCLUDES) --gogofaster_out=plugins=grpc:./ $(file))) + + @echo Move generated code to target directory. + mkdir -p $(PROTO_TARGET_GEN_DIR) + cp -R $(PROTO_INTERMEDIATE_DIR)/$(PROTO_PACKAGE)/* $(PROTO_TARGET_GEN_DIR)/ + rm -rf $(PROTO_INTERMEDIATE_DIR)/go.opentelemetry.io + + @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/* + @rm -rf $(OPENTELEMETRY_PROTO_SRC_DIR)/.* > /dev/null 2>&1 || true + +# Generate structs, functions and tests for pdata package. 
Must be used after any changes +# to proto and after running `make genproto` +genpdata: + go run cmd/pdatagen/main.go + $(MAKE) fmt + +# Generate semantic convention constants. Requires a clone of the opentelemetry-specification repo +gensemconv: + @[ "${SPECPATH}" ] || ( echo ">> env var SPECPATH is not set"; exit 1 ) + @[ "${SPECTAG}" ] || ( echo ">> env var SPECTAG is not set"; exit 1 ) + @echo "Generating semantic convention constants from specification version ${SPECTAG} at ${SPECPATH}" + semconvgen -o model/semconv/${SPECTAG} -t model/internal/semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/semantic_conventions/resource -p conventionType=resource + semconvgen -o model/semconv/${SPECTAG} -t model/internal/semconv/template.j2 -s ${SPECTAG} -i ${SPECPATH}/semantic_conventions/trace -p conventionType=trace + +# Checks that the HEAD of the contrib repo checked out in CONTRIB_PATH compiles +# against the current version of this repo. +.PHONY: check-contrib +check-contrib: + @echo Setting contrib at $(CONTRIB_PATH) to use this core checkout + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -replace go.opentelemetry.io/collector=$(CURDIR)" + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -replace go.opentelemetry.io/collector/model=$(CURDIR)/model" + make -C $(CONTRIB_PATH) for-all CMD="go mod tidy -go=1.16" + make -C $(CONTRIB_PATH) for-all CMD="go mod tidy -go=1.17" + make -C $(CONTRIB_PATH) test + @echo Restoring contrib to no longer use this core checkout + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -dropreplace go.opentelemetry.io/collector" + +# List of directories where certificates are stored for unit tests. +CERT_DIRS := config/configgrpc/testdata \ + config/confighttp/testdata + +# Generate certificates for unit tests relying on certificates. +.PHONY: certs +certs: + $(foreach dir, $(CERT_DIRS), $(call exec-command, @internal/buildscripts/gen-certs.sh -o $(dir))) + +# Generate certificates for unit tests relying on certificates without copying certs to specific test directories. +.PHONY: certs-dryrun +certs-dryrun: + @internal/buildscripts/gen-certs.sh -d + +# Verify existence of READMEs for components specified as default components in the collector. +.PHONY: checkdoc +checkdoc: + checkdoc --project-path $(CURDIR) --component-rel-path $(COMP_REL_PATH) --module-name $(MOD_NAME) + +# Construct new API state snapshots +.PHONY: apidiff-build +apidiff-build: + @$(foreach pkg,$(ALL_PKGS),$(call exec-command,./internal/buildscripts/gen-apidiff.sh -p $(pkg))) + +# If we are running in CI, change input directory +ifeq ($(CI), true) +APICOMPARE_OPTS=$(COMPARE_OPTS) +else +APICOMPARE_OPTS=-d "./internal/data/apidiff" +endif + +# Compare API state snapshots +.PHONY: apidiff-compare +apidiff-compare: + @$(foreach pkg,$(ALL_PKGS),$(call exec-command,./internal/buildscripts/compare-apidiff.sh -p $(pkg))) diff --git a/internal/otel_collector/Makefile.Common b/internal/otel_collector/Makefile.Common new file mode 100644 index 00000000000..85d282b2d54 --- /dev/null +++ b/internal/otel_collector/Makefile.Common @@ -0,0 +1,30 @@ +# ALL_PKGS is the list of all packages where ALL_SRC files reside. +ALL_PKGS := $(sort $(shell go list ./...)) + +# Use a single process (-p 1) on go test to avoid tests clashing on machine +# wide resources, e.g. ports. +GOTEST_OPT?= -v -p 1 -race -timeout 180s +GOTEST=go test +LINT=golangci-lint +IMPI=impi + +.PHONY: test +test: + @echo $(ALL_PKGS) | xargs -n 10 $(GOTEST) $(GOTEST_OPT) + +.PHONY: benchmark +benchmark: + $(GOTEST) -bench=. -run=notests ./... 
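+
+# GOTEST_OPT is assigned with ?=, so callers can override it per invocation,
+# e.g. (a sketch with a longer timeout):
+#   make test GOTEST_OPT="-v -p 1 -race -timeout 300s"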
+ +.PHONY: fmt +fmt: + gofmt -w -s ./ + goimports -w -local go.opentelemetry.io/collector ./ + +.PHONY: lint +lint: + $(LINT) run --allow-parallel-runners + +.PHONY: impi +impi: + @$(IMPI) --local go.opentelemetry.io/collector --scheme stdThirdPartyLocal ./... diff --git a/internal/otel_collector/README.md b/internal/otel_collector/README.md new file mode 100644 index 00000000000..76bc3350758 --- /dev/null +++ b/internal/otel_collector/README.md @@ -0,0 +1,94 @@ +--- + +

+Getting Started • Getting Involved • Getting In Touch
+
+Status: Beta
+
+Vision • Design • Monitoring • Performance • Security • Roadmap • Package

+
+---
+
+# OpenTelemetry Collector
+
+The OpenTelemetry Collector offers a vendor-agnostic implementation of how to
+receive, process and export telemetry data. In addition, it removes the need
+to run, operate and maintain multiple agents/collectors in order to support
+open-source telemetry data formats (e.g. Jaeger, Prometheus, etc.) sending to
+multiple open-source or commercial back-ends.
+
+Objectives:
+
+- Usable: Reasonable default configuration, supports popular protocols, runs and collects out of the box.
+- Performant: Highly stable and performant under varying loads and configurations.
+- Observable: An exemplar of an observable service.
+- Extensible: Customizable without touching the core code.
+- Unified: Single codebase, deployable as an agent or collector with support for traces, metrics and logs.
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md).
+
+Triagers ([@open-telemetry/collector-triagers](https://github.com/orgs/open-telemetry/teams/collector-triagers)):
+
+- [Alolita Sharma](https://github.com/alolita), AWS
+- [Punya Biswal](https://github.com/punya), Google
+- [Steve Flanders](https://github.com/flands), Splunk
+
+Approvers ([@open-telemetry/collector-approvers](https://github.com/orgs/open-telemetry/teams/collector-approvers)):
+
+- [Alex Boten](https://github.com/codeboten), Lightstep
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Dmitrii Anoshin](https://github.com/dmitryax), Splunk
+- [Juraci Paixão Kröhling](https://github.com/jpkrohling), Red Hat
+- [Owais Lone](https://github.com/owais), Splunk
+
+Maintainers ([@open-telemetry/collector-maintainers](https://github.com/orgs/open-telemetry/teams/collector-maintainers)):
+
+- [Bogdan Drutu](https://github.com/BogdanDrutu), Splunk
+- [Tigran Najaryan](https://github.com/tigrannajaryan), Splunk
+
+Learn more about roles in the [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+
+Thanks to all the people who already contributed!
diff --git a/internal/otel_collector/VERSIONING.md b/internal/otel_collector/VERSIONING.md new file mode 100644 index 00000000000..87dbfc83154 --- /dev/null +++ b/internal/otel_collector/VERSIONING.md @@ -0,0 +1,100 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so that the following goal can be achieved:
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://golang.org/ref/mod#versions).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+  * If a module is version `v2` or higher, the major version of the module
+    must be included as a `/vN` at the end of the module paths used in
+    `go.mod` files (e.g., `module go.opentelemetry.io/collector/v2`, `require
+    go.opentelemetry.io/collector/v2 v2.0.1`) and in the package import path
+    (e.g., `import "go.opentelemetry.io/collector/v2/component"`). This includes the
+    paths used in `go get` commands (e.g., `go get
+    go.opentelemetry.io/collector/v2@v2.0.1`. Note there is both a `/v2` and a
+    `@v2.0.1` in that example.
One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * A single module should exist, rooted at the top level of this repository, + that contains all packages provided for use outside this repository. + * Additional modules may be created in this repository to provide for + isolation of build-time tools or other commands. Such modules should be + versioned in sync with the `go.opentelemetry.io/collector` module. + * Experimental modules still under active development will be versioned with a major + version of `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Configuration structures should be considered part of the public API and backward + compatibility maintained through any changes made to configuration structures. + * Because configuration structures are typically instantiated through unmarshalling + a serialized representation of the structure, and not through structure literals, + additive changes to the set of exported fields in a configuration structure are + not considered to break backward compatibility. + * Struct tags used to configure serialization mechanisms (`yaml:`, `mapstructure:`, etc) + are to be considered part of the structure definition and must maintain compatibility + to the same extent as the structure. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://golang.org/ref/mod#versions). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor/v2`, `require + github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor/v2 v2.0.1`) and in the + package import path (e.g., `import + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor/v2@v2.0.1`. Note there + is both a `/v2` and a `@v2.0.1` in that example. One way to think about + it is that the module name now includes the `/v2`, so include `/v2` + whenever you are using the module name). + * If a module is version `v0` or `v1`, do not include the major version + in either the module path or the import path. + * Configuration structures should be considered part of the public API and backward + compatibility maintained through any changes made to configuration structures. 
+    * Because configuration structures are typically instantiated through unmarshalling
+      a serialized representation of the structure, and not through structure literals,
+      additive changes to the set of exported fields in a configuration structure are
+      not considered to break backward compatibility.
+  * Modules will be used to encapsulate receivers, processors, exporters,
+    extensions, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned with a major
+    version of `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Mature modules for which we guarantee a stable public API will
+    be versioned with a major version of `v1` or greater.
+  * All stable contrib modules that share a major version with this project
+    will use the same full version.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only included change will be an update to that module's dependency
+      on this project's stable APIs.
+  * Contrib modules will be kept up to date with this project's releases.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
diff --git a/internal/otel_collector/client/client.go b/internal/otel_collector/client/client.go new file mode 100644 index 00000000000..6b134a30abf --- /dev/null +++ b/internal/otel_collector/client/client.go @@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package client contains generic representations of clients connecting to different receivers.
+package client
+
+import (
+	"context"
+	"net"
+	"net/http"
+
+	"google.golang.org/grpc/peer"
+)
+
+type ctxKey struct{}
+
+// Client represents a generic client that sends data to any receiver supported by the Collector.
+type Client struct {
+	IP string
+}
+
+// NewContext takes an existing context and derives a new context with the client value stored on it.
+func NewContext(ctx context.Context, c *Client) context.Context {
+	return context.WithValue(ctx, ctxKey{}, c)
+}
+
+// FromContext takes a context and returns a Client value from it, if present.
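+//
+// A minimal usage sketch (assumes a receiver stored the Client earlier via
+// NewContext):
+//
+//	if c, ok := client.FromContext(ctx); ok {
+//		log.Printf("data received from %s", c.IP)
+//	}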
+func FromContext(ctx context.Context) (*Client, bool) { + c, ok := ctx.Value(ctxKey{}).(*Client) + return c, ok +} + +// FromGRPC takes a GRPC context and tries to extract client information from it +func FromGRPC(ctx context.Context) (*Client, bool) { + if p, ok := peer.FromContext(ctx); ok { + ip := parseIP(p.Addr.String()) + if ip != "" { + return &Client{ip}, true + } + } + return nil, false +} + +// FromHTTP takes a net/http Request object and tries to extract client information from it +func FromHTTP(r *http.Request) (*Client, bool) { + ip := parseIP(r.RemoteAddr) + if ip == "" { + return nil, false + } + return &Client{ip}, true +} + +func parseIP(source string) string { + ipstr, _, err := net.SplitHostPort(source) + if err == nil { + return ipstr + } + ip := net.ParseIP(source) + if ip != nil { + return ip.String() + } + return "" +} diff --git a/internal/otel_collector/cmd/otelcol/Dockerfile b/internal/otel_collector/cmd/otelcol/Dockerfile new file mode 100644 index 00000000000..f80bab5f790 --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/Dockerfile @@ -0,0 +1,20 @@ +FROM alpine:3.13 as certs +RUN apk --update add ca-certificates + +FROM alpine:3.13 AS otelcol +COPY otelcol / +# Note that this shouldn't be necessary, but in some cases the file seems to be +# copied with the execute bit lost (see #1317) +RUN chmod 755 /otelcol + +FROM scratch + +ARG USER_UID=10001 +USER ${USER_UID} + +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=otelcol /otelcol / +COPY config.yaml /etc/otel/config.yaml +ENTRYPOINT ["/otelcol"] +CMD ["--config", "/etc/otel/config.yaml"] +EXPOSE 4317 55678 55679 diff --git a/internal/otel_collector/cmd/otelcol/config.yaml b/internal/otel_collector/cmd/otelcol/config.yaml new file mode 100644 index 00000000000..967dcebb40a --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/config.yaml @@ -0,0 +1,34 @@ +extensions: + memory_ballast: + size_mib: 512 + zpages: + endpoint: 0.0.0.0:55679 + +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + logging: + logLevel: debug + +service: + + pipelines: + + traces: + receivers: [otlp] + processors: [batch] + exporters: [logging] + + metrics: + receivers: [otlp] + processors: [batch] + exporters: [logging] + + extensions: [memory_ballast, zpages] diff --git a/internal/otel_collector/cmd/otelcol/main.go b/internal/otel_collector/cmd/otelcol/main.go new file mode 100644 index 00000000000..581876cb78f --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Program otelcol is the OpenTelemetry Collector that collects stats +// and traces and exports to a configured backend. 
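+//
+// The config.yaml shipped alongside this binary (see above) wires an otlp
+// receiver through a batch processor into the logging exporter for both
+// traces and metrics, with the memory_ballast and zpages extensions enabled.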
+package main + +import ( + "fmt" + "log" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/version" + "go.opentelemetry.io/collector/service" + "go.opentelemetry.io/collector/service/defaultcomponents" +) + +func main() { + factories, err := defaultcomponents.Components() + if err != nil { + log.Fatalf("failed to build default components: %v", err) + } + info := component.BuildInfo{ + Command: "otelcol", + Description: "OpenTelemetry Collector", + Version: version.Version, + } + + if err := run(service.CollectorSettings{BuildInfo: info, Factories: factories}); err != nil { + log.Fatal(err) + } +} + +func runInteractive(settings service.CollectorSettings) error { + app, err := service.New(settings) + if err != nil { + return fmt.Errorf("failed to construct the collector server: %w", err) + } + + err = app.Run() + if err != nil { + return fmt.Errorf("collector server run finished with error: %w", err) + } + + return nil +} diff --git a/internal/otel_collector/cmd/otelcol/main_others.go b/internal/otel_collector/cmd/otelcol/main_others.go new file mode 100644 index 00000000000..5f7a442d9ac --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main_others.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package main + +import "go.opentelemetry.io/collector/service" + +func run(settings service.CollectorSettings) error { + return runInteractive(settings) +} diff --git a/internal/otel_collector/cmd/otelcol/main_windows.go b/internal/otel_collector/cmd/otelcol/main_windows.go new file mode 100644 index 00000000000..fa88a4d8c49 --- /dev/null +++ b/internal/otel_collector/cmd/otelcol/main_windows.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package main + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows/svc" + + "go.opentelemetry.io/collector/service" +) + +func run(set service.CollectorSettings) error { + if useInteractiveMode, err := checkUseInteractiveMode(); err != nil { + return err + } else if useInteractiveMode { + return runInteractive(set) + } else { + return runService(set) + } +} + +func checkUseInteractiveMode() (bool, error) { + // If environment variable NO_WINDOWS_SERVICE is set with any value other + // than 0, use interactive mode instead of running as a service. 
This should + // be set in case running as a service is not possible or desired even + // though the current session is not detected to be interactive + if value, present := os.LookupEnv("NO_WINDOWS_SERVICE"); present && value != "0" { + return true, nil + } + + if isInteractiveSession, err := svc.IsAnInteractiveSession(); err != nil { + return false, fmt.Errorf("failed to determine if we are running in an interactive session %w", err) + } else { + return isInteractiveSession, nil + } +} + +func runService(set service.CollectorSettings) error { + // do not need to supply service name when startup is invoked through Service Control Manager directly + if err := svc.Run("", service.NewWindowsService(set)); err != nil { + return fmt.Errorf("failed to start service %w", err) + } + + return nil +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_fields.go b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go new file mode 100644 index 00000000000..5dbb37b3d47 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go @@ -0,0 +1,505 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "os" + "strings" +) + +const accessorSliceTemplate = `// ${fieldName} returns the ${originFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return new${returnType}(&(*ms.orig).${originFieldName}) +}` + +const accessorsSliceTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + assert.EqualValues(t, New${returnType}(), ms.${fieldName}()) + fillTest${returnType}(ms.${fieldName}()) + testVal${fieldName} := generateTest${returnType}() + assert.EqualValues(t, testVal${fieldName}, ms.${fieldName}()) +}` + +const accessorsMessageValueTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return new${returnType}(&(*ms.orig).${originFieldName}) +}` + +const accessorsMessageValueTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + fillTest${returnType}(ms.${fieldName}()) + assert.EqualValues(t, generateTest${returnType}(), ms.${fieldName}()) +}` + +const accessorsPrimitiveTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return (*ms.orig).${originFieldName} +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = v +}` + +const accessorsOneofPrimitiveTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return (*ms.orig).GetAs${fieldType}() +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. 
+func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = &${originFullName}_As${fieldType}{ + As${fieldType}: v, + } +}` + +const accessorsPrimitiveTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { + ms := New${structName}() + assert.EqualValues(t, ${defaultVal}, ms.${fieldName}()) + testVal${fieldName} := ${testValue} + ms.Set${fieldName}(testVal${fieldName}) + assert.EqualValues(t, testVal${fieldName}, ms.${fieldName}()) +}` + +const accessorsPrimitiveTypedTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return ${returnType}((*ms.orig).${originFieldName}) +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = ${rawType}(v) +}` + +const accessorsPrimitiveWithoutSetterTypedTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return ${returnType}((*ms.orig).${originFieldName}) +}` + +const accessorsPrimitiveStructTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return ${returnType}{orig: ((*ms.orig).${originFieldName})} +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = v.orig +}` + +type baseField interface { + generateAccessors(ms baseStruct, sb *strings.Builder) + + generateAccessorsTest(ms baseStruct, sb *strings.Builder) + + generateSetWithTestValue(sb *strings.Builder) + + generateCopyToValue(sb *strings.Builder) +} + +type sliceField struct { + fieldName string + originFieldName string + returnSlice baseSlice +} + +func (sf *sliceField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorSliceTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return sf.fieldName + case "returnType": + return sf.returnSlice.getName() + case "originFieldName": + return sf.originFieldName + default: + panic(name) + } + })) +} + +func (sf *sliceField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsSliceTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return sf.fieldName + case "returnType": + return sf.returnSlice.getName() + default: + panic(name) + } + })) +} + +func (sf *sliceField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\tfillTest" + sf.returnSlice.getName() + "(tv." + sf.fieldName + "())") +} + +func (sf *sliceField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tms." + sf.fieldName + "().CopyTo(dest." 
+ sf.fieldName + "())") +} + +var _ baseField = (*sliceField)(nil) + +type messageValueField struct { + fieldName string + originFieldName string + returnMessage *messageValueStruct +} + +func (mf *messageValueField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsMessageValueTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return mf.fieldName + case "lowerFieldName": + return strings.ToLower(mf.fieldName) + case "returnType": + return mf.returnMessage.structName + case "structOriginFullName": + return mf.returnMessage.originFullName + case "originFieldName": + return mf.originFieldName + default: + panic(name) + } + })) +} + +func (mf *messageValueField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsMessageValueTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return mf.fieldName + case "returnType": + return mf.returnMessage.structName + default: + panic(name) + } + })) +} + +func (mf *messageValueField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\tfillTest" + mf.returnMessage.structName + "(tv." + mf.fieldName + "())") +} + +func (mf *messageValueField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tms." + mf.fieldName + "().CopyTo(dest." + mf.fieldName + "())") +} + +var _ baseField = (*messageValueField)(nil) + +type primitiveField struct { + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string +} + +func (pf *primitiveField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return pf.fieldName + case "lowerFieldName": + return strings.ToLower(pf.fieldName) + case "returnType": + return pf.returnType + case "originFieldName": + return pf.originFieldName + default: + panic(name) + } + })) +} + +func (pf *primitiveField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return pf.defaultVal + case "fieldName": + return pf.fieldName + case "testValue": + return pf.testVal + default: + panic(name) + } + })) +} + +func (pf *primitiveField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + pf.fieldName + "(" + pf.testVal + ")") +} + +func (pf *primitiveField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + pf.fieldName + "(ms." + pf.fieldName + "())") +} + +var _ baseField = (*primitiveField)(nil) + +// Types that has defined a custom type (e.g. "type Timestamp uint64") +type primitiveTypedField struct { + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string + rawType string + manualSetter bool +} + +func (ptf *primitiveTypedField) generateAccessors(ms baseStruct, sb *strings.Builder) { + template := accessorsPrimitiveTypedTemplate + if ptf.manualSetter { + // Generate code without setter. Setter will be manually coded. 
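+		// (accessorsPrimitiveWithoutSetterTypedTemplate emits only the getter;
+		// no Set${fieldName} accessor is generated by this template.)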
+ template = accessorsPrimitiveWithoutSetterTypedTemplate + } + + sb.WriteString(os.Expand(template, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return ptf.fieldName + case "lowerFieldName": + return strings.ToLower(ptf.fieldName) + case "returnType": + return ptf.returnType + case "rawType": + return ptf.rawType + case "originFieldName": + return ptf.originFieldName + default: + panic(name) + } + })) +} + +func (ptf *primitiveTypedField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return ptf.defaultVal + case "fieldName": + return ptf.fieldName + case "testValue": + return ptf.testVal + default: + panic(name) + } + })) +} + +func (ptf *primitiveTypedField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + ptf.fieldName + "(" + ptf.testVal + ")") +} + +func (ptf *primitiveTypedField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + ptf.fieldName + "(ms." + ptf.fieldName + "())") +} + +var _ baseField = (*primitiveTypedField)(nil) + +// Types that has defined a custom type (e.g. "type TraceID struct {}") +type primitiveStructField struct { + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string +} + +func (ptf *primitiveStructField) generateAccessors(ms baseStruct, sb *strings.Builder) { + template := accessorsPrimitiveStructTemplate + sb.WriteString(os.Expand(template, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return ptf.fieldName + case "lowerFieldName": + return strings.ToLower(ptf.fieldName) + case "returnType": + return ptf.returnType + case "originFieldName": + return ptf.originFieldName + default: + panic(name) + } + })) +} + +func (ptf *primitiveStructField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return ptf.defaultVal + case "fieldName": + return ptf.fieldName + case "testValue": + return ptf.testVal + default: + panic(name) + } + })) +} + +func (ptf *primitiveStructField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + ptf.fieldName + "(" + ptf.testVal + ")") +} + +func (ptf *primitiveStructField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + ptf.fieldName + "(ms." + ptf.fieldName + "())") +} + +var _ baseField = (*primitiveStructField)(nil) + +// oneofField is used in case where the proto defines an "oneof". +type oneofField struct { + copyFuncName string + originFieldName string + testVal string + fillTestName string +} + +func (one oneofField) generateAccessors(baseStruct, *strings.Builder) {} + +func (one oneofField) generateAccessorsTest(baseStruct, *strings.Builder) {} + +func (one oneofField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\t(*tv.orig)." + one.originFieldName + " = " + one.testVal + "\n") + sb.WriteString("\tfillTest" + one.fillTestName + "(tv." 
+ one.fillTestName + "())") +} + +func (one oneofField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\t" + one.copyFuncName + "(ms.orig, dest.orig)") +} + +var _ baseField = (*oneofField)(nil) + +type oneOfPrimitiveValue struct { + name string + defaultVal string + testVal string + returnType string + originFieldName string + originFullName string + fieldType string +} + +func (opv *oneOfPrimitiveValue) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsOneofPrimitiveTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return opv.name + case "lowerFieldName": + return strings.ToLower(opv.name) + case "returnType": + return opv.returnType + case "originFieldName": + return opv.originFieldName + case "originFullName": + return opv.originFullName + case "fieldType": + return opv.fieldType + default: + panic(name) + } + })) + sb.WriteString("\n") +} + +func (opv *oneOfPrimitiveValue) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return opv.defaultVal + case "fieldName": + return opv.name + case "testValue": + return opv.testVal + default: + panic(name) + } + })) + sb.WriteString("\n") +} + +func (opv *oneOfPrimitiveValue) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\t tv.Set" + opv.name + "(" + opv.testVal + ")\n") +} + +func (opv *oneOfPrimitiveValue) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tcase MetricValueType" + opv.fieldType + ":\n") + sb.WriteString("\t dest.Set" + opv.name + "(ms." + opv.name + "())\n") +} + +var _ baseField = (*oneOfPrimitiveValue)(nil) + +type numberField struct { + fields []*oneOfPrimitiveValue +} + +func (nf *numberField) generateAccessors(ms baseStruct, sb *strings.Builder) { + for _, field := range nf.fields { + field.generateAccessors(ms, sb) + } +} + +func (nf *numberField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + for _, field := range nf.fields { + field.generateAccessorsTest(ms, sb) + } +} + +func (nf *numberField) generateSetWithTestValue(sb *strings.Builder) { + for _, field := range nf.fields { + field.generateSetWithTestValue(sb) + // TODO: this test should be generated for all number values, + // for now, it's ok to only set one value + return + } +} + +func (nf *numberField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tswitch ms.Type() {\n") + for _, field := range nf.fields { + field.generateCopyToValue(sb) + } + sb.WriteString("\t}\n") +} + +var _ baseField = (*numberField)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_slices.go b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go new file mode 100644 index 00000000000..45cf6b901b4 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go @@ -0,0 +1,515 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "os" + "strings" +) + +const commonSliceTemplate = ` +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ${structName}) MoveAndAppendTo(dest ${structName}) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ${structName}) RemoveIf(f func(${elementName}) bool) { + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +}` + +const commonSliceTestTemplate = ` + +func Test${structName}_MoveAndAppendTo(t *testing.T) { + // Test MoveAndAppendTo to empty + expectedSlice := generateTest${structName}() + dest := New${structName}() + src := generateTest${structName}() + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo empty slice + src.MoveAndAppendTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + assert.EqualValues(t, 0, src.Len()) + assert.EqualValues(t, expectedSlice.Len(), dest.Len()) + + // Test MoveAndAppendTo not empty slice + generateTest${structName}().MoveAndAppendTo(dest) + assert.EqualValues(t, 2*expectedSlice.Len(), dest.Len()) + for i := 0; i < expectedSlice.Len(); i++ { + assert.EqualValues(t, expectedSlice.At(i), dest.At(i)) + assert.EqualValues(t, expectedSlice.At(i), dest.At(i+expectedSlice.Len())) + } +} + +func Test${structName}_RemoveIf(t *testing.T) { + // Test RemoveIf on empty slice + emptySlice := New${structName}() + emptySlice.RemoveIf(func (el ${elementName}) bool { + t.Fail() + return false + }) + + // Test RemoveIf + filtered := generateTest${structName}() + pos := 0 + filtered.RemoveIf(func (el ${elementName}) bool { + pos++ + return pos%3 == 0 + }) + assert.Equal(t, 5, filtered.Len()) +}` + +const commonSliceGenerateTest = `func generateTest${structName}() ${structName} { + tv := New${structName}() + fillTest${structName}(tv) + return tv +} + +func fillTest${structName}(tv ${structName}) { + l := 7 + tv.EnsureCapacity(l) + for i := 0; i < l; i++ { + fillTest${elementName}(tv.AppendEmpty()) + } +}` + +const slicePtrTemplate = `// ${structName} logically represents a slice of ${elementName}. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use New${structName} function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ${structName} struct { + // orig points to the slice ${originName} field contained somewhere else. + // We use pointer-to-slice to be able to modify it in functions like EnsureCapacity. 
+	orig *[]*${originName}
+}
+
+func new${structName}(orig *[]*${originName}) ${structName} {
+	return ${structName}{orig}
+}
+
+// New${structName} creates a ${structName} with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func New${structName}() ${structName} {
+	orig := []*${originName}(nil)
+	return ${structName}{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly created instance with "New${structName}()".
+func (es ${structName}) Len() int {
+	return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//	for i := 0; i < es.Len(); i++ {
+//		e := es.At(i)
+//		... // Do something with the element
+//	}
+func (es ${structName}) At(ix int) ${elementName} {
+	return new${elementName}((*es.orig)[ix])
+}
+
+// CopyTo copies all elements from the current slice to the dest.
+func (es ${structName}) CopyTo(dest ${structName}) {
+	srcLen := es.Len()
+	destCap := cap(*dest.orig)
+	if srcLen <= destCap {
+		(*dest.orig) = (*dest.orig)[:srcLen:destCap]
+		for i := range *es.orig {
+			new${elementName}((*es.orig)[i]).CopyTo(new${elementName}((*dest.orig)[i]))
+		}
+		return
+	}
+	origs := make([]${originName}, srcLen)
+	wrappers := make([]*${originName}, srcLen)
+	for i := range *es.orig {
+		wrappers[i] = &origs[i]
+		new${elementName}((*es.orig)[i]).CopyTo(new${elementName}(wrappers[i]))
+	}
+	*dest.orig = wrappers
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new ${structName} can be initialized:
+//	es := New${structName}()
+//	es.EnsureCapacity(4)
+//	for i := 0; i < 4; i++ {
+//		e := es.AppendEmpty()
+//		// Here should set all the values for e.
+//	}
+func (es ${structName}) EnsureCapacity(newCap int) {
+	oldCap := cap(*es.orig)
+	if newCap <= oldCap {
+		return
+	}
+
+	newOrig := make([]*${originName}, len(*es.orig), newCap)
+	copy(newOrig, *es.orig)
+	*es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty ${elementName}.
+// It returns the newly added ${elementName}.
+func (es ${structName}) AppendEmpty() ${elementName} {
+	*es.orig = append(*es.orig, &${originName}{})
+	return es.At(es.Len() - 1)
+}
+
+// Sort sorts the ${elementName} elements within ${structName} given the
+// provided less function so that two instances of ${structName}
+// can be compared.
+//
+// Returns the same instance to allow nicer code like:
+//	lessFunc := func(a, b ${elementName}) bool {
+//		return a.Name() < b.Name() // choose any comparison here
+//	}
+//	assert.EqualValues(t, expected.Sort(lessFunc), actual.Sort(lessFunc))
+func (es ${structName}) Sort(less func(a, b ${elementName}) bool) ${structName} {
+	sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+	return es
+}
+`
+
+const slicePtrTestTemplate = `func Test${structName}(t *testing.T) {
+	es := New${structName}()
+	assert.EqualValues(t, 0, es.Len())
+	es = new${structName}(&[]*${originName}{})
+	assert.EqualValues(t, 0, es.Len())
+
+	es.EnsureCapacity(7)
+	emptyVal := new${elementName}(&${originName}{})
+	testVal := generateTest${elementName}()
+	assert.EqualValues(t, 7, cap(*es.orig))
+	for i := 0; i < es.Len(); i++ {
+		el := es.AppendEmpty()
+		assert.EqualValues(t, emptyVal, el)
+		fillTest${elementName}(el)
+		assert.EqualValues(t, testVal, el)
+	}
+}
+
+func Test${structName}_CopyTo(t *testing.T) {
+	dest := New${structName}()
+	// Test CopyTo to empty
+	New${structName}().CopyTo(dest)
+	assert.EqualValues(t, New${structName}(), dest)
+
+	// Test CopyTo larger slice
+	generateTest${structName}().CopyTo(dest)
+	assert.EqualValues(t, generateTest${structName}(), dest)
+
+	// Test CopyTo same size slice
+	generateTest${structName}().CopyTo(dest)
+	assert.EqualValues(t, generateTest${structName}(), dest)
+}
+
+func Test${structName}_EnsureCapacity(t *testing.T) {
+	es := generateTest${structName}()
+	// Test ensure smaller capacity.
+	const ensureSmallLen = 4
+	expectedEs := make(map[*${originName}]bool)
+	for i := 0; i < es.Len(); i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, es.Len(), len(expectedEs))
+	es.EnsureCapacity(ensureSmallLen)
+	assert.Less(t, ensureSmallLen, es.Len())
+	foundEs := make(map[*${originName}]bool, es.Len())
+	for i := 0; i < es.Len(); i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+
+	// Test ensure larger capacity
+	const ensureLargeLen = 9
+	oldLen := es.Len()
+	expectedEs = make(map[*${originName}]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		expectedEs[es.At(i).orig] = true
+	}
+	assert.Equal(t, oldLen, len(expectedEs))
+	es.EnsureCapacity(ensureLargeLen)
+	assert.Equal(t, ensureLargeLen, cap(*es.orig))
+	foundEs = make(map[*${originName}]bool, oldLen)
+	for i := 0; i < oldLen; i++ {
+		foundEs[es.At(i).orig] = true
+	}
+	assert.EqualValues(t, expectedEs, foundEs)
+}`
+
+const sliceValueTemplate = `// ${structName} logically represents a slice of ${elementName}.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use New${structName} function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ${structName} struct {
+	// orig points to the slice ${originName} field contained somewhere else.
+	// We use pointer-to-slice to be able to modify it in functions like EnsureCapacity.
+	orig *[]${originName}
+}
+
+func new${structName}(orig *[]${originName}) ${structName} {
+	return ${structName}{orig}
+}
+
+// New${structName} creates a ${structName} with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func New${structName}() ${structName} {
+	orig := []${originName}(nil)
+	return ${structName}{&orig}
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly created instance with "New${structName}()".
+func (es ${structName}) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ${structName}) At(ix int) ${elementName} { + return new${elementName}(&(*es.orig)[ix]) +} + +// CopyTo copies all elements from the current slice to the dest. +func (es ${structName}) CopyTo(dest ${structName}) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]${originName}, srcLen) + } + + for i := range *es.orig { + new${elementName}(&(*es.orig)[i]).CopyTo(new${elementName}(&(*dest.orig)[i])) + } +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ${structName} can be initialized: +// es := New${structName}() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ${structName}) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]${originName}, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ${elementName}. +// It returns the newly added ${elementName}. +func (es ${structName}) AppendEmpty() ${elementName} { + *es.orig = append(*es.orig, ${originName}{}) + return es.At(es.Len() - 1) +}` + +const sliceValueTestTemplate = `func Test${structName}(t *testing.T) { + es := New${structName}() + assert.EqualValues(t, 0, es.Len()) + es = new${structName}(&[]${originName}{}) + assert.EqualValues(t, 0, es.Len()) + + es.EnsureCapacity(7) + emptyVal := new${elementName}(&${originName}{}) + testVal := generateTest${elementName}() + assert.EqualValues(t, 7, cap(*es.orig)) + for i := 0; i < es.Len(); i++ { + el := es.AppendEmpty() + assert.EqualValues(t, emptyVal, el) + fillTest${elementName}(el) + assert.EqualValues(t, testVal, el) + } +} + +func Test${structName}_CopyTo(t *testing.T) { + dest := New${structName}() + // Test CopyTo to empty + New${structName}().CopyTo(dest) + assert.EqualValues(t, New${structName}(), dest) + + // Test CopyTo larger slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) + + // Test CopyTo same size slice + generateTest${structName}().CopyTo(dest) + assert.EqualValues(t, generateTest${structName}(), dest) +} + +func Test${structName}_EnsureCapacity(t *testing.T) { + es := generateTest${structName}() + // Test ensure smaller capacity. 
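+	// Element identity is tracked through the At(i).orig pointers: a shrinking
+	// (no-op) EnsureCapacity call must leave the existing elements, and hence
+	// their addresses, untouched.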
+ const ensureSmallLen = 4 + expectedEs := make(map[*${originName}]bool) + for i := 0; i < es.Len(); i++ { + expectedEs[es.At(i).orig] = true + } + assert.Equal(t, es.Len(), len(expectedEs)) + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + foundEs := make(map[*${originName}]bool, es.Len()) + for i := 0; i < es.Len(); i++ { + foundEs[es.At(i).orig] = true + } + assert.EqualValues(t, expectedEs, foundEs) + + // Test ensure larger capacity + const ensureLargeLen = 9 + oldLen := es.Len() + assert.Equal(t, oldLen, len(expectedEs)) + es.EnsureCapacity(ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) +}` + +type baseSlice interface { + getName() string +} + +// Will generate code only for a slice of pointer fields. +type sliceOfPtrs struct { + structName string + element *messageValueStruct +} + +func (ss *sliceOfPtrs) getName() string { + return ss.structName +} + +func (ss *sliceOfPtrs) generateStruct(sb *strings.Builder) { + sb.WriteString(os.Expand(slicePtrTemplate, ss.templateFields())) + sb.WriteString(os.Expand(commonSliceTemplate, ss.templateFields())) +} + +func (ss *sliceOfPtrs) generateTests(sb *strings.Builder) { + sb.WriteString(os.Expand(slicePtrTestTemplate, ss.templateFields())) + sb.WriteString(os.Expand(commonSliceTestTemplate, ss.templateFields())) +} + +func (ss *sliceOfPtrs) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(commonSliceGenerateTest, ss.templateFields())) +} + +func (ss *sliceOfPtrs) templateFields() func(name string) string { + return func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + } +} + +var _ baseStruct = (*sliceOfPtrs)(nil) + +// Will generate code only for a slice of value fields. +type sliceOfValues struct { + structName string + element *messageValueStruct +} + +func (ss *sliceOfValues) getName() string { + return ss.structName +} + +func (ss *sliceOfValues) generateStruct(sb *strings.Builder) { + sb.WriteString(os.Expand(sliceValueTemplate, ss.templateFields())) + sb.WriteString(os.Expand(commonSliceTemplate, ss.templateFields())) +} + +func (ss *sliceOfValues) generateTests(sb *strings.Builder) { + sb.WriteString(os.Expand(sliceValueTestTemplate, ss.templateFields())) + sb.WriteString(os.Expand(commonSliceTestTemplate, ss.templateFields())) +} + +func (ss *sliceOfValues) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(commonSliceGenerateTest, ss.templateFields())) +} + +func (ss *sliceOfValues) templateFields() func(name string) string { + return func(name string) string { + switch name { + case "structName": + return ss.structName + case "elementName": + return ss.element.structName + case "originName": + return ss.element.originFullName + default: + panic(name) + } + } +} + +var _ baseStruct = (*sliceOfValues)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_structs.go b/internal/otel_collector/cmd/pdatagen/internal/base_structs.go new file mode 100644 index 00000000000..be16e07a946 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/base_structs.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"os"
+	"strings"
+)
+
+const messageValueTemplate = `${description}
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use New${structName} function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+// ${deprecated}
+type ${structName} struct {
+	orig *${originName}
+}
+
+func new${structName}(orig *${originName}) ${structName} {
+	return ${structName}{orig: orig}
+}
+
+// New${structName} creates a new empty ${structName}.
+//
+// This must be used only in testing code since no "Set" method is available.
+func New${structName}() ${structName} {
+	return new${structName}(&${originName}{})
+}`
+
+const messageValueCopyToHeaderTemplate = `// CopyTo copies all properties from the current struct to the dest.
+func (ms ${structName}) CopyTo(dest ${structName}) {`
+
+const messageValueCopyToFooterTemplate = `}`
+
+const messageValueTestTemplate = `
+func Test${structName}_CopyTo(t *testing.T) {
+	ms := New${structName}()
+	generateTest${structName}().CopyTo(ms)
+	assert.EqualValues(t, generateTest${structName}(), ms)
+}`
+
+const messageValueGenerateTestTemplate = `func generateTest${structName}() ${structName} {
+	tv := New${structName}()
+	fillTest${structName}(tv)
+	return tv
+}`
+
+const messageValueFillTestHeaderTemplate = `func fillTest${structName}(tv ${structName}) {`
+const messageValueFillTestFooterTemplate = `}`
+
+const newLine = "\n"
+
+type baseStruct interface {
+	getName() string
+
+	generateStruct(sb *strings.Builder)
+
+	generateTests(sb *strings.Builder)
+
+	generateTestValueHelpers(sb *strings.Builder)
+}
+
+type messageValueStruct struct {
+	structName     string
+	description    string
+	originFullName string
+	deprecated     string
+	fields         []baseField
+}
+
+func (ms *messageValueStruct) getName() string {
+	return ms.structName
+}
+
+func (ms *messageValueStruct) generateStruct(sb *strings.Builder) {
+	sb.WriteString(os.Expand(messageValueTemplate, func(name string) string {
+		switch name {
+		case "structName":
+			return ms.structName
+		case "originName":
+			return ms.originFullName
+		case "description":
+			return ms.description
+		case "deprecated":
+			return ms.deprecated
+		default:
+			panic(name)
+		}
+	}))
+	// Write accessors for the struct
+	for _, f := range ms.fields {
+		sb.WriteString(newLine + newLine)
+		f.generateAccessors(ms, sb)
+	}
+	sb.WriteString(newLine + newLine)
+	sb.WriteString(os.Expand(messageValueCopyToHeaderTemplate, func(name string) string {
+		switch name {
+		case "structName":
+			return ms.structName
+		default:
+			panic(name)
+		}
+	}))
+	// Write accessors CopyTo for the struct
+	for _, f := range ms.fields {
+		sb.WriteString(newLine)
+		f.generateCopyToValue(sb)
+	}
+	sb.WriteString(newLine)
+	sb.WriteString(os.Expand(messageValueCopyToFooterTemplate, func(name string) string {
+		panic(name)
+	}))
+}
+
+func (ms *messageValueStruct) generateTests(sb *strings.Builder) {
+	sb.WriteString(os.Expand(messageValueTestTemplate, func(name string) string {
+		switch name {
+		case "structName":
+			return
ms.structName + default: + panic(name) + } + })) + // Write accessors tests for the struct + for _, f := range ms.fields { + sb.WriteString(newLine + newLine) + f.generateAccessorsTest(ms, sb) + } +} + +func (ms *messageValueStruct) generateTestValueHelpers(sb *strings.Builder) { + sb.WriteString(os.Expand(messageValueGenerateTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + case "originName": + return ms.originFullName + default: + panic(name) + } + })) + sb.WriteString(newLine + newLine) + sb.WriteString(os.Expand(messageValueFillTestHeaderTemplate, func(name string) string { + switch name { + case "structName": + return ms.structName + default: + panic(name) + } + })) + // Write accessors test value for the struct + for _, f := range ms.fields { + sb.WriteString(newLine) + f.generateSetWithTestValue(sb) + } + sb.WriteString(newLine) + sb.WriteString(os.Expand(messageValueFillTestFooterTemplate, func(name string) string { + panic(name) + })) +} + +var _ baseStruct = (*messageValueStruct)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/common_structs.go b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go new file mode 100644 index 00000000000..c532a08ed65 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +var commonFile = &File{ + Name: "common", + imports: []string{ + `otlpcommon "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + ``, + `otlpcommon "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1"`, + }, + structs: []baseStruct{ + instrumentationLibrary, + anyValueArray, + }, +} + +var instrumentationLibrary = &messageValueStruct{ + structName: "InstrumentationLibrary", + description: "// InstrumentationLibrary is a message representing the instrumentation library information.", + originFullName: "otlpcommon.InstrumentationLibrary", + fields: []baseField{ + nameField, + &primitiveField{ + fieldName: "Version", + originFieldName: "Version", + returnType: "string", + defaultVal: `""`, + testVal: `"test_version"`, + }, + }, +} + +// This will not be generated by this class. +// Defined here just to be available as returned message for the fields. 
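+// As a rough sketch, a message field that returns this slice generates an
+// accessor along the lines of (the exact shape depends on the sliceField
+// accessor template, defined elsewhere):
+//
+//	func (ms Resource) Attributes() AttributeMap {
+//		return newAttributeMap(&ms.orig.Attributes)
+//	}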
+var attributeMap = &sliceOfPtrs{
+	structName: "AttributeMap",
+	element:    attributeKeyValue,
+}
+
+var attributeKeyValue = &messageValueStruct{}
+
+var instrumentationLibraryField = &messageValueField{
+	fieldName:       "InstrumentationLibrary",
+	originFieldName: "InstrumentationLibrary",
+	returnMessage:   instrumentationLibrary,
+}
+
+var startTimeField = &primitiveTypedField{
+	fieldName:       "StartTimestamp",
+	originFieldName: "StartTimeUnixNano",
+	returnType:      "Timestamp",
+	rawType:         "uint64",
+	defaultVal:      "Timestamp(0)",
+	testVal:         "Timestamp(1234567890)",
+}
+
+var timeField = &primitiveTypedField{
+	fieldName:       "Timestamp",
+	originFieldName: "TimeUnixNano",
+	returnType:      "Timestamp",
+	rawType:         "uint64",
+	defaultVal:      "Timestamp(0)",
+	testVal:         "Timestamp(1234567890)",
+}
+
+var endTimeField = &primitiveTypedField{
+	fieldName:       "EndTimestamp",
+	originFieldName: "EndTimeUnixNano",
+	returnType:      "Timestamp",
+	rawType:         "uint64",
+	defaultVal:      "Timestamp(0)",
+	testVal:         "Timestamp(1234567890)",
+}
+
+var attributes = &sliceField{
+	fieldName:       "Attributes",
+	originFieldName: "Attributes",
+	returnSlice:     attributeMap,
+}
+
+var nameField = &primitiveField{
+	fieldName:       "Name",
+	originFieldName: "Name",
+	returnType:      "string",
+	defaultVal:      `""`,
+	testVal:         `"test_name"`,
+}
+
+var anyValue = &messageValueStruct{
+	structName:     "AttributeValue",
+	originFullName: "otlpcommon.AnyValue",
+}
+
+var anyValueArray = &sliceOfValues{
+	structName: "AnyValueArray",
+	element:    anyValue,
+}
+
+var schemaURLField = &primitiveField{
+	fieldName:       "SchemaUrl",
+	originFieldName: "SchemaUrl",
+	returnType:      "string",
+	defaultVal:      `""`,
+	testVal:         `"https://opentelemetry.io/schemas/1.5.0"`,
+}
diff --git a/internal/otel_collector/cmd/pdatagen/internal/files.go b/internal/otel_collector/cmd/pdatagen/internal/files.go
new file mode 100644
index 00000000000..62b9b673c38
--- /dev/null
+++ b/internal/otel_collector/cmd/pdatagen/internal/files.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "strings"
+
+const header = `// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "go run cmd/pdatagen/main.go".
+
+package pdata`
+
+// AllFiles is a list of all files that need to be generated.
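+// main.go consumes this list: each File is rendered into
+// ./model/pdata/generated_<Name>.go plus a matching generated_<Name>_test.go.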
+var AllFiles = []*File{ + commonFile, + metricsFile, + resourceFile, + traceFile, + logFile, +} + +// File represents the struct for one generated file. +type File struct { + Name string + imports []string + testImports []string + // Can be any of sliceOfPtrs, sliceOfValues, messageValueStruct, or messagePtrStruct + structs []baseStruct +} + +// GenerateFile generates the configured data structures for this File. +func (f *File) GenerateFile() string { + var sb strings.Builder + + // Write headers + sb.WriteString(header) + sb.WriteString(newLine + newLine) + // Add imports + sb.WriteString("import (" + newLine) + for _, imp := range f.imports { + if imp != "" { + sb.WriteString("\t" + imp + newLine) + } else { + sb.WriteString(newLine) + } + } + sb.WriteString(")") + // Write all structs + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateStruct(&sb) + } + sb.WriteString(newLine) + return sb.String() +} + +// GenerateTestFile generates tests for the configured data structures for this File. +func (f *File) GenerateTestFile() string { + var sb strings.Builder + + // Write headers + sb.WriteString(header) + sb.WriteString(newLine + newLine) + // Add imports + sb.WriteString("import (" + newLine) + for _, imp := range f.testImports { + if imp != "" { + sb.WriteString("\t" + imp + newLine) + } else { + sb.WriteString(newLine) + } + } + sb.WriteString(")") + // Write all tests + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateTests(&sb) + } + // Write all tests generate value + for _, s := range f.structs { + sb.WriteString(newLine + newLine) + s.generateTestValueHelpers(&sb) + } + sb.WriteString(newLine) + return sb.String() +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/log_structs.go b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go new file mode 100644 index 00000000000..e22c837a3c9 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go @@ -0,0 +1,141 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
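+
+// This file wires up the pdata log hierarchy (ResourceLogsSlice -> ResourceLogs
+// -> InstrumentationLibraryLogs -> LogSlice -> LogRecord). For reference,
+// GenerateFile in files.go renders it into model/pdata/generated_log.go, with
+// the import block assembled from the imports slice below, roughly:
+//
+//	import (
+//		"sort"
+//
+//		otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1"
+//	)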
+
+package internal
+
+var logFile = &File{
+	Name: "log",
+	imports: []string{
+		`"sort"`,
+		``,
+		`otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1"`,
+	},
+	testImports: []string{
+		`"testing"`,
+		``,
+		`"github.com/stretchr/testify/assert"`,
+		``,
+		`otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1"`,
+	},
+	structs: []baseStruct{
+		resourceLogsSlice,
+		resourceLogs,
+		instrumentationLibraryLogsSlice,
+		instrumentationLibraryLogs,
+		logSlice,
+		logRecord,
+	},
+}
+
+var resourceLogsSlice = &sliceOfPtrs{
+	structName: "ResourceLogsSlice",
+	element:    resourceLogs,
+}
+
+var resourceLogs = &messageValueStruct{
+	structName:     "ResourceLogs",
+	description:    "// ResourceLogs is a collection of logs from a Resource.",
+	originFullName: "otlplogs.ResourceLogs",
+	fields: []baseField{
+		resourceField,
+		schemaURLField,
+		&sliceField{
+			fieldName:       "InstrumentationLibraryLogs",
+			originFieldName: "InstrumentationLibraryLogs",
+			returnSlice:     instrumentationLibraryLogsSlice,
+		},
+	},
+}
+
+var instrumentationLibraryLogsSlice = &sliceOfPtrs{
+	structName: "InstrumentationLibraryLogsSlice",
+	element:    instrumentationLibraryLogs,
+}
+
+var instrumentationLibraryLogs = &messageValueStruct{
+	structName:     "InstrumentationLibraryLogs",
+	description:    "// InstrumentationLibraryLogs is a collection of logs from a LibraryInstrumentation.",
+	originFullName: "otlplogs.InstrumentationLibraryLogs",
+	fields: []baseField{
+		instrumentationLibraryField,
+		schemaURLField,
+		&sliceField{
+			fieldName:       "Logs",
+			originFieldName: "Logs",
+			returnSlice:     logSlice,
+		},
+	},
+}
+
+var logSlice = &sliceOfPtrs{
+	structName: "LogSlice",
+	element:    logRecord,
+}
+
+var logRecord = &messageValueStruct{
+	structName:     "LogRecord",
+	description:    "// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.\n",
+	originFullName: "otlplogs.LogRecord",
+	fields: []baseField{
+		&primitiveTypedField{
+			fieldName:       "Timestamp",
+			originFieldName: "TimeUnixNano",
+			returnType:      "Timestamp",
+			rawType:         "uint64",
+			defaultVal:      "Timestamp(0)",
+			testVal:         "Timestamp(1234567890)",
+		},
+		traceIDField,
+		spanIDField,
+		&primitiveTypedField{
+			fieldName:       "Flags",
+			originFieldName: "Flags",
+			returnType:      "uint32",
+			rawType:         "uint32",
+			defaultVal:      `uint32(0)`,
+			testVal:         `uint32(0x01)`,
+		},
+		&primitiveField{
+			fieldName:       "SeverityText",
+			originFieldName: "SeverityText",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"INFO"`,
+		},
+		&primitiveTypedField{
+			fieldName:       "SeverityNumber",
+			originFieldName: "SeverityNumber",
+			returnType:      "SeverityNumber",
+			rawType:         "otlplogs.SeverityNumber",
+			defaultVal:      `SeverityNumberUNDEFINED`,
+			testVal:         `SeverityNumberINFO`,
+		},
+		&primitiveField{
+			fieldName:       "Name",
+			originFieldName: "Name",
+			returnType:      "string",
+			defaultVal:      `""`,
+			testVal:         `"test_name"`,
+		},
+		bodyField,
+		attributes,
+		droppedAttributesCount,
+	},
+}
+
+var bodyField = &messageValueField{
+	fieldName:       "Body",
+	originFieldName: "Body",
+	returnMessage:   anyValue,
+}
diff --git a/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go
new file mode 100644
index 00000000000..e1903ab14db
--- /dev/null
+++ b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go
@@ -0,0 +1,398 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +var metricsFile = &File{ + Name: "metrics", + imports: []string{ + `"sort"`, + ``, + `otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + ``, + `otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1"`, + }, + structs: []baseStruct{ + resourceMetricsSlice, + resourceMetrics, + instrumentationLibraryMetricsSlice, + instrumentationLibraryMetrics, + metricSlice, + metric, + doubleGauge, + doubleSum, + histogram, + summary, + numberDataPointSlice, + numberDataPoint, + histogramDataPointSlice, + histogramDataPoint, + summaryDataPointSlice, + summaryDataPoint, + quantileValuesSlice, + quantileValues, + exemplarSlice, + exemplar, + }, +} + +var resourceMetricsSlice = &sliceOfPtrs{ + structName: "ResourceMetricsSlice", + element: resourceMetrics, +} + +var resourceMetrics = &messageValueStruct{ + structName: "ResourceMetrics", + description: "// ResourceMetrics is a collection of metrics from a Resource.", + originFullName: "otlpmetrics.ResourceMetrics", + fields: []baseField{ + resourceField, + schemaURLField, + &sliceField{ + fieldName: "InstrumentationLibraryMetrics", + originFieldName: "InstrumentationLibraryMetrics", + returnSlice: instrumentationLibraryMetricsSlice, + }, + }, +} + +var instrumentationLibraryMetricsSlice = &sliceOfPtrs{ + structName: "InstrumentationLibraryMetricsSlice", + element: instrumentationLibraryMetrics, +} + +var instrumentationLibraryMetrics = &messageValueStruct{ + structName: "InstrumentationLibraryMetrics", + description: "// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation.", + originFullName: "otlpmetrics.InstrumentationLibraryMetrics", + fields: []baseField{ + instrumentationLibraryField, + schemaURLField, + &sliceField{ + fieldName: "Metrics", + originFieldName: "Metrics", + returnSlice: metricSlice, + }, + }, +} + +var metricSlice = &sliceOfPtrs{ + structName: "MetricSlice", + element: metric, +} + +var metric = &messageValueStruct{ + structName: "Metric", + description: "// Metric represents one metric as a collection of datapoints.\n" + + "// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto", + originFullName: "otlpmetrics.Metric", + fields: []baseField{ + nameField, + &primitiveField{ + fieldName: "Description", + originFieldName: "Description", + returnType: "string", + defaultVal: `""`, + testVal: `"test_description"`, + }, + &primitiveField{ + fieldName: "Unit", + originFieldName: "Unit", + returnType: "string", + defaultVal: `""`, + testVal: `"1"`, + }, + oneofDataField, + }, +} + +var doubleGauge = &messageValueStruct{ + structName: "Gauge", + description: "// Gauge represents the type of a double scalar metric that always exports the \"current value\" for every data point.", + originFullName: "otlpmetrics.Gauge", + fields: []baseField{ + &sliceField{ + fieldName: "DataPoints", + originFieldName: 
"DataPoints", + returnSlice: numberDataPointSlice, + }, + }, +} + +var doubleSum = &messageValueStruct{ + structName: "Sum", + description: "// Sum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval.", + originFullName: "otlpmetrics.Sum", + fields: []baseField{ + aggregationTemporalityField, + isMonotonicField, + &sliceField{ + fieldName: "DataPoints", + originFieldName: "DataPoints", + returnSlice: numberDataPointSlice, + }, + }, +} + +var histogram = &messageValueStruct{ + structName: "Histogram", + description: "// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.", + originFullName: "otlpmetrics.Histogram", + fields: []baseField{ + aggregationTemporalityField, + &sliceField{ + fieldName: "DataPoints", + originFieldName: "DataPoints", + returnSlice: histogramDataPointSlice, + }, + }, +} + +var summary = &messageValueStruct{ + structName: "Summary", + description: "// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.", + originFullName: "otlpmetrics.Summary", + fields: []baseField{ + &sliceField{ + fieldName: "DataPoints", + originFieldName: "DataPoints", + returnSlice: summaryDataPointSlice, + }, + }, +} + +var numberDataPointSlice = &sliceOfPtrs{ + structName: "NumberDataPointSlice", + element: numberDataPoint, +} + +var numberDataPoint = &messageValueStruct{ + structName: "NumberDataPoint", + description: "// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric.", + originFullName: "otlpmetrics.NumberDataPoint", + fields: []baseField{ + attributes, + startTimeField, + timeField, + &numberField{ + fields: []*oneOfPrimitiveValue{ + { + originFullName: "otlpmetrics.NumberDataPoint", + name: "DoubleVal", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", + fieldType: "Double", + }, + { + originFullName: "otlpmetrics.NumberDataPoint", + name: "IntVal", + originFieldName: "Value", + returnType: "int64", + defaultVal: "int64(0)", + testVal: "int64(17)", + fieldType: "Int", + }, + }, + }, + exemplarsField, + }, +} + +var histogramDataPointSlice = &sliceOfPtrs{ + structName: "HistogramDataPointSlice", + element: histogramDataPoint, +} + +var histogramDataPoint = &messageValueStruct{ + structName: "HistogramDataPoint", + description: "// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values.", + originFullName: "otlpmetrics.HistogramDataPoint", + fields: []baseField{ + attributes, + startTimeField, + timeField, + countField, + doubleSumField, + bucketCountsField, + explicitBoundsField, + exemplarsField, + }, +} + +var summaryDataPointSlice = &sliceOfPtrs{ + structName: "SummaryDataPointSlice", + element: summaryDataPoint, +} + +var summaryDataPoint = &messageValueStruct{ + structName: "SummaryDataPoint", + description: "// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.", + originFullName: "otlpmetrics.SummaryDataPoint", + fields: []baseField{ + attributes, + startTimeField, + timeField, + countField, + doubleSumField, + &sliceField{ + fieldName: "QuantileValues", + originFieldName: "QuantileValues", + returnSlice: quantileValuesSlice, + }, + }, +} + +var 
quantileValuesSlice = &sliceOfPtrs{ + structName: "ValueAtQuantileSlice", + element: quantileValues, +} + +var quantileValues = &messageValueStruct{ + structName: "ValueAtQuantile", + description: "// ValueAtQuantile is a quantile value within a Summary data point.", + originFullName: "otlpmetrics.SummaryDataPoint_ValueAtQuantile", + fields: []baseField{ + quantileField, + valueFloat64Field, + }, +} + +var exemplarSlice = &sliceOfValues{ + structName: "ExemplarSlice", + element: exemplar, +} + +var exemplar = &messageValueStruct{ + structName: "Exemplar", + description: "// Exemplar is a sample input double measurement.\n//\n" + + "// Exemplars also hold information about the environment when the measurement was recorded,\n" + + "// for example the span and trace ID of the active span when the exemplar was recorded.", + + originFullName: "otlpmetrics.Exemplar", + fields: []baseField{ + timeField, + &numberField{ + fields: []*oneOfPrimitiveValue{ + { + originFullName: "otlpmetrics.Exemplar", + name: "DoubleVal", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", + fieldType: "Double", + }, + { + originFullName: "otlpmetrics.Exemplar", + name: "IntVal", + originFieldName: "Value", + returnType: "int64", + defaultVal: "int64(0)", + testVal: "int64(17)", + fieldType: "Int", + }, + }, + }, + &sliceField{ + fieldName: "FilteredAttributes", + originFieldName: "FilteredAttributes", + returnSlice: attributeMap, + }, + }, +} + +var exemplarsField = &sliceField{ + fieldName: "Exemplars", + originFieldName: "Exemplars", + returnSlice: exemplarSlice, +} + +var countField = &primitiveField{ + fieldName: "Count", + originFieldName: "Count", + returnType: "uint64", + defaultVal: "uint64(0)", + testVal: "uint64(17)", +} + +var doubleSumField = &primitiveField{ + fieldName: "Sum", + originFieldName: "Sum", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var valueFloat64Field = &primitiveField{ + fieldName: "Value", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var bucketCountsField = &primitiveField{ + fieldName: "BucketCounts", + originFieldName: "BucketCounts", + returnType: "[]uint64", + defaultVal: "[]uint64(nil)", + testVal: "[]uint64{1, 2, 3}", +} + +var explicitBoundsField = &primitiveField{ + fieldName: "ExplicitBounds", + originFieldName: "ExplicitBounds", + returnType: "[]float64", + defaultVal: "[]float64(nil)", + testVal: "[]float64{1, 2, 3}", +} + +var quantileField = &primitiveField{ + fieldName: "Quantile", + originFieldName: "Quantile", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", +} + +var isMonotonicField = &primitiveField{ + fieldName: "IsMonotonic", + originFieldName: "IsMonotonic", + returnType: "bool", + defaultVal: "false", + testVal: "true", +} + +var aggregationTemporalityField = &primitiveTypedField{ + fieldName: "AggregationTemporality", + originFieldName: "AggregationTemporality", + returnType: "AggregationTemporality", + rawType: "otlpmetrics.AggregationTemporality", + defaultVal: "AggregationTemporalityUnspecified", + testVal: "AggregationTemporalityCumulative", +} + +var oneofDataField = &oneofField{ + copyFuncName: "copyData", + originFieldName: "Data", + testVal: "&otlpmetrics.Metric_Gauge{Gauge: &otlpmetrics.Gauge{}}", + fillTestName: "Gauge", +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go 
b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go new file mode 100644 index 00000000000..e14c0f3b3e4 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +var resourceFile = &File{ + Name: "resource", + imports: []string{ + `otlpresource "go.opentelemetry.io/collector/model/internal/data/protogen/resource/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + }, + structs: []baseStruct{ + resource, + }, +} + +var resource = &messageValueStruct{ + structName: "Resource", + description: "// Resource is a message representing the resource information.", + originFullName: "otlpresource.Resource", + fields: []baseField{ + attributes, + }, +} + +var resourceField = &messageValueField{ + fieldName: "Resource", + originFieldName: "Resource", + returnMessage: resource, +} diff --git a/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go new file mode 100644 index 00000000000..0b234bea0b7 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go @@ -0,0 +1,251 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
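+
+// This file wires up the pdata trace hierarchy: ResourceSpansSlice ->
+// ResourceSpans -> InstrumentationLibrarySpans -> SpanSlice -> Span, together
+// with span events, links, and the span status message.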
+ +package internal + +var traceFile = &File{ + Name: "trace", + imports: []string{ + `"sort"`, + ``, + `otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1"`, + }, + testImports: []string{ + `"testing"`, + ``, + `"github.com/stretchr/testify/assert"`, + ``, + `otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1"`, + }, + structs: []baseStruct{ + resourceSpansSlice, + resourceSpans, + instrumentationLibrarySpansSlice, + instrumentationLibrarySpans, + spanSlice, + span, + spanEventSlice, + spanEvent, + spanLinkSlice, + spanLink, + spanStatus, + }, +} + +var resourceSpansSlice = &sliceOfPtrs{ + structName: "ResourceSpansSlice", + element: resourceSpans, +} + +var resourceSpans = &messageValueStruct{ + structName: "ResourceSpans", + description: "// ResourceSpans is a collection of spans from a Resource.", + originFullName: "otlptrace.ResourceSpans", + fields: []baseField{ + resourceField, + schemaURLField, + &sliceField{ + fieldName: "InstrumentationLibrarySpans", + originFieldName: "InstrumentationLibrarySpans", + returnSlice: instrumentationLibrarySpansSlice, + }, + }, +} + +var instrumentationLibrarySpansSlice = &sliceOfPtrs{ + structName: "InstrumentationLibrarySpansSlice", + element: instrumentationLibrarySpans, +} + +var instrumentationLibrarySpans = &messageValueStruct{ + structName: "InstrumentationLibrarySpans", + description: "// InstrumentationLibrarySpans is a collection of spans from a LibraryInstrumentation.", + originFullName: "otlptrace.InstrumentationLibrarySpans", + fields: []baseField{ + instrumentationLibraryField, + schemaURLField, + &sliceField{ + fieldName: "Spans", + originFieldName: "Spans", + returnSlice: spanSlice, + }, + }, +} + +var spanSlice = &sliceOfPtrs{ + structName: "SpanSlice", + element: span, +} + +var span = &messageValueStruct{ + structName: "Span", + description: "// Span represents a single operation within a trace.\n" + + "// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto", + originFullName: "otlptrace.Span", + fields: []baseField{ + traceIDField, + spanIDField, + traceStateField, + parentSpanIDField, + nameField, + &primitiveTypedField{ + fieldName: "Kind", + originFieldName: "Kind", + returnType: "SpanKind", + rawType: "otlptrace.Span_SpanKind", + defaultVal: "SpanKindUnspecified", + testVal: "SpanKindServer", + }, + startTimeField, + endTimeField, + attributes, + droppedAttributesCount, + &sliceField{ + fieldName: "Events", + originFieldName: "Events", + returnSlice: spanEventSlice, + }, + &primitiveField{ + fieldName: "DroppedEventsCount", + originFieldName: "DroppedEventsCount", + returnType: "uint32", + defaultVal: "uint32(0)", + testVal: "uint32(17)", + }, + &sliceField{ + fieldName: "Links", + originFieldName: "Links", + returnSlice: spanLinkSlice, + }, + &primitiveField{ + fieldName: "DroppedLinksCount", + originFieldName: "DroppedLinksCount", + returnType: "uint32", + defaultVal: "uint32(0)", + testVal: "uint32(17)", + }, + &messageValueField{ + fieldName: "Status", + originFieldName: "Status", + returnMessage: spanStatus, + }, + }, +} + +var spanEventSlice = &sliceOfPtrs{ + structName: "SpanEventSlice", + element: spanEvent, +} + +var spanEvent = &messageValueStruct{ + structName: "SpanEvent", + description: "// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied\n" + + "// text description and key-value pairs. 
See OTLP for event definition.", + originFullName: "otlptrace.Span_Event", + fields: []baseField{ + timeField, + nameField, + attributes, + droppedAttributesCount, + }, +} + +var spanLinkSlice = &sliceOfPtrs{ + structName: "SpanLinkSlice", + element: spanLink, +} + +var spanLink = &messageValueStruct{ + structName: "SpanLink", + description: "// SpanLink is a pointer from the current span to another span in the same trace or in a\n" + + "// different trace.\n" + + "// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto", + originFullName: "otlptrace.Span_Link", + fields: []baseField{ + traceIDField, + spanIDField, + traceStateField, + attributes, + droppedAttributesCount, + }, +} + +var spanStatus = &messageValueStruct{ + structName: "SpanStatus", + description: "// SpanStatus is an optional final status for this span. Semantically, when Status was not\n" + + "// set, that means the span ended without errors and to assume Status.Ok (code = 0).", + originFullName: "otlptrace.Status", + fields: []baseField{ + &primitiveTypedField{ + fieldName: "Code", + originFieldName: "Code", + returnType: "StatusCode", + rawType: "otlptrace.Status_StatusCode", + defaultVal: "StatusCode(0)", + testVal: "StatusCode(1)", + // Generate code without setter. Setter will be manually coded since we + // need to also change DeprecatedCode when Code is changed according + // to OTLP spec https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L231 + manualSetter: true, + }, + &primitiveField{ + fieldName: "Message", + originFieldName: "Message", + returnType: "string", + defaultVal: `""`, + testVal: `"cancelled"`, + }, + }, +} + +var traceIDField = &primitiveStructField{ + fieldName: "TraceID", + originFieldName: "TraceId", + returnType: "TraceID", + defaultVal: "NewTraceID([16]byte{})", + testVal: "NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})", +} + +var spanIDField = &primitiveStructField{ + fieldName: "SpanID", + originFieldName: "SpanId", + returnType: "SpanID", + defaultVal: "NewSpanID([8]byte{})", + testVal: "NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})", +} + +var parentSpanIDField = &primitiveStructField{ + fieldName: "ParentSpanID", + originFieldName: "ParentSpanId", + returnType: "SpanID", + defaultVal: "NewSpanID([8]byte{})", + testVal: "NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})", +} + +var traceStateField = &primitiveTypedField{ + fieldName: "TraceState", + originFieldName: "TraceState", + returnType: "TraceState", + rawType: "string", + defaultVal: `TraceState("")`, + testVal: `TraceState("congo=congos")`, +} + +var droppedAttributesCount = &primitiveField{ + fieldName: "DroppedAttributesCount", + originFieldName: "DroppedAttributesCount", + returnType: "uint32", + defaultVal: "uint32(0)", + testVal: "uint32(17)", +} diff --git a/internal/otel_collector/cmd/pdatagen/main.go b/internal/otel_collector/cmd/pdatagen/main.go new file mode 100644 index 00000000000..3b4101a7426 --- /dev/null +++ b/internal/otel_collector/cmd/pdatagen/main.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "os" + + "go.opentelemetry.io/collector/cmd/pdatagen/internal" +) + +func check(e error) { + if e != nil { + panic(e) + } +} + +func main() { + for _, fp := range internal.AllFiles { + f, err := os.Create("./model/pdata/generated_" + fp.Name + ".go") + check(err) + _, err = f.WriteString(fp.GenerateFile()) + check(err) + check(f.Close()) + f, err = os.Create("./model/pdata/generated_" + fp.Name + "_test.go") + check(err) + _, err = f.WriteString(fp.GenerateTestFile()) + check(err) + check(f.Close()) + } +} diff --git a/internal/otel_collector/component/build_info.go b/internal/otel_collector/component/build_info.go new file mode 100644 index 00000000000..1f2257ba981 --- /dev/null +++ b/internal/otel_collector/component/build_info.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +// BuildInfo is the information that is logged at the application start and +// passed into each component. This information can be overridden in custom builds. +type BuildInfo struct { + // Executable file name, e.g. "otelcol". + Command string + + // Full name of the collector, e.g. "OpenTelemetry Collector". + Description string + + // Version string. + Version string +} + +// DefaultBuildInfo returns the default BuildInfo. +func DefaultBuildInfo() BuildInfo { + return BuildInfo{ + Command: "otelcol", + Description: "OpenTelemetry Collector", + Version: "latest", + } +} diff --git a/internal/otel_collector/component/component.go b/internal/otel_collector/component/component.go new file mode 100644 index 00000000000..868d9d26c03 --- /dev/null +++ b/internal/otel_collector/component/component.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +import ( + "context" + + "go.opentelemetry.io/collector/config" +) + +// Component is either a receiver, exporter, processor, or an extension. +// +// A component's lifecycle has the following phases: +// +// 1. 
Creation: The component is created using its respective factory, via a Create* call. +// 2. Start: The component's Start method is called. +// 3. Running: The component is up and running. +// 4. Shutdown: The component's Shutdown method is called and the lifecycle is complete. +// +// Once the lifecycle is complete it may be repeated, in which case a new component +// is created, starts, runs and is shutdown again. +type Component interface { + // Start tells the component to start. Host parameter can be used for communicating + // with the host after Start() has already returned. If an error is returned by + // Start() then the collector startup will be aborted. + // If this is an exporter component it may prepare for exporting + // by connecting to the endpoint. + // + // If the component needs to perform a long-running starting operation then it is recommended + // that Start() returns quickly and the long-running operation is performed in background. + // In that case make sure that the long-running operation does not use the context passed + // to Start() function since that context will be cancelled soon and can abort the long-running + // operation. Create a new context from the context.Background() for long-running operations. + Start(ctx context.Context, host Host) error + + // Shutdown is invoked during service shutdown. After Shutdown() is called, if the component + // accepted data in any way, it should not accept it anymore. + // + // If there are any background operations running by the component they must be aborted before + // this function returns. Remember that if you started any long-running background operations from + // the Start() method, those operations must be also cancelled. If there are any buffers in the + // component, they should be cleared and the data sent immediately to the next component. + // + // The component's lifecycle is completed once the Shutdown() method returns. No other + // methods of the component are called after that. If necessary a new component with + // the same or different configuration may be created and started (this may happen + // for example if we want to restart the component). + Shutdown(ctx context.Context) error +} + +// Kind represents component kinds. +type Kind int + +const ( + _ Kind = iota // skip 0, start types from 1. + KindReceiver + KindProcessor + KindExporter + KindExtension +) + +// Factory is implemented by all component factories. +type Factory interface { + // Type gets the type of the component created by this factory. + Type() config.Type +} diff --git a/internal/otel_collector/component/componenterror/doc.go b/internal/otel_collector/component/componenterror/doc.go new file mode 100644 index 00000000000..1b57560cbae --- /dev/null +++ b/internal/otel_collector/component/componenterror/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
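+
+// Illustrative sketch of typical usage (the factory function and parameter
+// names here are hypothetical):
+//
+//	func createTracesReceiver(nextConsumer consumer.Traces) (component.TracesReceiver, error) {
+//		if nextConsumer == nil {
+//			return nil, componenterror.ErrNilNextConsumer
+//		}
+//		...
+//	}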
+ +// Package componenterror provides helper functions to create and process +// OpenTelemetry component errors. +package componenterror diff --git a/internal/otel_collector/component/componenterror/errors.go b/internal/otel_collector/component/componenterror/errors.go new file mode 100644 index 00000000000..e165202406f --- /dev/null +++ b/internal/otel_collector/component/componenterror/errors.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenterror + +import ( + "errors" +) + +var ( + // ErrNilNextConsumer indicates an error on nil next consumer. + ErrNilNextConsumer = errors.New("nil nextConsumer") + + // ErrDataTypeIsNotSupported can be returned by receiver, exporter or processor + // factory methods that create the entity if the particular telemetry + // data type is not supported by the receiver, exporter or processor. + ErrDataTypeIsNotSupported = errors.New("telemetry type is not supported") +) diff --git a/internal/otel_collector/component/componenthelper/component.go b/internal/otel_collector/component/componenthelper/component.go new file mode 100644 index 00000000000..ee41c880e4a --- /dev/null +++ b/internal/otel_collector/component/componenthelper/component.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenthelper + +import ( + "context" + + "go.opentelemetry.io/collector/component" +) + +// StartFunc specifies the function invoked when the component.Component is being started. +type StartFunc func(context.Context, component.Host) error + +// Start starts the component. +func (f StartFunc) Start(ctx context.Context, host component.Host) error { + return f(ctx, host) +} + +// ShutdownFunc specifies the function invoked when the component.Component is being shutdown. +type ShutdownFunc func(context.Context) error + +// Shutdown shuts down the component. +func (f ShutdownFunc) Shutdown(ctx context.Context) error { + return f(ctx) +} + +// Option represents the possible options for New. +type Option func(*baseComponent) + +// WithStart overrides the default `Start` function for a component.Component. +// The default always returns nil. +func WithStart(startFunc StartFunc) Option { + return func(o *baseComponent) { + o.StartFunc = startFunc + } +} + +// WithShutdown overrides the default `Shutdown` function for a component.Component. +// The default always returns nil. 
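+//
+// For example (illustrative; closeConn is a hypothetical cleanup helper):
+//
+//	c := componenthelper.New(
+//		componenthelper.WithShutdown(func(ctx context.Context) error {
+//			return closeConn(ctx)
+//		}),
+//	)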
+func WithShutdown(shutdownFunc ShutdownFunc) Option { + return func(o *baseComponent) { + o.ShutdownFunc = shutdownFunc + } +} + +type baseComponent struct { + StartFunc + ShutdownFunc +} + +// New returns a component.Component configured with the provided options. +func New(options ...Option) component.Component { + bc := &baseComponent{ + StartFunc: func(ctx context.Context, host component.Host) error { return nil }, + ShutdownFunc: func(ctx context.Context) error { return nil }, + } + + for _, op := range options { + op(bc) + } + + return bc +} diff --git a/internal/otel_collector/component/componenthelper/doc.go b/internal/otel_collector/component/componenthelper/doc.go new file mode 100644 index 00000000000..bede10d8be7 --- /dev/null +++ b/internal/otel_collector/component/componenthelper/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package componenthelper assists with the creation of a new component.Component. +package componenthelper diff --git a/internal/otel_collector/component/componenttest/doc.go b/internal/otel_collector/component/componenttest/doc.go new file mode 100644 index 00000000000..1762446116c --- /dev/null +++ b/internal/otel_collector/component/componenttest/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package componenttest define types and functions used to help test packages +// implementing the component package interfaces. +package componenttest diff --git a/internal/otel_collector/component/componenttest/nop_exporter.go b/internal/otel_collector/component/componenttest/nop_exporter.go new file mode 100644 index 00000000000..2a474e6b02f --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_exporter.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
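For components whose start and shutdown logic fits in a closure, `componenthelper.New` above removes the need to declare a struct at all. A sketch using only the helper just defined; any omitted option falls back to the no-op default:

```go
package example

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenthelper"
)

// newClosureComponent assembles a component.Component from plain functions.
func newClosureComponent() component.Component {
	return componenthelper.New(
		componenthelper.WithStart(func(_ context.Context, _ component.Host) error {
			return nil // acquire resources here
		}),
		componenthelper.WithShutdown(func(_ context.Context) error {
			return nil // release resources here
		}),
	)
}
```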
+ +package componenttest + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +// NewNopExporterCreateSettings returns a new nop settings for Create*Exporter functions. +func NewNopExporterCreateSettings() component.ExporterCreateSettings { + return component.ExporterCreateSettings{ + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), + } +} + +type nopExporterConfig struct { + config.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} + +// nopExporterFactory is factory for nopExporter. +type nopExporterFactory struct{} + +var nopExporterFactoryInstance = &nopExporterFactory{} + +// NewNopExporterFactory returns a component.ExporterFactory that constructs nop exporters. +func NewNopExporterFactory() component.ExporterFactory { + return nopExporterFactoryInstance +} + +// Type gets the type of the Exporter config created by this factory. +func (f *nopExporterFactory) Type() config.Type { + return "nop" +} + +// CreateDefaultConfig creates the default configuration for the Exporter. +func (f *nopExporterFactory) CreateDefaultConfig() config.Exporter { + return &nopExporterConfig{ + ExporterSettings: config.NewExporterSettings(config.NewID("nop")), + } +} + +// CreateTracesExporter implements component.ExporterFactory interface. +func (f *nopExporterFactory) CreateTracesExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.TracesExporter, error) { + return nopExporterInstance, nil +} + +// CreateMetricsExporter implements component.ExporterFactory interface. +func (f *nopExporterFactory) CreateMetricsExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.MetricsExporter, error) { + return nopExporterInstance, nil +} + +// CreateLogsExporter implements component.ExporterFactory interface. +func (f *nopExporterFactory) CreateLogsExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.LogsExporter, error) { + return nopExporterInstance, nil +} + +var nopExporterInstance = &nopExporter{ + Component: componenthelper.New(), + Consumer: consumertest.NewNop(), +} + +// nopExporter stores consumed traces and metrics for testing purposes. +type nopExporter struct { + component.Component + consumertest.Consumer +} diff --git a/internal/otel_collector/component/componenttest/nop_extension.go b/internal/otel_collector/component/componenttest/nop_extension.go new file mode 100644 index 00000000000..222cb940b91 --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_extension.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
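The nop exporter above exists purely for wiring tests. A sketch of a test that creates, starts and stops it, assuming the factory and settings helpers just defined (`NewNopHost` appears further down in this patch):

```go
package componenttest_test

import (
	"context"
	"testing"

	"go.opentelemetry.io/collector/component/componenttest"
)

func TestNopExporterLifecycle(t *testing.T) {
	factory := componenttest.NewNopExporterFactory()
	cfg := factory.CreateDefaultConfig()

	exp, err := factory.CreateTracesExporter(
		context.Background(), componenttest.NewNopExporterCreateSettings(), cfg)
	if err != nil {
		t.Fatal(err)
	}
	if err := exp.Start(context.Background(), componenttest.NewNopHost()); err != nil {
		t.Fatal(err)
	}
	if err := exp.Shutdown(context.Background()); err != nil {
		t.Fatal(err)
	}
}
```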
+ +package componenttest + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" +) + +// NewNopExtensionCreateSettings returns a new nop settings for Create*Extension functions. +func NewNopExtensionCreateSettings() component.ExtensionCreateSettings { + return component.ExtensionCreateSettings{ + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), + } +} + +type nopExtensionConfig struct { + config.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} + +// nopExtensionFactory is factory for nopExtension. +type nopExtensionFactory struct{} + +var nopExtensionFactoryInstance = &nopExtensionFactory{} + +// NewNopExtensionFactory returns a component.ExtensionFactory that constructs nop extensions. +func NewNopExtensionFactory() component.ExtensionFactory { + return nopExtensionFactoryInstance +} + +// Type gets the type of the Extension config created by this factory. +func (f *nopExtensionFactory) Type() config.Type { + return "nop" +} + +// CreateDefaultConfig creates the default configuration for the Extension. +func (f *nopExtensionFactory) CreateDefaultConfig() config.Extension { + return &nopExtensionConfig{ + ExtensionSettings: config.NewExtensionSettings(config.NewID("nop")), + } +} + +// CreateExtension implements component.ExtensionFactory interface. +func (f *nopExtensionFactory) CreateExtension( + _ context.Context, + _ component.ExtensionCreateSettings, + _ config.Extension, +) (component.Extension, error) { + return nopExtensionInstance, nil +} + +var nopExtensionInstance = &nopExtension{ + Component: componenthelper.New(), +} + +// nopExtension stores consumed traces and metrics for testing purposes. +type nopExtension struct { + component.Component +} diff --git a/internal/otel_collector/component/componenttest/nop_factories.go b/internal/otel_collector/component/componenttest/nop_factories.go new file mode 100644 index 00000000000..ad3813f090f --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_factories.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "go.opentelemetry.io/collector/component" +) + +// NopFactories returns a component.Factories with all nop factories. 
+func NopFactories() (component.Factories, error) { + var factories component.Factories + var err error + + if factories.Extensions, err = component.MakeExtensionFactoryMap(NewNopExtensionFactory()); err != nil { + return component.Factories{}, err + } + + if factories.Receivers, err = component.MakeReceiverFactoryMap(NewNopReceiverFactory()); err != nil { + return component.Factories{}, err + } + + if factories.Exporters, err = component.MakeExporterFactoryMap(NewNopExporterFactory()); err != nil { + return component.Factories{}, err + } + + if factories.Processors, err = component.MakeProcessorFactoryMap(NewNopProcessorFactory()); err != nil { + return component.Factories{}, err + } + + return factories, err +} diff --git a/internal/otel_collector/component/componenttest/nop_host.go b/internal/otel_collector/component/componenttest/nop_host.go new file mode 100644 index 00000000000..0baee4726b9 --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_host.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" +) + +// nopHost mocks a receiver.ReceiverHost for test purposes. +type nopHost struct{} + +var nopHostInstance component.Host = &nopHost{} + +// NewNopHost returns a new instance of nopHost with proper defaults for most tests. +func NewNopHost() component.Host { + return nopHostInstance +} + +func (nh *nopHost) ReportFatalError(_ error) {} + +func (nh *nopHost) GetFactory(_ component.Kind, _ config.Type) component.Factory { + return nil +} + +func (nh *nopHost) GetExtensions() map[config.ComponentID]component.Extension { + return nil +} + +func (nh *nopHost) GetExporters() map[config.DataType]map[config.ComponentID]component.Exporter { + return nil +} diff --git a/internal/otel_collector/component/componenttest/nop_processor.go b/internal/otel_collector/component/componenttest/nop_processor.go new file mode 100644 index 00000000000..0e7fd1b5f07 --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_processor.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
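`NopFactories` above bundles one factory of each kind under the `nop` type, which is convenient for tests that need a fully populated `component.Factories` value. A sketch of asserting that wiring:

```go
package componenttest_test

import (
	"testing"

	"go.opentelemetry.io/collector/component/componenttest"
)

func TestNopFactories(t *testing.T) {
	factories, err := componenttest.NopFactories()
	if err != nil {
		t.Fatal(err)
	}
	// One factory of each kind is registered under the "nop" type.
	if _, ok := factories.Receivers["nop"]; !ok {
		t.Error(`missing "nop" receiver factory`)
	}
	if _, ok := factories.Exporters["nop"]; !ok {
		t.Error(`missing "nop" exporter factory`)
	}
}
```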
+ +package componenttest + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" +) + +// NewNopProcessorCreateSettings returns a new nop settings for Create*Processor functions. +func NewNopProcessorCreateSettings() component.ProcessorCreateSettings { + return component.ProcessorCreateSettings{ + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), + } +} + +type nopProcessorConfig struct { + config.ProcessorSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} + +// nopProcessorFactory is factory for nopProcessor. +type nopProcessorFactory struct { + component.BaseProcessorFactory +} + +var nopProcessorFactoryInstance = &nopProcessorFactory{} + +// NewNopProcessorFactory returns a component.ProcessorFactory that constructs nop processors. +func NewNopProcessorFactory() component.ProcessorFactory { + return nopProcessorFactoryInstance +} + +// Type gets the type of the Processor config created by this factory. +func (f *nopProcessorFactory) Type() config.Type { + return "nop" +} + +// CreateDefaultConfig creates the default configuration for the Processor. +func (f *nopProcessorFactory) CreateDefaultConfig() config.Processor { + return &nopProcessorConfig{ + ProcessorSettings: config.NewProcessorSettings(config.NewID("nop")), + } +} + +// CreateTracesProcessor implements component.ProcessorFactory interface. +func (f *nopProcessorFactory) CreateTracesProcessor( + _ context.Context, + _ component.ProcessorCreateSettings, + _ config.Processor, + _ consumer.Traces, +) (component.TracesProcessor, error) { + return nopProcessorInstance, nil +} + +// CreateMetricsProcessor implements component.ProcessorFactory interface. +func (f *nopProcessorFactory) CreateMetricsProcessor( + _ context.Context, + _ component.ProcessorCreateSettings, + _ config.Processor, + _ consumer.Metrics, +) (component.MetricsProcessor, error) { + return nopProcessorInstance, nil +} + +// CreateLogsProcessor implements component.ProcessorFactory interface. +func (f *nopProcessorFactory) CreateLogsProcessor( + _ context.Context, + _ component.ProcessorCreateSettings, + _ config.Processor, + _ consumer.Logs, +) (component.LogsProcessor, error) { + return nopProcessorInstance, nil +} + +var nopProcessorInstance = &nopProcessor{ + Component: componenthelper.New(), + Consumer: consumertest.NewNop(), +} + +// nopProcessor stores consumed traces and metrics for testing purposes. +type nopProcessor struct { + component.Component + consumertest.Consumer +} diff --git a/internal/otel_collector/component/componenttest/nop_receiver.go b/internal/otel_collector/component/componenttest/nop_receiver.go new file mode 100644 index 00000000000..1f449e1ba0e --- /dev/null +++ b/internal/otel_collector/component/componenttest/nop_receiver.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" +) + +// NewNopReceiverCreateSettings returns a new nop settings for Create*Receiver functions. +func NewNopReceiverCreateSettings() component.ReceiverCreateSettings { + return component.ReceiverCreateSettings{ + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), + } +} + +type nopReceiverConfig struct { + config.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct +} + +// nopReceiverFactory is factory for nopReceiver. +type nopReceiverFactory struct{} + +var nopReceiverFactoryInstance = &nopReceiverFactory{} + +// NewNopReceiverFactory returns a component.ReceiverFactory that constructs nop receivers. +func NewNopReceiverFactory() component.ReceiverFactory { + return nopReceiverFactoryInstance +} + +// Type gets the type of the Receiver config created by this factory. +func (f *nopReceiverFactory) Type() config.Type { + return config.NewID("nop").Type() +} + +// CreateDefaultConfig creates the default configuration for the Receiver. +func (f *nopReceiverFactory) CreateDefaultConfig() config.Receiver { + return &nopReceiverConfig{ + ReceiverSettings: config.NewReceiverSettings(config.NewID("nop")), + } +} + +// CreateTracesReceiver implements component.ReceiverFactory interface. +func (f *nopReceiverFactory) CreateTracesReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + _ config.Receiver, + _ consumer.Traces, +) (component.TracesReceiver, error) { + return nopReceiverInstance, nil +} + +// CreateMetricsReceiver implements component.ReceiverFactory interface. +func (f *nopReceiverFactory) CreateMetricsReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + _ config.Receiver, + _ consumer.Metrics, +) (component.MetricsReceiver, error) { + return nopReceiverInstance, nil +} + +// CreateLogsReceiver implements component.ReceiverFactory interface. +func (f *nopReceiverFactory) CreateLogsReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + _ config.Receiver, + _ consumer.Logs, +) (component.LogsReceiver, error) { + return nopReceiverInstance, nil +} + +var nopReceiverInstance = &nopReceiver{ + Component: componenthelper.New(), +} + +// nopReceiver stores consumed traces and metrics for testing purposes. 
+type nopReceiver struct { + component.Component +} diff --git a/internal/otel_collector/component/componenttest/shutdown_verifier.go b/internal/otel_collector/component/componenttest/shutdown_verifier.go new file mode 100644 index 00000000000..1c26575e114 --- /dev/null +++ b/internal/otel_collector/component/componenttest/shutdown_verifier.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package componenttest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/internal/testdata" +) + +func verifyTracesProcessorDoesntProduceAfterShutdown(t *testing.T, factory component.ProcessorFactory, cfg config.Processor) { + // Create a processor and output its produce to a sink. + nextSink := new(consumertest.TracesSink) + processor, err := factory.CreateTracesProcessor( + context.Background(), + NewNopProcessorCreateSettings(), + cfg, + nextSink, + ) + if err != nil { + if err == componenterror.ErrDataTypeIsNotSupported { + return + } + require.NoError(t, err) + } + err = processor.Start(context.Background(), NewNopHost()) + assert.NoError(t, err) + + // Send some traces to the processor. + const generatedCount = 10 + for i := 0; i < generatedCount; i++ { + require.NoError(t, processor.ConsumeTraces(context.Background(), testdata.GenerateTracesOneSpan())) + } + + // Now shutdown the processor. + err = processor.Shutdown(context.Background()) + assert.NoError(t, err) + + // The Shutdown() is done. It means the processor must have sent everything we + // gave it to the next sink. + assert.EqualValues(t, generatedCount, nextSink.SpanCount()) +} + +// VerifyProcessorShutdown verifies the processor doesn't produce telemetry data after shutdown. +func VerifyProcessorShutdown(t *testing.T, factory component.ProcessorFactory, cfg config.Processor) { + verifyTracesProcessorDoesntProduceAfterShutdown(t, factory, cfg) + // TODO: add metrics and logs verification. + // TODO: add other shutdown verifications. 
+} diff --git a/internal/otel_collector/component/componenttest/testdata/invalid_go.txt b/internal/otel_collector/component/componenttest/testdata/invalid_go.txt new file mode 100644 index 00000000000..50ecec8f181 --- /dev/null +++ b/internal/otel_collector/component/componenttest/testdata/invalid_go.txt @@ -0,0 +1,6 @@ +package testdata + + +import ( + "import +) \ No newline at end of file diff --git a/internal/otel_collector/component/componenttest/testdata/valid_go.txt b/internal/otel_collector/component/componenttest/testdata/valid_go.txt new file mode 100644 index 00000000000..2c88748717d --- /dev/null +++ b/internal/otel_collector/component/componenttest/testdata/valid_go.txt @@ -0,0 +1,9 @@ +package testdata + + +import ( + "go.opentelemetry.io/collector/exporter/exporter1" +) + +func main() { +} \ No newline at end of file diff --git a/internal/otel_collector/component/doc.go b/internal/otel_collector/component/doc.go new file mode 100644 index 00000000000..73f438ff0ee --- /dev/null +++ b/internal/otel_collector/component/doc.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package component outlines the components used in the collector +// and provides a foundation for the component’s creation and +// termination process. A component can be either a receiver, exporter, +// processor, or an extension. +package component diff --git a/internal/otel_collector/component/experimental/component/doc.go b/internal/otel_collector/component/experimental/component/doc.go new file mode 100644 index 00000000000..1041d4bfbb6 --- /dev/null +++ b/internal/otel_collector/component/experimental/component/doc.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package component under config/experimental contains types and interfaces +// that typically live under the "go.opentelemetry.io/collector/component" +// package but aren't stable yet to be published there. +// ATTENTION: the package is still experimental and subject to changes without advanced notice. 
+package component diff --git a/internal/otel_collector/component/experimental/component/factory.go b/internal/otel_collector/component/experimental/component/factory.go new file mode 100644 index 00000000000..03c0d49d937 --- /dev/null +++ b/internal/otel_collector/component/experimental/component/factory.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + stableconfig "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/experimental/config" + "go.opentelemetry.io/collector/config/experimental/configsource" +) + +// ConfigSourceCreateSettings is passed to ConfigSourceFactory.CreateConfigSource function. +type ConfigSourceCreateSettings struct { + // Logger that the factory can use during creation and can pass to the created + // Source to be used later as well. + Logger *zap.Logger + + // BuildInfo can be used to retrieve data according to version, etc. + BuildInfo component.BuildInfo +} + +// ConfigSourceFactory is a factory interface for configuration sources. +type ConfigSourceFactory interface { + component.Factory + + // CreateDefaultConfig creates the default configuration settings for the Source. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Source. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have such check in the + // tests of any implementation of the ConfigSourceFactory interface. + CreateDefaultConfig() config.Source + + // CreateConfigSource creates a configuration source based on the given config. + CreateConfigSource( + ctx context.Context, + set ConfigSourceCreateSettings, + cfg config.Source, + ) (configsource.ConfigSource, error) +} + +// ConfigSourceFactories maps the type of a ConfigSource to the respective factory object. +type ConfigSourceFactories map[stableconfig.Type]ConfigSourceFactory diff --git a/internal/otel_collector/component/exporter.go b/internal/otel_collector/component/exporter.go new file mode 100644 index 00000000000..2e469350082 --- /dev/null +++ b/internal/otel_collector/component/exporter.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
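A sketch of how a host might populate `ConfigSourceCreateSettings` before calling `CreateConfigSource`. The import alias, the helper name, and the assumption that the experimental package is importable as `go.opentelemetry.io/collector/component/experimental/component` are mine; `component.DefaultBuildInfo()` is the same helper the nop settings above use.

```go
package example

import (
	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	expcomponent "go.opentelemetry.io/collector/component/experimental/component"
)

// newConfigSourceSettings assembles the settings handed to a
// ConfigSourceFactory; a nil logger falls back to a no-op logger.
func newConfigSourceSettings(logger *zap.Logger) expcomponent.ConfigSourceCreateSettings {
	if logger == nil {
		logger = zap.NewNop()
	}
	return expcomponent.ConfigSourceCreateSettings{
		Logger:    logger,
		BuildInfo: component.DefaultBuildInfo(),
	}
}
```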
+ +package component + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" +) + +// Exporter exports telemetry data from the collector to a destination. +type Exporter interface { + Component +} + +// TracesExporter is an Exporter that can consume traces. +type TracesExporter interface { + Exporter + consumer.Traces +} + +// MetricsExporter is an Exporter that can consume metrics. +type MetricsExporter interface { + Exporter + consumer.Metrics +} + +// LogsExporter is an Exporter that can consume logs. +type LogsExporter interface { + Exporter + consumer.Logs +} + +// ExporterCreateSettings configures Exporter creators. +type ExporterCreateSettings struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + + // BuildInfo can be used by components for informational purposes + BuildInfo BuildInfo +} + +// ExporterFactory can create MetricsExporter, TracesExporter and +// LogsExporter. This is the new preferred factory type to create exporters. +type ExporterFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Exporter. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Exporter. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have these checks in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() config.Exporter + + // CreateTracesExporter creates a trace exporter based on this config. + // If the exporter type does not support tracing or if the config is not valid, + // an error will be returned instead. + CreateTracesExporter(ctx context.Context, set ExporterCreateSettings, + cfg config.Exporter) (TracesExporter, error) + + // CreateMetricsExporter creates a metrics exporter based on this config. + // If the exporter type does not support metrics or if the config is not valid, + // an error will be returned instead. + CreateMetricsExporter(ctx context.Context, set ExporterCreateSettings, + cfg config.Exporter) (MetricsExporter, error) + + // CreateLogsExporter creates an exporter based on the config. + // If the exporter type does not support logs or if the config is not valid, + // an error will be returned instead. + CreateLogsExporter(ctx context.Context, set ExporterCreateSettings, + cfg config.Exporter) (LogsExporter, error) +} diff --git a/internal/otel_collector/component/extension.go b/internal/otel_collector/component/extension.go new file mode 100644 index 00000000000..9f865c7f966 --- /dev/null +++ b/internal/otel_collector/component/extension.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config" +) + +// Extension is the interface for objects hosted by the OpenTelemetry Collector that +// don't participate directly on data pipelines but provide some functionality +// to the service, examples: health check endpoint, z-pages, etc. +type Extension interface { + Component +} + +// PipelineWatcher is an extra interface for Extension hosted by the OpenTelemetry +// Collector that is to be implemented by extensions interested in changes to pipeline +// states. Typically this will be used by extensions that change their behavior if data is +// being ingested or not, e.g.: a k8s readiness probe. +type PipelineWatcher interface { + // Ready notifies the Extension that all pipelines were built and the + // receivers were started, i.e.: the service is ready to receive data + // (note that it may already have received data when this method is called). + Ready() error + + // NotReady notifies the Extension that all receivers are about to be stopped, + // i.e.: pipeline receivers will not accept new data. + // This is sent before receivers are stopped, so the Extension can take any + // appropriate actions before that happens. + NotReady() error +} + +// ExtensionCreateSettings is passed to ExtensionFactory.Create* functions. +type ExtensionCreateSettings struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + + // BuildInfo can be used by components for informational purposes + BuildInfo BuildInfo +} + +// ExtensionFactory is a factory interface for extensions to the service. +type ExtensionFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Extension. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Extension. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have these checks in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() config.Extension + + // CreateExtension creates a service extension based on the given config. + CreateExtension(ctx context.Context, set ExtensionCreateSettings, cfg config.Extension) (Extension, error) +} diff --git a/internal/otel_collector/component/factories.go b/internal/otel_collector/component/factories.go new file mode 100644 index 00000000000..373bbdfc688 --- /dev/null +++ b/internal/otel_collector/component/factories.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/config"
+)
+
+// Factories holds, in a single type, all the component factories that
+// can be handled by the Config.
+type Factories struct {
+	// Receivers maps receiver type names in the config to the respective factory.
+	Receivers map[config.Type]ReceiverFactory
+
+	// Processors maps processor type names in the config to the respective factory.
+	Processors map[config.Type]ProcessorFactory
+
+	// Exporters maps exporter type names in the config to the respective factory.
+	Exporters map[config.Type]ExporterFactory
+
+	// Extensions maps extension type names in the config to the respective factory.
+	Extensions map[config.Type]ExtensionFactory
+}
+
+// MakeReceiverFactoryMap takes a list of receiver factories and returns a map
+// with factory types as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeReceiverFactoryMap(factories ...ReceiverFactory) (map[config.Type]ReceiverFactory, error) {
+	fMap := map[config.Type]ReceiverFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate receiver factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeProcessorFactoryMap takes a list of processor factories and returns a map
+// with factory types as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeProcessorFactoryMap(factories ...ProcessorFactory) (map[config.Type]ProcessorFactory, error) {
+	fMap := map[config.Type]ProcessorFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate processor factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeExporterFactoryMap takes a list of exporter factories and returns a map
+// with factory types as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeExporterFactoryMap(factories ...ExporterFactory) (map[config.Type]ExporterFactory, error) {
+	fMap := map[config.Type]ExporterFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate exporter factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
+
+// MakeExtensionFactoryMap takes a list of extension factories and returns a map
+// with factory types as keys. It returns a non-nil error when more than one factory
+// has the same type.
+func MakeExtensionFactoryMap(factories ...ExtensionFactory) (map[config.Type]ExtensionFactory, error) {
+	fMap := map[config.Type]ExtensionFactory{}
+	for _, f := range factories {
+		if _, ok := fMap[f.Type()]; ok {
+			return fMap, fmt.Errorf("duplicate extension factory %q", f.Type())
+		}
+		fMap[f.Type()] = f
+	}
+	return fMap, nil
+}
diff --git a/internal/otel_collector/component/host.go b/internal/otel_collector/component/host.go
new file mode 100644
index 00000000000..56375b871ee
--- /dev/null
+++ b/internal/otel_collector/component/host.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package component
+
+import (
+	"go.opentelemetry.io/collector/config"
+)
+
+// Host represents the entity that is hosting a Component. It is used to allow communication
+// between the Component and its host (normally the service.Collector is the host).
+type Host interface {
+	// ReportFatalError is used to report to the host that the component
+	// encountered a fatal error (i.e.: an error that the instance cannot recover
+	// from) after its start function has already returned.
+	//
+	// ReportFatalError should be called by the component anytime after Component.Start() ends and
+	// before Component.Shutdown() begins.
+	ReportFatalError(err error)
+
+	// GetFactory returns the factory for a component of the specified kind and type.
+	// This allows components to create other components. For example:
+	//   func (r MyReceiver) Start(host component.Host) error {
+	//     apacheFactory := host.GetFactory(KindReceiver, "apache").(component.ReceiverFactory)
+	//     receiver, err := apacheFactory.CreateMetricsReceiver(...)
+	//     ...
+	//   }
+	//
+	// GetFactory can be called by the component anytime after Component.Start() begins and
+	// until Component.Shutdown() ends. Note that the component is responsible for destroying
+	// other components that it creates.
+	GetFactory(kind Kind, componentType config.Type) Factory
+
+	// GetExtensions returns the map of extensions. Only enabled and created extensions will be returned.
+	// It is typically used to find an extension by type or by full config name. Both cases
+	// can be done by iterating the returned map. There are typically very few extensions,
+	// so there are no performance implications due to iteration.
+	//
+	// GetExtensions can be called by the component anytime after Component.Start() begins and
+	// until Component.Shutdown() ends.
+	GetExtensions() map[config.ComponentID]Extension
+
+	// GetExporters returns the map of exporters. Only enabled and created exporters will be returned.
+	// It is typically used to find exporters by type or by full config name. Both cases
+	// can be done by iterating the returned map. There are typically very few exporters,
+	// so there are no performance implications due to iteration.
+	// The returned map is keyed by DataType; each inner map goes from exporter config ID to the exporter instance.
+ // Note that an exporter with the same name may be attached to multiple pipelines and + // thus we may have an instance of the exporter for multiple data types. + // This is an experimental function that may change or even be removed completely. + // + // GetExporters can be called by the component anytime after Component.Start() begins and + // until Component.Shutdown() ends. + GetExporters() map[config.DataType]map[config.ComponentID]Exporter +} diff --git a/internal/otel_collector/component/processor.go b/internal/otel_collector/component/processor.go new file mode 100644 index 00000000000..c6035317b8c --- /dev/null +++ b/internal/otel_collector/component/processor.go @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package component + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" +) + +// Processor defines the common functions that must be implemented by TracesProcessor +// and MetricsProcessor. +type Processor interface { + Component +} + +// TracesProcessor is a processor that can consume traces. +type TracesProcessor interface { + Processor + consumer.Traces +} + +// MetricsProcessor is a processor that can consume metrics. +type MetricsProcessor interface { + Processor + consumer.Metrics +} + +// LogsProcessor is a processor that can consume logs. +type LogsProcessor interface { + Processor + consumer.Logs +} + +// ProcessorCreateSettings is passed to Create* functions in ProcessorFactory. +type ProcessorCreateSettings struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + + // BuildInfo can be used by components for informational purposes + BuildInfo BuildInfo +} + +// ProcessorFactory is factory interface for processors. This is the +// new factory type that can create new style processors. +// +// This interface cannot be directly implemented. Implementations need to embed +// the BaseProcessorFactory or use the processorhelper.NewFactory to implement it. +type ProcessorFactory interface { + Factory + + // CreateDefaultConfig creates the default configuration for the Processor. + // This method can be called multiple times depending on the pipeline + // configuration and should not cause side-effects that prevent the creation + // of multiple instances of the Processor. + // The object returned by this method needs to pass the checks implemented by + // 'configcheck.ValidateConfig'. It is recommended to have these checks in the + // tests of any implementation of the Factory interface. + CreateDefaultConfig() config.Processor + + // CreateTracesProcessor creates a trace processor based on this config. 
+ // If the processor type does not support tracing or if the config is not valid, + // an error will be returned instead. + CreateTracesProcessor( + ctx context.Context, + set ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Traces, + ) (TracesProcessor, error) + + // CreateMetricsProcessor creates a metrics processor based on this config. + // If the processor type does not support metrics or if the config is not valid, + // an error will be returned instead. + CreateMetricsProcessor( + ctx context.Context, + set ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Metrics, + ) (MetricsProcessor, error) + + // CreateLogsProcessor creates a processor based on the config. + // If the processor type does not support logs or if the config is not valid, + // an error will be returned instead. + CreateLogsProcessor( + ctx context.Context, + set ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Logs, + ) (LogsProcessor, error) + + // unexportedProcessor is a dummy method to force this interface to not be implemented. + unexportedProcessor() +} + +// BaseProcessorFactory is the interface that must be embedded by all ProcessorFactory implementations. +type BaseProcessorFactory struct{} + +var _ ProcessorFactory = (*BaseProcessorFactory)(nil) + +// Type must be overridden. +func (b BaseProcessorFactory) Type() config.Type { + panic("implement me") +} + +// CreateDefaultConfig must be overridden. +func (b BaseProcessorFactory) CreateDefaultConfig() config.Processor { + panic("implement me") +} + +// CreateTracesProcessor default implemented as not supported data type. +func (b BaseProcessorFactory) CreateTracesProcessor(context.Context, ProcessorCreateSettings, config.Processor, consumer.Traces) (TracesProcessor, error) { + return nil, componenterror.ErrDataTypeIsNotSupported +} + +// CreateMetricsProcessor default implemented as not supported data type. +func (b BaseProcessorFactory) CreateMetricsProcessor(context.Context, ProcessorCreateSettings, config.Processor, consumer.Metrics) (MetricsProcessor, error) { + return nil, componenterror.ErrDataTypeIsNotSupported +} + +// CreateLogsProcessor default implemented as not supported data type. +func (b BaseProcessorFactory) CreateLogsProcessor(context.Context, ProcessorCreateSettings, config.Processor, consumer.Logs) (LogsProcessor, error) { + return nil, componenterror.ErrDataTypeIsNotSupported +} + +func (b BaseProcessorFactory) unexportedProcessor() {} diff --git a/internal/otel_collector/component/receiver.go b/internal/otel_collector/component/receiver.go new file mode 100644 index 00000000000..fb2c885428a --- /dev/null +++ b/internal/otel_collector/component/receiver.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
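Because `ProcessorFactory` cannot be implemented directly, a data-type-specific factory embeds `BaseProcessorFactory` and overrides only what it supports; the constructors it leaves alone answer with `ErrDataTypeIsNotSupported`. A sketch of a hypothetical traces-only factory (the `tracesonly` type name and the pass-through processor are illustrative, not part of the patch):

```go
package example

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenthelper"
	"go.opentelemetry.io/collector/config"
	"go.opentelemetry.io/collector/consumer"
)

// tracesOnlyConfig mirrors the nop configs above.
type tracesOnlyConfig struct {
	config.ProcessorSettings `mapstructure:",squash"`
}

// tracesOnlyFactory embeds BaseProcessorFactory, so the metrics and logs
// constructors and the unexported marker method come from the base type.
type tracesOnlyFactory struct {
	component.BaseProcessorFactory
}

var _ component.ProcessorFactory = (*tracesOnlyFactory)(nil)

func (tracesOnlyFactory) Type() config.Type { return "tracesonly" }

func (tracesOnlyFactory) CreateDefaultConfig() config.Processor {
	return &tracesOnlyConfig{
		ProcessorSettings: config.NewProcessorSettings(config.NewID("tracesonly")),
	}
}

// passthrough forwards traces unchanged to the next consumer.
type passthrough struct {
	component.Component
	consumer.Traces
}

func (tracesOnlyFactory) CreateTracesProcessor(
	_ context.Context,
	_ component.ProcessorCreateSettings,
	_ config.Processor,
	next consumer.Traces,
) (component.TracesProcessor, error) {
	return &passthrough{Component: componenthelper.New(), Traces: next}, nil
}
```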
+
+package component
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// Receiver allows the collector to receive metrics, traces and logs.
+type Receiver interface {
+	Component
+}
+
+// A TracesReceiver receives traces.
+// Its purpose is to translate data from any format to the collector's internal trace format.
+// TracesReceiver feeds a consumer.Traces with data.
+//
+// For example, it could be a Zipkin data source that translates Zipkin spans into pdata.Traces.
+type TracesReceiver interface {
+	Receiver
+}
+
+// A MetricsReceiver receives metrics.
+// Its purpose is to translate data from any format to the collector's internal metrics format.
+// MetricsReceiver feeds a consumer.Metrics with data.
+//
+// For example, it could be a Prometheus data source that translates Prometheus metrics into pdata.Metrics.
+type MetricsReceiver interface {
+	Receiver
+}
+
+// A LogsReceiver receives logs.
+// Its purpose is to translate data from any format to the collector's internal logs data format.
+// LogsReceiver feeds a consumer.Logs with data.
+//
+// For example, a LogsReceiver can read syslogs and convert them into pdata.Logs.
+type LogsReceiver interface {
+	Receiver
+}
+
+// ReceiverCreateSettings configures Receiver creators.
+type ReceiverCreateSettings struct {
+	// Logger that the factory can use during creation and can pass to the created
+	// component to be used later as well.
+	Logger *zap.Logger
+
+	// TracerProvider that the factory can pass to other instrumented third-party libraries.
+	TracerProvider trace.TracerProvider
+
+	// BuildInfo can be used by components for informational purposes.
+	BuildInfo BuildInfo
+}
+
+// ReceiverFactory can create TracesReceiver, MetricsReceiver
+// and LogsReceiver. This is the new, preferred factory type for creating receivers.
+type ReceiverFactory interface {
+	Factory
+
+	// CreateDefaultConfig creates the default configuration for the Receiver.
+	// This method can be called multiple times depending on the pipeline
+	// configuration and should not cause side-effects that prevent the creation
+	// of multiple instances of the Receiver.
+	// The object returned by this method needs to pass the checks implemented by
+	// 'configcheck.ValidateConfig'. It is recommended to have these checks in the
+	// tests of any implementation of the Factory interface.
+	CreateDefaultConfig() config.Receiver
+
+	// CreateTracesReceiver creates a trace receiver based on this config.
+	// If the receiver type does not support tracing or if the config is not valid,
+	// an error will be returned instead.
+	CreateTracesReceiver(ctx context.Context, set ReceiverCreateSettings,
+		cfg config.Receiver, nextConsumer consumer.Traces) (TracesReceiver, error)
+
+	// CreateMetricsReceiver creates a metrics receiver based on this config.
+	// If the receiver type does not support metrics or if the config is not valid,
+	// an error will be returned instead.
+	CreateMetricsReceiver(ctx context.Context, set ReceiverCreateSettings,
+		cfg config.Receiver, nextConsumer consumer.Metrics) (MetricsReceiver, error)
+
+	// CreateLogsReceiver creates a log receiver based on this config.
+	// If the receiver type does not support the data type or if the config is not valid,
+	// an error will be returned instead.
+ CreateLogsReceiver(ctx context.Context, set ReceiverCreateSettings, + cfg config.Receiver, nextConsumer consumer.Logs) (LogsReceiver, error) +} diff --git a/internal/otel_collector/config/config.go b/internal/otel_collector/config/config.go new file mode 100644 index 00000000000..6019cde3ad4 --- /dev/null +++ b/internal/otel_collector/config/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "errors" + "fmt" + + "go.opentelemetry.io/collector/config/configparser" +) + +var ( + errMissingExporters = errors.New("no enabled exporters specified in config") + errMissingReceivers = errors.New("no enabled receivers specified in config") + errMissingServicePipelines = errors.New("service must have at least one pipeline") +) + +// Config defines the configuration for the various elements of collector or agent. +type Config struct { + Receivers + Exporters + Processors + Extensions + Service +} + +var _ validatable = (*Config)(nil) + +// Validate returns an error if the config is invalid. +// +// This function performs basic validation of configuration. There may be more subtle +// invalid cases that we currently don't check for but which we may want to add in +// the future (e.g. disallowing receiving and exporting on the same endpoint). +func (cfg *Config) Validate() error { + // Currently there is no default receiver enabled. + // The configuration must specify at least one receiver to be valid. + if len(cfg.Receivers) == 0 { + return errMissingReceivers + } + + // Validate the receiver configuration. + for recv, recvCfg := range cfg.Receivers { + if err := recvCfg.Validate(); err != nil { + return fmt.Errorf("receiver \"%s\" has invalid configuration: %w", recv, err) + } + } + + // Currently there is no default exporter enabled. + // The configuration must specify at least one exporter to be valid. + if len(cfg.Exporters) == 0 { + return errMissingExporters + } + + // Validate the exporter configuration. + for exp, expCfg := range cfg.Exporters { + if err := expCfg.Validate(); err != nil { + return fmt.Errorf("exporter \"%s\" has invalid configuration: %w", exp, err) + } + } + + // Validate the processor configuration. + for proc, procCfg := range cfg.Processors { + if err := procCfg.Validate(); err != nil { + return fmt.Errorf("processor \"%s\" has invalid configuration: %w", proc, err) + } + } + + // Validate the extension configuration. + for ext, extCfg := range cfg.Extensions { + if err := extCfg.Validate(); err != nil { + return fmt.Errorf("extension \"%s\" has invalid configuration: %w", ext, err) + } + } + + // Check that all enabled extensions in the service are configured. + if err := cfg.validateServiceExtensions(); err != nil { + return err + } + + // Check that all pipelines have at least one receiver and one exporter, and they reference + // only configured components. 
+ return cfg.validateServicePipelines() +} + +func (cfg *Config) validateServiceExtensions() error { + // Validate extensions. + for _, ref := range cfg.Service.Extensions { + // Check that the name referenced in the Service extensions exists in the top-level extensions. + if cfg.Extensions[ref] == nil { + return fmt.Errorf("service references extension %q which does not exist", ref) + } + } + + return nil +} + +func (cfg *Config) validateServicePipelines() error { + // Must have at least one pipeline. + if len(cfg.Service.Pipelines) == 0 { + return errMissingServicePipelines + } + + // Validate pipelines. + for _, pipeline := range cfg.Service.Pipelines { + // Validate pipeline has at least one receiver. + if len(pipeline.Receivers) == 0 { + return fmt.Errorf("pipeline %q must have at least one receiver", pipeline.Name) + } + + // Validate pipeline receiver name references. + for _, ref := range pipeline.Receivers { + // Check that the name referenced in the pipeline's receivers exists in the top-level receivers. + if cfg.Receivers[ref] == nil { + return fmt.Errorf("pipeline %q references receiver %q which does not exist", pipeline.Name, ref) + } + } + + // Validate pipeline processor name references. + for _, ref := range pipeline.Processors { + // Check that the name referenced in the pipeline's processors exists in the top-level processors. + if cfg.Processors[ref] == nil { + return fmt.Errorf("pipeline %q references processor %q which does not exist", pipeline.Name, ref) + } + } + + // Validate pipeline has at least one exporter. + if len(pipeline.Exporters) == 0 { + return fmt.Errorf("pipeline %q must have at least one exporter", pipeline.Name) + } + + // Validate pipeline exporter name references. + for _, ref := range pipeline.Exporters { + // Check that the name referenced in the pipeline's Exporters exists in the top-level Exporters. + if cfg.Exporters[ref] == nil { + return fmt.Errorf("pipeline %q references exporter %q which does not exist", pipeline.Name, ref) + } + } + } + return nil +} + +// Service defines the configurable components of the service. +type Service struct { + // Extensions are the ordered list of extensions configured for the service. + Extensions []ComponentID + + // Pipelines are the set of data pipelines configured for the service. + Pipelines Pipelines +} + +// Type is the component type as it is used in the config. +type Type string + +// validatable defines the interface for the configuration validation. +type validatable interface { + // Validate validates the configuration and returns an error if invalid. + Validate() error +} + +// Unmarshallable defines an optional interface for custom configuration unmarshaling. +// A configuration struct can implement this interface to override the default unmarshaling. +type Unmarshallable interface { + // Unmarshal is a function that un-marshals a Parser into the unmarshable struct in a custom way. + // componentSection *Parser + // The config for this specific component. May be nil or empty if no config available. + Unmarshal(componentSection *configparser.Parser) error +} + +// DataType is the data type that is supported for collection. We currently support +// collecting metrics, traces and logs, this can expand in the future. +type DataType string + +// Currently supported data types. Add new data types here when new types are supported in the future. +const ( + // TracesDataType is the data type tag for traces. + TracesDataType DataType = "traces" + + // MetricsDataType is the data type tag for metrics. 
+ MetricsDataType DataType = "metrics" + + // LogsDataType is the data type tag for logs. + LogsDataType DataType = "logs" +) + +// Pipeline defines a single pipeline. +type Pipeline struct { + Name string + InputType DataType + Receivers []ComponentID + Processors []ComponentID + Exporters []ComponentID +} + +// Pipelines is a map of names to Pipelines. +type Pipelines map[string]*Pipeline diff --git a/internal/otel_collector/config/configauth/README.md b/internal/otel_collector/config/configauth/README.md new file mode 100644 index 00000000000..c8bf9f0f6c0 --- /dev/null +++ b/internal/otel_collector/config/configauth/README.md @@ -0,0 +1,37 @@ +# Authentication configuration for receivers + +This module allows server types, such as gRPC and HTTP, to be configured to perform authentication for requests and/or RPCs. Each server type is responsible for getting the request/RPC metadata and passing it down to the authenticator. + +The currently known authenticators: + +- [oidc](../../extension/oidcauthextension) + +Examples: +```yaml +extensions: + oidc: + # see the blog post on securing the otelcol for information + # on how to setup an OIDC server and how to generate the TLS certs + # required for this example + # https://medium.com/opentelemetry/securing-your-opentelemetry-collector-1a4f9fa5bd6f + issuer_url: http://localhost:8080/auth/realms/opentelemetry + audience: account + +receivers: + otlp/with_auth: + protocols: + grpc: + endpoint: localhost:4318 + tls_settings: + cert_file: /tmp/certs/cert.pem + key_file: /tmp/certs/cert-key.pem + auth: + ## oidc is the extension name to use as the authenticator for this receiver + authenticator: oidc +``` + +## Creating an authenticator + +New authenticators can be added by creating a new extension that also implements the `configauth.ServerAuthenticator` interface. Generic authenticators that may be used by a good number of users might be accepted as part of the core distribution, or as part of the contrib distribution. If you are interested in contributing an authenticator, open an issue with your proposal. + +For other cases, you'll need to include your custom authenticator as part of your custom OpenTelemetry Collector, perhaps built using the [OpenTelemetry Collector Builder](https://github.com/open-telemetry/opentelemetry-collector-builder). diff --git a/internal/otel_collector/config/configauth/clientauth.go b/internal/otel_collector/config/configauth/clientauth.go new file mode 100644 index 00000000000..622c4954a9f --- /dev/null +++ b/internal/otel_collector/config/configauth/clientauth.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "fmt" + "net/http" + + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" +) + +// ClientAuthenticator is an Extension that can be used as an authenticator for the configauth.Authentication option.
+// Authenticators are then included as part of OpenTelemetry Collector builds and can be referenced by their +// names from the Authentication configuration. +type ClientAuthenticator interface { + component.Extension +} + +// HTTPClientAuthenticator is a ClientAuthenticator that can be used as an authenticator +// for the configauth.Authentication option for HTTP clients. +type HTTPClientAuthenticator interface { + ClientAuthenticator + RoundTripper(base http.RoundTripper) (http.RoundTripper, error) +} + +// GRPCClientAuthenticator is a ClientAuthenticator that can be used as an authenticator for +// the configauth.Authentication option for gRPC clients. +type GRPCClientAuthenticator interface { + ClientAuthenticator + PerRPCCredentials() (credentials.PerRPCCredentials, error) +} + +// GetHTTPClientAuthenticator attempts to select the appropriate HTTPClientAuthenticator from the list of extensions, +// based on the component id of the extension. If an authenticator is not found, an error is returned. +// This should be only used by HTTP clients. +func GetHTTPClientAuthenticator(extensions map[config.ComponentID]component.Extension, + componentID config.ComponentID) (HTTPClientAuthenticator, error) { + for id, ext := range extensions { + if id == componentID { + if auth, ok := ext.(HTTPClientAuthenticator); ok { + return auth, nil + } + return nil, fmt.Errorf("requested authenticator is not for HTTP clients") + } + } + return nil, fmt.Errorf("failed to resolve authenticator %q: %w", componentID.String(), errAuthenticatorNotFound) +} + +// GetGRPCClientAuthenticator attempts to select the appropriate GRPCClientAuthenticator from the list of extensions, +// based on the component id of the extension. If an authenticator is not found, an error is returned. +// This should only be used by gRPC clients. +func GetGRPCClientAuthenticator(extensions map[config.ComponentID]component.Extension, + componentID config.ComponentID) (GRPCClientAuthenticator, error) { + for id, ext := range extensions { + if id == componentID { + if auth, ok := ext.(GRPCClientAuthenticator); ok { + return auth, nil + } + return nil, fmt.Errorf("requested authenticator is not for gRPC clients") + } + } + return nil, fmt.Errorf("failed to resolve authenticator %q: %w", componentID.String(), errAuthenticatorNotFound) +} diff --git a/internal/otel_collector/config/configauth/configauth.go b/internal/otel_collector/config/configauth/configauth.go new file mode 100644 index 00000000000..16c26328a82 --- /dev/null +++ b/internal/otel_collector/config/configauth/configauth.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "errors" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" +) + +var ( + errAuthenticatorNotFound = errors.New("authenticator not found") +) + +// Authentication defines the auth settings for the receiver. 
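+// As a sketch (assuming an extension named `oidc` is defined under `extensions`, as in the configauth README), the matching YAML for this struct is: +// +//   auth: +//     authenticator: oidc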
+type Authentication struct { + // AuthenticatorName specifies the name of the extension to use in order to authenticate the incoming data point. + AuthenticatorName string `mapstructure:"authenticator"` +} + +// GetServerAuthenticator attempts to select the appropriate ServerAuthenticator from the list of extensions, based on the requested component ID. +// If an authenticator is not found, an error is returned. +func GetServerAuthenticator(extensions map[config.ComponentID]component.Extension, componentID config.ComponentID) (ServerAuthenticator, error) { + for id, ext := range extensions { + if auth, ok := ext.(ServerAuthenticator); ok { + if id == componentID { + return auth, nil + } + } + } + + return nil, fmt.Errorf("failed to resolve authenticator %q: %w", componentID.String(), errAuthenticatorNotFound) +} diff --git a/internal/otel_collector/config/configauth/doc.go b/internal/otel_collector/config/configauth/doc.go new file mode 100644 index 00000000000..169a504122e --- /dev/null +++ b/internal/otel_collector/config/configauth/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configauth implements the configuration settings to +// ensure authentication on incoming requests, and allows +// exporters to add authentication on outgoing requests. +package configauth diff --git a/internal/otel_collector/config/configauth/mock_clientauth.go b/internal/otel_collector/config/configauth/mock_clientauth.go new file mode 100644 index 00000000000..68097e84ca1 --- /dev/null +++ b/internal/otel_collector/config/configauth/mock_clientauth.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package configauth + +import ( + "context" + "errors" + "net/http" + + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/collector/component" +) + +var ( + _ HTTPClientAuthenticator = (*MockClientAuthenticator)(nil) + _ GRPCClientAuthenticator = (*MockClientAuthenticator)(nil) + errMockError = errors.New("mock Error") +) + +// MockClientAuthenticator provides a mock implementation of GRPCClientAuthenticator and HTTPClientAuthenticator interfaces +type MockClientAuthenticator struct { + ResultRoundTripper http.RoundTripper + ResultPerRPCCredentials credentials.PerRPCCredentials + MustError bool +} + +// Start for the MockClientAuthenticator does nothing +func (m *MockClientAuthenticator) Start(ctx context.Context, host component.Host) error { + return nil +} + +// Shutdown for the MockClientAuthenticator does nothing +func (m *MockClientAuthenticator) Shutdown(ctx context.Context) error { + return nil +} + +// RoundTripper for the MockClientAuthenticator either returns error if the mock authenticator is forced to or +// returns the supplied resultRoundTripper. +func (m *MockClientAuthenticator) RoundTripper(base http.RoundTripper) (http.RoundTripper, error) { + if m.MustError { + return nil, errMockError + } + return m.ResultRoundTripper, nil +} + +// PerRPCCredentials for the MockClientAuthenticator either returns error if the mock authenticator is forced to or +// returns the supplied resultPerRPCCredentials. +func (m *MockClientAuthenticator) PerRPCCredentials() (credentials.PerRPCCredentials, error) { + if m.MustError { + return nil, errMockError + } + return m.ResultPerRPCCredentials, nil +} diff --git a/internal/otel_collector/config/configauth/mock_serverauth.go b/internal/otel_collector/config/configauth/mock_serverauth.go new file mode 100644 index 00000000000..f2522557200 --- /dev/null +++ b/internal/otel_collector/config/configauth/mock_serverauth.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "context" + + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/component" +) + +var ( + _ ServerAuthenticator = (*MockAuthenticator)(nil) + _ component.Extension = (*MockAuthenticator)(nil) +) + +// MockAuthenticator provides a testing mock for code dealing with authentication. +type MockAuthenticator struct { + // AuthenticateFunc to use during the authentication phase of this mock. Optional. + AuthenticateFunc AuthenticateFunc + // TODO: implement the other funcs +} + +// Authenticate executes the mock's AuthenticateFunc, if provided, or just returns the given context unchanged. +func (m *MockAuthenticator) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) { + if m.AuthenticateFunc == nil { + return context.Background(), nil + } + return m.AuthenticateFunc(ctx, headers) +} + +// GRPCUnaryServerInterceptor isn't currently implemented and always returns nil. 
+func (m *MockAuthenticator) GRPCUnaryServerInterceptor(context.Context, interface{}, *grpc.UnaryServerInfo, grpc.UnaryHandler) (interface{}, error) { + return nil, nil +} + +// GRPCStreamServerInterceptor isn't currently implemented and always returns nil. +func (m *MockAuthenticator) GRPCStreamServerInterceptor(interface{}, grpc.ServerStream, *grpc.StreamServerInfo, grpc.StreamHandler) error { + return nil +} + +// Start isn't currently implemented and always returns nil. +func (m *MockAuthenticator) Start(context.Context, component.Host) error { + return nil +} + +// Shutdown isn't currently implemented and always returns nil. +func (m *MockAuthenticator) Shutdown(ctx context.Context) error { + return nil +} diff --git a/internal/otel_collector/config/configauth/serverauth.go b/internal/otel_collector/config/configauth/serverauth.go new file mode 100644 index 00000000000..2a6da1e378b --- /dev/null +++ b/internal/otel_collector/config/configauth/serverauth.go @@ -0,0 +1,110 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configauth + +import ( + "context" + "errors" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/collector/component" +) + +var ( + errMetadataNotFound = errors.New("no request metadata found") +) + +// ServerAuthenticator is an Extension that can be used as an authenticator for the configauth.Authentication option. +// Authenticators are then included as part of OpenTelemetry Collector builds and can be referenced by their +// names from the Authentication configuration. Each ServerAuthenticator is free to define its own behavior and configuration options, +// but note that the expectations that come as part of Extensions exist here as well. For instance, it should be possible for multiple +// instances of the same authenticator to exist under different names. +type ServerAuthenticator interface { + component.Extension + + // Authenticate checks whether the given headers map contains valid auth data. Successfully authenticated calls will always return a nil error. + // When the authentication fails, an error must be returned and the caller must not retry. This function is typically called from interceptors, + // on behalf of receivers, but receivers can still call this directly if the usage of interceptors isn't suitable. + // The deadline and cancellation given to this function must be respected, but note that authentication data has to be part of the map, not context. + // The resulting context should contain the authentication data, such as the principal/username, group membership (if available), and the raw + // authentication data (if possible). This will allow other components in the pipeline to make decisions based on that data, such as routing based + // on tenancy as determined by the group membership, or passing through the authentication data to the next collector/backend. + // The context keys to be used are not defined yet.
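+ // As an illustration only (tokenAuth is a hypothetical type, not part of this package), a minimal implementation might validate a shared bearer token: + // + //   func (a *tokenAuth) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) { + //       if v := headers["authorization"]; len(v) == 0 || v[0] != a.token { + //           return ctx, errors.New("authentication failed") + //       } + //       return ctx, nil + //   }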
+ Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) + + // GRPCUnaryServerInterceptor is a helper method to provide a gRPC-compatible UnaryServerInterceptor, typically calling the authenticator's Authenticate method. + // While the context is the typical source of authentication data, the interceptor is free to determine where the auth data should come from. For instance, some + // receivers might implement an interceptor that looks into the payload instead. + // Once the authentication succeeds, the interceptor is expected to call the handler. + // See https://pkg.go.dev/google.golang.org/grpc#UnaryServerInterceptor. + GRPCUnaryServerInterceptor(ctx context.Context, req interface{}, srvInfo *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) + + // GRPCStreamServerInterceptor is a helper method to provide a gRPC-compatible StreamServerInterceptor, typically calling the authenticator's Authenticate method. + // While the context is the typical source of authentication data, the interceptor is free to determine where the auth data should come from. For instance, some + // receivers might implement an interceptor that looks into the payload instead. + // Once the authentication succeeds, the interceptor is expected to call the handler. + // See https://pkg.go.dev/google.golang.org/grpc#StreamServerInterceptor. + GRPCStreamServerInterceptor(srv interface{}, stream grpc.ServerStream, srvInfo *grpc.StreamServerInfo, handler grpc.StreamHandler) error +} + +// AuthenticateFunc defines the signature for the function responsible for performing the authentication based on the given headers map. +// See ServerAuthenticator.Authenticate. +type AuthenticateFunc func(ctx context.Context, headers map[string][]string) (context.Context, error) + +// GRPCUnaryInterceptorFunc defines the signature for the function intercepting unary gRPC calls, useful for authenticators to use as +// types for internal structs, making it easier to mock them in tests. +// See ServerAuthenticator.GRPCUnaryServerInterceptor. +type GRPCUnaryInterceptorFunc func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, authenticate AuthenticateFunc) (interface{}, error) + +// GRPCStreamInterceptorFunc defines the signature for the function intercepting streaming gRPC calls, useful for authenticators to use as +// types for internal structs, making it easier to mock them in tests. +// See ServerAuthenticator.GRPCStreamServerInterceptor. +type GRPCStreamInterceptorFunc func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler, authenticate AuthenticateFunc) error + +// DefaultGRPCUnaryServerInterceptor provides a default implementation of GRPCUnaryInterceptorFunc, useful for most authenticators. +// It extracts the headers from the incoming request, under the assumption that the credentials will be part of the resulting map. +func DefaultGRPCUnaryServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler, authenticate AuthenticateFunc) (interface{}, error) { + headers, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMetadataNotFound + } + + ctx, err := authenticate(ctx, headers) + if err != nil { + return nil, err + } + + return handler(ctx, req) +} + +// DefaultGRPCStreamServerInterceptor provides a default implementation of GRPCStreamInterceptorFunc, useful for most authenticators. 
+// It extracts the headers from the incoming request, under the assumption that the credentials will be part of the resulting map. +func DefaultGRPCStreamServerInterceptor(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler, authenticate AuthenticateFunc) error { + ctx := stream.Context() + headers, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errMetadataNotFound + } + + // TODO: propagate the context down the stream + _, err := authenticate(ctx, headers) + if err != nil { + return err + } + + return handler(srv, stream) +} diff --git a/internal/otel_collector/config/configcheck/configcheck.go b/internal/otel_collector/config/configcheck/configcheck.go new file mode 100644 index 00000000000..842aeed4e0b --- /dev/null +++ b/internal/otel_collector/config/configcheck/configcheck.go @@ -0,0 +1,185 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configcheck + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" +) + +// The regular expression for valid config field tag. +var configFieldTagRegExp = regexp.MustCompile("^[a-z0-9][a-z0-9_]*$") + +// ValidateConfigFromFactories checks if all configurations for the given factories +// are satisfying the patterns used by the collector. +func ValidateConfigFromFactories(factories component.Factories) error { + var errs []error + + for _, factory := range factories.Receivers { + if err := ValidateConfig(factory.CreateDefaultConfig()); err != nil { + errs = append(errs, err) + } + } + for _, factory := range factories.Processors { + if err := ValidateConfig(factory.CreateDefaultConfig()); err != nil { + errs = append(errs, err) + } + } + for _, factory := range factories.Exporters { + if err := ValidateConfig(factory.CreateDefaultConfig()); err != nil { + errs = append(errs, err) + } + } + for _, factory := range factories.Extensions { + if err := ValidateConfig(factory.CreateDefaultConfig()); err != nil { + errs = append(errs, err) + } + } + + return consumererror.Combine(errs) +} + +// ValidateConfig enforces that given configuration object is following the patterns +// used by the collector. This ensures consistency between different implementations +// of components and extensions. It is recommended for implementers of components +// to call this function on their tests passing the default configuration of the +// component factory. +func ValidateConfig(config interface{}) error { + t := reflect.TypeOf(config) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return fmt.Errorf("config must be a struct or a pointer to one, the passed object is a %s", t.Kind()) + } + + return validateConfigDataType(t) +} + +// validateConfigDataType performs a descending validation of the given type. +// If the type is a struct it goes to each of its fields to check for the proper +// tags. 
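+// For illustration (exampleSettings is hypothetical, not part of this package), a struct such as the following passes these checks: exported fields carry lowercase mapstructure tags and unexported fields need none: +// +//	type exampleSettings struct { +//		Endpoint string            `mapstructure:"endpoint"` +//		Headers  map[string]string `mapstructure:"headers"` +//		state    string            // unexported: ignored by the checks +//	}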
+func validateConfigDataType(t reflect.Type) error { + var errs []error + + switch t.Kind() { + case reflect.Ptr: + if err := validateConfigDataType(t.Elem()); err != nil { + errs = append(errs, err) + } + case reflect.Struct: + // Reflect on the pointed data and check each of its fields. + nf := t.NumField() + for i := 0; i < nf; i++ { + f := t.Field(i) + if err := checkStructFieldTags(f); err != nil { + errs = append(errs, err) + } + } + default: + // The config object can carry other types but they are not used when + // reading the configuration via koanf so ignore them. Basically ignore: + // reflect.Uintptr, reflect.Chan, reflect.Func, reflect.Interface, and + // reflect.UnsafePointer. + } + + if err := consumererror.Combine(errs); err != nil { + return fmt.Errorf( + "type %q from package %q has invalid config settings: %v", + t.Name(), + t.PkgPath(), + err) + } + + return nil +} + +// checkStructFieldTags inspects the tags of a struct field. +func checkStructFieldTags(f reflect.StructField) error { + + tagValue := f.Tag.Get("mapstructure") + if tagValue == "" { + + // Ignore special types. + switch f.Type.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Uintptr, reflect.UnsafePointer: + // Allow the config to carry the types above, but since they are not read + // when loading configuration, just ignore them. + return nil + } + + // Public fields of other types should be tagged. + chars := []byte(f.Name) + if len(chars) > 0 && chars[0] >= 'A' && chars[0] <= 'Z' { + return fmt.Errorf("mapstructure tag not present on field %q", f.Name) + } + + // Not public field, no need to have a tag. + return nil + } + + tagParts := strings.Split(tagValue, ",") + if tagParts[0] != "" { + if tagParts[0] == "-" { + // Nothing to do, as mapstructure decode skips this field. + return nil + } + } + + // Check if squash is specified. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + // Field was squashed. + if (f.Type.Kind() != reflect.Struct) && (f.Type.Kind() != reflect.Ptr || f.Type.Elem().Kind() != reflect.Struct) { + return fmt.Errorf( + "attempt to squash non-struct type on field %q", f.Name) + } + } + + switch f.Type.Kind() { + case reflect.Struct: + // It is another struct, continue down-level. + return validateConfigDataType(f.Type) + + case reflect.Map, reflect.Slice, reflect.Array: + // The element of map, array, or slice can be itself a configuration object. + return validateConfigDataType(f.Type.Elem()) + + default: + fieldTag := tagParts[0] + if !configFieldTagRegExp.MatchString(fieldTag) { + return fmt.Errorf( + "field %q has config tag %q which doesn't satisfy %q", + f.Name, + fieldTag, + configFieldTagRegExp.String()) + } + } + + return nil +} diff --git a/internal/otel_collector/config/configcheck/doc.go b/internal/otel_collector/config/configcheck/doc.go new file mode 100644 index 00000000000..82ec0712afe --- /dev/null +++ b/internal/otel_collector/config/configcheck/doc.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configcheck has checks to be applied to configuration +// objects implemented by factories of components used in the OpenTelemetry +// collector. It is recommended for implementers of components to run the +// validations available in this package. +package configcheck diff --git a/internal/otel_collector/config/configgrpc/README.md b/internal/otel_collector/config/configgrpc/README.md new file mode 100644 index 00000000000..a2d28d2f6af --- /dev/null +++ b/internal/otel_collector/config/configgrpc/README.md @@ -0,0 +1,64 @@ +# gRPC Configuration Settings + +gRPC exposes a [variety of settings](https://godoc.org/google.golang.org/grpc). +Several of these settings are available for configuration within individual +receivers or exporters. In general, none of these settings should need to be +adjusted. + +## Client Configuration + +[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md) +leverage client configuration. + +Note that client configuration supports TLS configuration; however, unlike +server configuration, the parameters are not defined under `tls_settings`. +For more information, see [configtls +README](../configtls/README.md). + +- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md) +- `compression` (default = gzip): Compression type to use (only gzip is supported today) +- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +- `headers`: name/value pairs added to the request +- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters) + - `permit_without_stream` + - `time` + - `timeout` +- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize) +- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize) + +Please note that [`per_rpc_auth`](https://pkg.go.dev/google.golang.org/grpc#PerRPCCredentials), which allows credentials to be sent with every RPC, has been moved to an [extension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/bearertokenauthextension). Note that this is not the same as sending headers only during the initial connection, as an `authorization` entry under `headers` would do: per-RPC credentials are sent with every RPC performed over the established connection. + +Example: + +```yaml +exporters: + otlp: + endpoint: otelcol2:55690 + headers: + test1: "value1" + "test 2": "value 2" +``` + +## Server Configuration + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md) +leverage server configuration. + +Note that the transport can also be configured. For more information, +see [confignet README](../confignet/README.md).
+ +- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters) + - [`enforcement_policy`](https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy) + - `min_time` + - `permit_without_stream` + - [`server_parameters`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters) + - `max_connection_age` + - `max_connection_age_grace` + - `max_connection_idle` + - `time` + - `timeout` +- [`max_concurrent_streams`](https://godoc.org/google.golang.org/grpc#MaxConcurrentStreams) +- [`max_recv_msg_size_mib`](https://godoc.org/google.golang.org/grpc#MaxRecvMsgSize) +- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize) +- [`tls_settings`](../configtls/README.md) +- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize) diff --git a/internal/otel_collector/config/configgrpc/configgrpc.go b/internal/otel_collector/config/configgrpc/configgrpc.go new file mode 100644 index 00000000000..2d04a8a4e2f --- /dev/null +++ b/internal/otel_collector/config/configgrpc/configgrpc.go @@ -0,0 +1,370 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configgrpc + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + "time" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/keepalive" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/config/configtls" +) + +// Compression gRPC keys for supported compression types within collector. +const ( + CompressionUnsupported = "" + CompressionGzip = "gzip" +) + +var ( + // Map of opentelemetry compression types to grpc registered compression types. + gRPCCompressionKeyMap = map[string]string{ + CompressionGzip: gzip.Name, + } +) + +// Allowed balancer names to be set in grpclb_policy to discover the servers. +var allowedBalancerNames = []string{roundrobin.Name, grpc.PickFirstBalancerName} + +// KeepaliveClientConfig exposes the keepalive.ClientParameters to be used by the exporter. +// Refer to the original data-structure for the meaning of each parameter: +// https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters +type KeepaliveClientConfig struct { + Time time.Duration `mapstructure:"time,omitempty"` + Timeout time.Duration `mapstructure:"timeout,omitempty"` + PermitWithoutStream bool `mapstructure:"permit_without_stream,omitempty"` +} + +// GRPCClientSettings defines common settings for a gRPC client configuration. +type GRPCClientSettings struct { + // The target to which the exporter is going to send traces or metrics, + // using the gRPC protocol. 
The valid syntax is described at + // https://github.com/grpc/grpc/blob/master/doc/naming.md. + Endpoint string `mapstructure:"endpoint"` + + // The compression key for supported compression types within + // the collector. Currently the only supported mode is `gzip`. + Compression string `mapstructure:"compression"` + + // TLSSetting struct exposes TLS client configuration. + TLSSetting configtls.TLSClientSetting `mapstructure:",squash"` + + // The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams. + // (https://godoc.org/google.golang.org/grpc#WithKeepaliveParams). + Keepalive *KeepaliveClientConfig `mapstructure:"keepalive"` + + // ReadBufferSize for gRPC client. See grpc.WithReadBufferSize. + // (https://godoc.org/google.golang.org/grpc#WithReadBufferSize). + ReadBufferSize int `mapstructure:"read_buffer_size"` + + // WriteBufferSize for gRPC client. See grpc.WithWriteBufferSize. + // (https://godoc.org/google.golang.org/grpc#WithWriteBufferSize). + WriteBufferSize int `mapstructure:"write_buffer_size"` + + // WaitForReady parameter configures the client to wait for ready state before sending data. + // (https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md) + WaitForReady bool `mapstructure:"wait_for_ready"` + + // The headers associated with gRPC requests. + Headers map[string]string `mapstructure:"headers"` + + // Sets the balancer in grpclb_policy to discover the servers. Default is pick_first. + // https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md + BalancerName string `mapstructure:"balancer_name"` + + // Auth configuration for outgoing RPCs. + Auth *configauth.Authentication `mapstructure:"auth,omitempty"` +} + +// KeepaliveServerConfig is the configuration for keepalive. +type KeepaliveServerConfig struct { + ServerParameters *KeepaliveServerParameters `mapstructure:"server_parameters,omitempty"` + EnforcementPolicy *KeepaliveEnforcementPolicy `mapstructure:"enforcement_policy,omitempty"` +} + +// KeepaliveServerParameters allows configuration of the keepalive.ServerParameters. +// The same default values as keepalive.ServerParameters are applicable and get applied by the server. +// See https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters for details. +type KeepaliveServerParameters struct { + MaxConnectionIdle time.Duration `mapstructure:"max_connection_idle,omitempty"` + MaxConnectionAge time.Duration `mapstructure:"max_connection_age,omitempty"` + MaxConnectionAgeGrace time.Duration `mapstructure:"max_connection_age_grace,omitempty"` + Time time.Duration `mapstructure:"time,omitempty"` + Timeout time.Duration `mapstructure:"timeout,omitempty"` +} + +// KeepaliveEnforcementPolicy allows configuration of the keepalive.EnforcementPolicy. +// The same default values as keepalive.EnforcementPolicy are applicable and get applied by the server. +// See https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy for details. +type KeepaliveEnforcementPolicy struct { + MinTime time.Duration `mapstructure:"min_time,omitempty"` + PermitWithoutStream bool `mapstructure:"permit_without_stream,omitempty"` +} + +// GRPCServerSettings defines common settings for a gRPC server configuration. +type GRPCServerSettings struct { + // Server net.Addr config. For transport only "tcp" and "unix" are valid options. + NetAddr confignet.NetAddr `mapstructure:",squash"` + + // Configures the protocol to use TLS. + // The default value is nil, which will cause the protocol to not use TLS.
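+ // For example (a hedged sketch; the keys follow the mapstructure tags of configtls.TLSServerSetting, and the paths are placeholders): + // + //   tls_settings: + //     cert_file: /path/to/server.crt + //     key_file: /path/to/server.key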
+ TLSSetting *configtls.TLSServerSetting `mapstructure:"tls_settings,omitempty"` + + // MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. + MaxRecvMsgSizeMiB uint64 `mapstructure:"max_recv_msg_size_mib"` + + // MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport. + // It has effect only for streaming RPCs. + MaxConcurrentStreams uint32 `mapstructure:"max_concurrent_streams"` + + // ReadBufferSize for gRPC server. See grpc.ReadBufferSize. + // (https://godoc.org/google.golang.org/grpc#ReadBufferSize). + ReadBufferSize int `mapstructure:"read_buffer_size"` + + // WriteBufferSize for gRPC server. See grpc.WriteBufferSize. + // (https://godoc.org/google.golang.org/grpc#WriteBufferSize). + WriteBufferSize int `mapstructure:"write_buffer_size"` + + // Keepalive anchor for all the settings related to keepalive. + Keepalive *KeepaliveServerConfig `mapstructure:"keepalive,omitempty"` + + // Auth for this receiver + Auth *configauth.Authentication `mapstructure:"auth,omitempty"` +} + +// SanitizedEndpoint strips the prefix of either http:// or https:// from configgrpc.GRPCClientSettings.Endpoint. +func (gcs *GRPCClientSettings) SanitizedEndpoint() string { + switch { + case gcs.isSchemeHTTP(): + return strings.TrimPrefix(gcs.Endpoint, "http://") + case gcs.isSchemeHTTPS(): + return strings.TrimPrefix(gcs.Endpoint, "https://") + default: + return gcs.Endpoint + } +} + +func (gcs *GRPCClientSettings) isSchemeHTTP() bool { + return strings.HasPrefix(gcs.Endpoint, "http://") +} + +func (gcs *GRPCClientSettings) isSchemeHTTPS() bool { + return strings.HasPrefix(gcs.Endpoint, "https://") +} + +// ToDialOptions maps configgrpc.GRPCClientSettings to a slice of dial options for gRPC. +func (gcs *GRPCClientSettings) ToDialOptions(ext map[config.ComponentID]component.Extension) ([]grpc.DialOption, error) { + var opts []grpc.DialOption + if gcs.Compression != "" { + if compressionKey := GetGRPCCompressionKey(gcs.Compression); compressionKey != CompressionUnsupported { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(compressionKey))) + } else { + return nil, fmt.Errorf("unsupported compression type %q", gcs.Compression) + } + } + + tlsCfg, err := gcs.TLSSetting.LoadTLSConfig() + if err != nil { + return nil, err + } + tlsDialOption := grpc.WithInsecure() + if tlsCfg != nil { + tlsDialOption = grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg)) + } else if gcs.isSchemeHTTPS() { + tlsDialOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})) + } + opts = append(opts, tlsDialOption) + + if gcs.ReadBufferSize > 0 { + opts = append(opts, grpc.WithReadBufferSize(gcs.ReadBufferSize)) + } + + if gcs.WriteBufferSize > 0 { + opts = append(opts, grpc.WithWriteBufferSize(gcs.WriteBufferSize)) + } + + if gcs.Keepalive != nil { + keepAliveOption := grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: gcs.Keepalive.Time, + Timeout: gcs.Keepalive.Timeout, + PermitWithoutStream: gcs.Keepalive.PermitWithoutStream, + }) + opts = append(opts, keepAliveOption) + } + + if gcs.Auth != nil { + if ext == nil { + return nil, fmt.Errorf("no extensions configuration available") + } + + componentID, cperr := config.NewIDFromString(gcs.Auth.AuthenticatorName) + if cperr != nil { + return nil, cperr + } + + grpcAuthenticator, cerr := configauth.GetGRPCClientAuthenticator(ext, componentID) + if cerr != nil { + return nil, cerr + } + + perRPCCredentials, perr := grpcAuthenticator.PerRPCCredentials() + if perr != 
nil { + return nil, perr + } + opts = append(opts, grpc.WithPerRPCCredentials(perRPCCredentials)) + } + + if gcs.BalancerName != "" { + valid := validateBalancerName(gcs.BalancerName) + if !valid { + return nil, fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) + } + opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, gcs.BalancerName))) + } + + // Enable OpenTelemetry observability plugin. + opts = append(opts, grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor())) + opts = append(opts, grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor())) + + return opts, nil +} + +func validateBalancerName(balancerName string) bool { + for _, item := range allowedBalancerNames { + if item == balancerName { + return true + } + } + return false +} + +// ToListener returns the net.Listener constructed from the settings. +func (gss *GRPCServerSettings) ToListener() (net.Listener, error) { + return gss.NetAddr.Listen() +} + +// ToServerOption maps configgrpc.GRPCServerSettings to a slice of server options for gRPC. +func (gss *GRPCServerSettings) ToServerOption(ext map[config.ComponentID]component.Extension) ([]grpc.ServerOption, error) { + var opts []grpc.ServerOption + + if gss.TLSSetting != nil { + tlsCfg, err := gss.TLSSetting.LoadTLSConfig() + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(credentials.NewTLS(tlsCfg))) + } + + if gss.MaxRecvMsgSizeMiB > 0 { + opts = append(opts, grpc.MaxRecvMsgSize(int(gss.MaxRecvMsgSizeMiB*1024*1024))) + } + + if gss.MaxConcurrentStreams > 0 { + opts = append(opts, grpc.MaxConcurrentStreams(gss.MaxConcurrentStreams)) + } + + if gss.ReadBufferSize > 0 { + opts = append(opts, grpc.ReadBufferSize(gss.ReadBufferSize)) + } + + if gss.WriteBufferSize > 0 { + opts = append(opts, grpc.WriteBufferSize(gss.WriteBufferSize)) + } + + // The default values referenced in the gRPC docs are set within the server, so this code doesn't need + // to apply them over zero/nil values before passing these as grpc.ServerOptions. + // The following shows the server code for applying default grpc.ServerOptions. + // https://github.com/grpc/grpc-go/blob/120728e1f775e40a2a764341939b78d666b08260/internal/transport/http2_server.go#L184-L200 + if gss.Keepalive != nil { + if gss.Keepalive.ServerParameters != nil { + svrParams := gss.Keepalive.ServerParameters + opts = append(opts, grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionIdle: svrParams.MaxConnectionIdle, + MaxConnectionAge: svrParams.MaxConnectionAge, + MaxConnectionAgeGrace: svrParams.MaxConnectionAgeGrace, + Time: svrParams.Time, + Timeout: svrParams.Timeout, + })) + } + // The default values referenced in the gRPC docs are set within the server, so this code doesn't need + // to apply them over zero/nil values before passing these as grpc.ServerOptions. + // The following shows the server code for applying default grpc.ServerOptions.
+ // https://github.com/grpc/grpc-go/blob/120728e1f775e40a2a764341939b78d666b08260/internal/transport/http2_server.go#L202-L205 + if gss.Keepalive.EnforcementPolicy != nil { + enfPol := gss.Keepalive.EnforcementPolicy + opts = append(opts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: enfPol.MinTime, + PermitWithoutStream: enfPol.PermitWithoutStream, + })) + } + } + + uInterceptors := []grpc.UnaryServerInterceptor{} + sInterceptors := []grpc.StreamServerInterceptor{} + + if gss.Auth != nil { + componentID, cperr := config.NewIDFromString(gss.Auth.AuthenticatorName) + if cperr != nil { + return nil, cperr + } + + authenticator, err := configauth.GetServerAuthenticator(ext, componentID) + if err != nil { + return nil, err + } + + uInterceptors = append(uInterceptors, authenticator.GRPCUnaryServerInterceptor) + sInterceptors = append(sInterceptors, authenticator.GRPCStreamServerInterceptor) + } + + // Enable OpenTelemetry observability plugin. + // TODO: Pass construct settings to have access to Tracer. + uInterceptors = append(uInterceptors, otelgrpc.UnaryServerInterceptor( + otelgrpc.WithTracerProvider(otel.GetTracerProvider()), + otelgrpc.WithPropagators(otel.GetTextMapPropagator()), + )) + sInterceptors = append(sInterceptors, otelgrpc.StreamServerInterceptor( + otelgrpc.WithTracerProvider(otel.GetTracerProvider()), + otelgrpc.WithPropagators(otel.GetTextMapPropagator()), + )) + + opts = append(opts, grpc.ChainUnaryInterceptor(uInterceptors...), grpc.ChainStreamInterceptor(sInterceptors...)) + + return opts, nil +} + +// GetGRPCCompressionKey returns the grpc registered compression key if the +// passed in compression key is supported, and CompressionUnsupported otherwise. +func GetGRPCCompressionKey(compressionType string) string { + compressionKey := strings.ToLower(compressionType) + if encodingKey, ok := gRPCCompressionKeyMap[compressionKey]; ok { + return encodingKey + } + return CompressionUnsupported +} diff --git a/internal/otel_collector/config/configgrpc/doc.go b/internal/otel_collector/config/configgrpc/doc.go new file mode 100644 index 00000000000..04a7330b359 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configgrpc defines the configuration settings to create +// a gRPC client and server. +package configgrpc diff --git a/internal/otel_collector/config/configgrpc/gzip.go b/internal/otel_collector/config/configgrpc/gzip.go new file mode 100644 index 00000000000..a6f64286ddc --- /dev/null +++ b/internal/otel_collector/config/configgrpc/gzip.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configgrpc + +import ( + // Import the gzip package which auto-registers the gzip gRPC compressor. + _ "google.golang.org/grpc/encoding/gzip" +) diff --git a/internal/otel_collector/config/configgrpc/testdata/ca.crt b/internal/otel_collector/config/configgrpc/testdata/ca.crt new file mode 100644 index 00000000000..7a677e39a77 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQCYMh590xiOGzANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MFoXDTMwMDkyMDA1MjIxMFowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AJdLtbEbPVuBQqh2MLDuyYB/zg3dMfl1jyE64UgW5AVRHrfAGXgP55yeEh3XGiO+ +i5PYNeEILoYcLXtMstA24OTgxeLjTZ0zEaja50/Ow9/NjZcTc0f/DErHI3GvWTxW +dCdosGe4qSwi9BbRGPfAat5fJMSTERXDcAcH2aaD3ekK3WTqtXsFsErF7+SpzJfL +PZw4aSFS9a26PkxO+Z5coqdYRC1CIpZGVFRg/PVcb7NNTrRf+Wu/hOncNkHDXKKz +qeBkhnHczQrPDzxhG2FvrahMgGSsRgBDdMwTBfmBhlbP+sM0HSPuCmKD95/osO03 +sG13nWMSDb7QYETVyg3E4t8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAfLGe9cAN +1WH09KVYIWWzu74tkaOIRFdkXzcx6fMq4Gpi49/lxG1INCrJ/4F8UyhHq0mmSsxb +UGs3KFfDsRctX7PNgOLYHxlUcAhQFzT3xqrRg7iqaiGWKTSGE1fXg29LKm/Ox/MC +npumt7rsSix5Viyb0/njcSX8CdSCirhKCiJklfd5J/Cwxqm+j/Pgaz2YrOj8Axa1 +/GJtPOtIpPYEBbXXUMpuijSikcfurZJL62WWxrzUGZjRsmSJAl5bvTJTOKGQb634 +Y0oehROKnkA2N0UVa4LM2M5C+CVZNl8vKAsdj1pywRGEOQoH42wBNu71Wob1f7jt +JOXWGJcoyEjbSg== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/client.crt b/internal/otel_collector/config/configgrpc/testdata/client.crt new file mode 100644 index 00000000000..2fc037de49f --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdZMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAyMraDxgfr7DEShaRbpKnKnRm4xrh9StCpTWVxViCS4JmACrlNsDBggan +Xz4rQSsV1Z2lznYPdbpXVVDY/8Q87GDXQLmB48cff+DLdU2TAvsalraty4edlf1Q +j6WNi/jFca9XIqqS358bmBau3SlEEJVv0StE8fDiZpHQuYADtdXxWhXGcrNC3quu +GKBtTCaj01EiZU5Rdqzd/KFEUQ5ns5K8j1vXJJzEhbmOXRN4NM0vvEBnd3ObP+Lw +pFUSkhxgYYLga8L5432bg/BA7OSLhZoEZzuMivyyNVC7sIoyLBYR0/Nk53ICmKz4 +gR18lTmpDXnmFZv7D1HXhwvFQ/xvbwIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAM +K90gY676JVzErSogCDn3XCsEkSLyBa90LK2FMogU7ZF0x2/Y6qf2yBLqYAjp4Br1 +CXmmQAXyLGsTs1ahobgZNVEvdvhVxc6CHBw4aBbXaUfVGq26xauPu47wWHtxEeAx +h9huRcZZKtTsJBJ1N6Yg7mJbzFT0nQ0FGWuoWd9HQP7ncOlfmlBfuAGRKRn1lXXr +na0thmOFQskzyAByijuGuaFvr+v4IVHYqO3JPXNpwp2LNHvD/f0OOS2XWpsUX6Vn 
+2IDdMgZSNLrHDZpemtl1QSaHemG8s67LEvuG0/fsfV38pKPlhKV1xrkojNN3kvPq +IyU5uT3m01KkJAMtRrMT +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/client.key b/internal/otel_collector/config/configgrpc/testdata/client.key new file mode 100644 index 00000000000..4c77070cc03 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAyMraDxgfr7DEShaRbpKnKnRm4xrh9StCpTWVxViCS4JmACrl +NsDBgganXz4rQSsV1Z2lznYPdbpXVVDY/8Q87GDXQLmB48cff+DLdU2TAvsalrat +y4edlf1Qj6WNi/jFca9XIqqS358bmBau3SlEEJVv0StE8fDiZpHQuYADtdXxWhXG +crNC3quuGKBtTCaj01EiZU5Rdqzd/KFEUQ5ns5K8j1vXJJzEhbmOXRN4NM0vvEBn +d3ObP+LwpFUSkhxgYYLga8L5432bg/BA7OSLhZoEZzuMivyyNVC7sIoyLBYR0/Nk +53ICmKz4gR18lTmpDXnmFZv7D1HXhwvFQ/xvbwIDAQABAoIBADHS1AUG0WYBENPp +gbDURxKry5Py6bqyP1lLUJyld79Q3gqQmkvZzKp9CC8D+Cu1izd0ZN40QWXPFTig +VRgyE4P8C62N2oMwt8o9d37l/uKweEqJjdqBDkNXlhPu2o6u7h9liNObS9KdYnV8 +u2s5gCA1VIesmvEF+sfEyuwcrc8ClHf4qs7VDqopZ6HZ3aT5ns4xXA5QoEZJlhDG +axwqWQ/jC4G+nGyrE2/AAGAgQtRhcs8aHTuEGBlNGlC9af/obyYLCqPm0A6ceyKz +PcZUDQCrsZnQpwqF7zsF7WmW8W5XqVHDFoJaNQt2/sp3OkOv9z78JodvB/MbGmNV +MkP1GeECgYEA9kbhLVsDDPA82wQuBsbK9u6A59ZPIXDfXJVNjcg1LKJkqJsKhY9z +uZ98rHlTI+FS5sCL/ixdM/tVNFI3EHaS7wOLJI9y2y+CVi2d5ffMKbPUtFJf5Q+A +zlJq1LseKdwsVT1jSah/jZ53YW1pOiJZPByUfLWIwLNHo0+fIEMfCTkCgYEA0LhC +sNb1W8GpMy6eTDfa4D90Wm0LvZgEyV8SCc09xdC6yp1bE4G19x037/YQsbLnE5vB +0YM8ILh977zCYHokC9qMdKDAxZx0RQD2IUDRbTymQ89uS5ednSg9dBxs9f/cxTlU +wQUxf4+yY/Rohyo0+mK4zkobG9lU1H83KKc1BecCgYEAkvQkdW3zWgsYJRBPbpe8 +kLAslypYOXoirhohFtM6d5HHQpyRILVCtqamPDyBEc3oK+0FG/vY+aWlZ/0PAnHe +p2ST6JL4VDX7LfU2XP0KBHBcIeVtdz9S+spPGPU2wH+yrIJe9prm0diXH7mrqpbI +bIgZSnkASwwvWRGvwA6NPHECgYBkD+JRG0zXp3lxgyj6y1BQb7tdWqflRgsNa1mf +f1jdDBtw5Y1zRZ0yEjzt+p64QleLzAFYaz0ZRrmBhJH/ZK8BS85IX4Trd/0506Ms +AAInB4uCOODctpwmatNDZhlKulZh6wFZ5B591CsmxlaSbkalv0xwAZELgd6sXSzZ +fYfrAwKBgQDM9StAiTdSjGn0Qk/YzkLlloEEebjJ7tRUpDGQgX3Z7YsCdfl/LeWU +yMV7UVDggPVjveT8TUJUm+ipD7CpesY1GTJovyRWKlyMpgAY2wKXV41oOnDD/0ef +AAa3FWMAf27ogbeXBxSUBN+1EhKBMKihQSD+Odnbu6SHUeiKskGU3Q== +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/configgrpc/testdata/server.crt b/internal/otel_collector/config/configgrpc/testdata/server.crt new file mode 100644 index 00000000000..70292eae048 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdYMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA0HqHHTXVljLlyknGttODUu675uGGJJWdUCmjr+I9+3BAxGgNG4xVjMao +IsgAHajXgfTgfLha0mnMCdAHL4OUmvAj7sJ6NZRIK9DASiu5NWLQhgjUDg1DY2K/ +nhCVGp3w/J6aFfN+qSIaHCQz7MN66mcPtsXxMMPUBlqjWCLFpR5vL2M6k5tZI1L2 +pP4jfkfEIdlHgs/AXftwiYsNo57Rlaj+O7DwPqmJdVGeeE6Wka4ANK/5svIAgW9h +mwKhSwaXwle8GDMYgtbfQCrIO/Z5ctMKG9KXEgOBpoANYmeAGT2OPaCc43710T9P +MONdj3TKc8Y5FsUA0/kwac7oGFl+hQIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQCP +5as7sDX2vcXbROg62Weyg6YCDd12LYRWS2UHBebxwDFKVj1GmkJM2eN5bZWlyULI +Sv0yXSIQuMXIZDECdg0+YWxk6ZV57iYkduhez61wqhqYu9H1h5jlOvdlevunfNZ3 +VlpIkE2vVRIpu+IiNRSkh08M5csAe7MsrgdUcgenjygwNM3wPaQtlQ7tZ+quWyYc +rHO2lByVexHwpN2ZPiMZ7eIyEs9W2kt6ohcr8jJdryfO+7Q2FR5vE8K1Uh1wNcFh 
+WLPMIl4InYmIFfUChHvHCEmLS0TLW4lD9srFmO7VrlrPqUOULzUIm5wuXWgvdxw9 +3XHsXLqvMOf79boGpkfv +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configgrpc/testdata/server.key b/internal/otel_collector/config/configgrpc/testdata/server.key new file mode 100644 index 00000000000..98fba4b9432 --- /dev/null +++ b/internal/otel_collector/config/configgrpc/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA0HqHHTXVljLlyknGttODUu675uGGJJWdUCmjr+I9+3BAxGgN +G4xVjMaoIsgAHajXgfTgfLha0mnMCdAHL4OUmvAj7sJ6NZRIK9DASiu5NWLQhgjU +Dg1DY2K/nhCVGp3w/J6aFfN+qSIaHCQz7MN66mcPtsXxMMPUBlqjWCLFpR5vL2M6 +k5tZI1L2pP4jfkfEIdlHgs/AXftwiYsNo57Rlaj+O7DwPqmJdVGeeE6Wka4ANK/5 +svIAgW9hmwKhSwaXwle8GDMYgtbfQCrIO/Z5ctMKG9KXEgOBpoANYmeAGT2OPaCc +43710T9PMONdj3TKc8Y5FsUA0/kwac7oGFl+hQIDAQABAoIBAFTJD/wcMcIE7xlG +yc7+1FC9EKQEIgbs5e59ELnuG+EPNPfrjTEf8IbxH94NUqa9TO/oRAfU/fLG3hk7 +hkCXla8xbJukcgkqRfOz0RAZGhiRGFb6bitMz5Qyy9Ufz1Pk2eYTJn046tEkMlQx +kQCAO5Pq2CQv+jgn3Cm9YOLuOU0+CEpET2lNgdUbj7wo0k2jDbxuU/CGqrQua8uH +hwM2hBH3eZJzO7EwdBhdubImg9RsDrLUkltgdVMAROP5+m03+J653v4UbaAvG4jM +IxkVW11Wdh4caKJNQY5gnNhnNG79uDeikX/dLTnSnvaARQWay/XKhP9EyKp3zVVk +4S4GEyECgYEA8TjZiGLSbgERoigUpgMxECyUAi/+GffiAR4PARdaoCXLGlXYBAax +N8n+CF62VS7FY0IYeGuzDG9O/rEGS27OBVM2j+cCkqOT9+vNJe5iwGfAzwuBAuCA +m5eRysLG4jIwhw2XRCL2gGQM92XodKkShAhXuG05nUqcUdpgdpBdJK0CgYEA3UAn +YhbXvNKUHcpjvyyQpLrJHS+Z+key1e8FWJ8TzQDWldbgCeJZrm9xCNARKZEXFxNG +V3MJWehKl2mC8ctU6u1aTi83bA7MdVvDw57SGj0HMLa4CXtdWEOt57Z6HzFJLoQy +aAxvKwbeBfyRbt7f5HaHw/w3VjZN9HA7ip7EJDkCgYAFlWhLpOX0D+hFlaHsudQv +6KhAaLX8CeXcWsLEJrM9U8KgyG3oofMGNJHBxdd4n02IX6ZLW0rYtdbhRF2970Gr +k+KGcDV6CXlKWtXz09HLXFt1L3H8DBBOCbMhO2L5J2pCJgljVV/ZVveJ3n0D/knk +boEBTt3viyOVLXXgKLVPPQKBgEFtGzhScPGRg+NbWivKTeuooJhU3z+3vBavW/Fc ++UoCGXKt3AqQONzwb4ifnrOgCCf2tzJc/kLsAkLMHMDL1Ay0q6O7KrR1m9iIjldm +u9KugVXScpG7PVtAiEihGXPn6zAqP42tP6KFoVo72fXjSmoQ8wztpJ+F53+FQNY5 +JN9hAoGBALV/knUA5tVyMofCoZflHVM3E4pFJiKQII1dzxE4g/sLYtSWDfKXP64W +mc7PKy46vjeTeRE0B8sGQ7RIhnUpPaA6OS8sMPSeJtdvfAPK0KCukHXTsLJg5DZo +XuC2gsdqPFQJQ/VDnp3JO7rbj7A3uYgzRT5xKHMluJnDDuUg1UUr +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/confighttp/README.md b/internal/otel_collector/config/confighttp/README.md new file mode 100644 index 00000000000..ead126dc4a3 --- /dev/null +++ b/internal/otel_collector/config/confighttp/README.md @@ -0,0 +1,62 @@ +# HTTP Configuration Settings + +HTTP exposes a [variety of settings](https://golang.org/pkg/net/http/). +Several of these settings are available for configuration within individual +receivers or exporters. + +## Client Configuration + +[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md) +leverage client configuration. + +Note that client configuration supports TLS configuration, however +configuration parameters are not defined under `tls_settings` like server +configuration. For more information, see [configtls +README](../configtls/README.md). 
+ +- `endpoint`: address:port +- `headers`: name/value pairs added to the HTTP request headers +- [`read_buffer_size`](https://golang.org/pkg/net/http/#Transport) +- [`timeout`](https://golang.org/pkg/net/http/#Client) +- [`write_buffer_size`](https://golang.org/pkg/net/http/#Transport) + +Example: + +```yaml +exporter: + otlp: + endpoint: otelcol2:55690 + headers: + test1: "value1" + "test 2": "value 2" +``` + +## Server Configuration + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md) +leverage server configuration. + +- [`cors_allowed_origins`](https://github.com/rs/cors): An empty list means + that CORS is not enabled at all. A wildcard can be used to match any origin + or one or more characters of an origin. +- [`cors_allowed_headers`](https://github.com/rs/cors): When CORS is enabled, + can be used to specify an optional list of allowed headers. By default, it includes `Accept`, + `Content-Type`, `X-Requested-With`. `Origin` is also always + added to the list. A wildcard (`*`) can be used to match any header. +- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +- [`tls_settings`](../configtls/README.md) + +Example: + +```yaml +receivers: + otlp: + cors_allowed_origins: + - https://foo.bar.com + - https://*.test.com + cors_allowed_headers: + - ExampleHeader + endpoint: 0.0.0.0:55690 + protocols: + http: +``` diff --git a/internal/otel_collector/config/confighttp/confighttp.go b/internal/otel_collector/config/confighttp/confighttp.go new file mode 100644 index 00000000000..039c210043e --- /dev/null +++ b/internal/otel_collector/config/confighttp/confighttp.go @@ -0,0 +1,231 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confighttp + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "github.com/rs/cors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/internal/middleware" +) + +// HTTPClientSettings defines settings for creating an HTTP client. +type HTTPClientSettings struct { + // The target URL to send data to (e.g.: http://some.url:9411/v1/traces). + Endpoint string `mapstructure:"endpoint"` + + // TLSSetting struct exposes TLS client configuration. + TLSSetting configtls.TLSClientSetting `mapstructure:",squash"` + + // ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. + ReadBufferSize int `mapstructure:"read_buffer_size"` + + // WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. + WriteBufferSize int `mapstructure:"write_buffer_size"` + + // Timeout parameter configures `http.Client.Timeout`. 
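+ // A zero Timeout means no timeout (the net/http default).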
+ Timeout time.Duration `mapstructure:"timeout,omitempty"`
+
+ // Additional headers attached to each HTTP request sent by the client.
+ // Existing header values are overwritten if a collision happens.
+ Headers map[string]string `mapstructure:"headers,omitempty"`
+
+ // Custom Round Tripper to allow for individual components to intercept HTTP requests.
+ CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error)
+
+ // Auth configuration for outgoing HTTP calls.
+ Auth *configauth.Authentication `mapstructure:"auth,omitempty"`
+}
+
+// ToClient creates an HTTP client.
+func (hcs *HTTPClientSettings) ToClient(ext map[config.ComponentID]component.Extension) (*http.Client, error) {
+ tlsCfg, err := hcs.TLSSetting.LoadTLSConfig()
+ if err != nil {
+ return nil, err
+ }
+ transport := http.DefaultTransport.(*http.Transport).Clone()
+ if tlsCfg != nil {
+ transport.TLSClientConfig = tlsCfg
+ }
+ if hcs.ReadBufferSize > 0 {
+ transport.ReadBufferSize = hcs.ReadBufferSize
+ }
+ if hcs.WriteBufferSize > 0 {
+ transport.WriteBufferSize = hcs.WriteBufferSize
+ }
+
+ clientTransport := (http.RoundTripper)(transport)
+ if len(hcs.Headers) > 0 {
+ clientTransport = &headerRoundTripper{
+ transport: transport,
+ headers: hcs.Headers,
+ }
+ }
+
+ if hcs.Auth != nil {
+ if ext == nil {
+ return nil, fmt.Errorf("extensions configuration not found")
+ }
+
+ componentID, cperr := config.NewIDFromString(hcs.Auth.AuthenticatorName)
+ if cperr != nil {
+ return nil, cperr
+ }
+
+ httpCustomAuthRoundTripper, aerr := configauth.GetHTTPClientAuthenticator(ext, componentID)
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ clientTransport, err = httpCustomAuthRoundTripper.RoundTripper(clientTransport)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if hcs.CustomRoundTripper != nil {
+ clientTransport, err = hcs.CustomRoundTripper(clientTransport)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &http.Client{
+ Transport: clientTransport,
+ Timeout: hcs.Timeout,
+ }, nil
+}
+
+// Custom RoundTripper that adds headers.
+type headerRoundTripper struct {
+ transport http.RoundTripper
+ headers map[string]string
+}
+
+// RoundTrip is a custom RoundTripper that adds headers to the request.
+func (interceptor *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ for k, v := range interceptor.headers {
+ req.Header.Set(k, v)
+ }
+ // Send the request to the next transport.
+ return interceptor.transport.RoundTrip(req)
+}
+
+// HTTPServerSettings defines settings for creating an HTTP server.
+type HTTPServerSettings struct {
+ // Endpoint configures the listening address for the server.
+ Endpoint string `mapstructure:"endpoint"`
+
+ // TLSSetting struct exposes TLS server configuration.
+ TLSSetting *configtls.TLSServerSetting `mapstructure:"tls_settings,omitempty"`
+
+ // CorsOrigins are the allowed CORS origins for HTTP/JSON requests to grpc-gateway adapter
+ // for the OTLP receiver. See github.com/rs/cors
+ // An empty list means that CORS is not enabled at all. A wildcard (*) can be
+ // used to match any origin or one or more characters of an origin.
+ CorsOrigins []string `mapstructure:"cors_allowed_origins"`
+
+ // CorsHeaders are the allowed CORS headers for HTTP/JSON requests to grpc-gateway adapter
+ // for the OTLP receiver. See github.com/rs/cors
+ // CORS needs to be enabled first by providing a non-empty list in CorsOrigins.
+ // A wildcard (*) can be used to match any header.
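+ // Note: headers listed here take effect only when CorsOrigins is non-empty
+ // (see the TODO in ToServer below).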
+ CorsHeaders []string `mapstructure:"cors_allowed_headers"` +} + +// ToListener creates a net.Listener. +func (hss *HTTPServerSettings) ToListener() (net.Listener, error) { + listener, err := net.Listen("tcp", hss.Endpoint) + if err != nil { + return nil, err + } + + if hss.TLSSetting != nil { + var tlsCfg *tls.Config + tlsCfg, err = hss.TLSSetting.LoadTLSConfig() + if err != nil { + return nil, err + } + listener = tls.NewListener(listener, tlsCfg) + } + return listener, nil +} + +// toServerOptions has options that change the behavior of the HTTP server +// returned by HTTPServerSettings.ToServer(). +type toServerOptions struct { + errorHandler middleware.ErrorHandler +} + +// ToServerOption is an option to change the behavior of the HTTP server +// returned by HTTPServerSettings.ToServer(). +type ToServerOption func(opts *toServerOptions) + +// WithErrorHandler overrides the HTTP error handler that gets invoked +// when there is a failure inside middleware.HTTPContentDecompressor. +func WithErrorHandler(e middleware.ErrorHandler) ToServerOption { + return func(opts *toServerOptions) { + opts.errorHandler = e + } +} + +// ToServer creates an http.Server from settings object. +func (hss *HTTPServerSettings) ToServer(handler http.Handler, opts ...ToServerOption) *http.Server { + serverOpts := &toServerOptions{} + for _, o := range opts { + o(serverOpts) + } + + handler = middleware.HTTPContentDecompressor( + handler, + middleware.WithErrorHandler(serverOpts.errorHandler), + ) + + if len(hss.CorsOrigins) > 0 { + co := cors.Options{ + AllowedOrigins: hss.CorsOrigins, + AllowCredentials: true, + AllowedHeaders: hss.CorsHeaders, + } + handler = cors.New(co).Handler(handler) + } + // TODO: emit a warning when non-empty CorsHeaders and empty CorsOrigins. + + // Enable OpenTelemetry observability plugin. + // TODO: Consider to use component ID string as prefix for all the operations. + handler = otelhttp.NewHandler( + handler, + "", + otelhttp.WithTracerProvider(otel.GetTracerProvider()), + otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + return r.URL.Path + }), + ) + + return &http.Server{ + Handler: handler, + } +} diff --git a/internal/otel_collector/config/confighttp/doc.go b/internal/otel_collector/config/confighttp/doc.go new file mode 100644 index 00000000000..377c2fa74b1 --- /dev/null +++ b/internal/otel_collector/config/confighttp/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package confighttp defines the configuration settings +// for creating an HTTP client and server. 
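+//
+// A minimal client sketch (illustrative only; the endpoint and timeout are
+// placeholders):
+//
+//  settings := confighttp.HTTPClientSettings{
+//      Endpoint: "http://localhost:55690",
+//      Timeout:  10 * time.Second,
+//  }
+//  client, err := settings.ToClient(nil) // nil extensions are fine when no auth is configured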
+package confighttp diff --git a/internal/otel_collector/config/confighttp/testdata/ca.crt b/internal/otel_collector/config/confighttp/testdata/ca.crt new file mode 100644 index 00000000000..9ab794b7ffc --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQDgJLdDKyhMRTANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB +VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM +CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIwMDkyMjA1MjIx +MFoXDTMwMDkyMDA1MjIxMFowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry +YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV +BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMKJc2wz8eLzAPonO37JahdY1Rt1dkRzQuung2Fe5O8UnbtEnDc7N7fbRLBgSRl0 +F5+V2USHCtYfAJ0tLifmInLOfEgmxIB2HNnwVLwDAnyXzp6NQEVw51bsILMTuFfB +mgp8Jq8KokGGOOh6GmM9h0a3KVdpxqPD+088t8AAwZrO5dHNIxZ4Bq471Stvcm7Z +jAWAoRsjceVdGr82+iB9wTio/FIeygb5rO5Ju1GMisR1LgJ6apDv9FrtWdorRxnb +qFMXdPvMyM34oIRT6bxETSIYYHjozUz1/H0GB4NeGUbov0etnviTl+oMpRj0vZpT +DB8SD1XjHGOpbUZ6ibgUrWMCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEApBKbrk4g +Bd9/T1P3T3BBWOn8iMYLapBeapP6kVW9VtrpSvwKv6dchhh2Iizz5PnEEKBnU7ho ++GQXrKM0L/ejQeDqEo0lkZLJmovGNzXTNBGhgcVJ37qt0Bt58SoCA2Oc8dn4gtyR +eGi2lSVDUc+kiZWm9lUfTcwQeqTb8oS64DwJR8f11uX3NJn9N7fbwino60D5U7Na +ojO9ua4W3K5C8cuNEjssyE6qjSQ4lhXBlHxA9viSdQSQN0Lv/AH1s175jQ7G24jM +58v5DC7P0oodiOdr9Z0hndK8c1mgB2fTTme+h9iDYVttbMHoARYCWSy02/ZzHRah +tAEubJUHnzv5vA== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/client.crt b/internal/otel_collector/config/confighttp/testdata/client.crt new file mode 100644 index 00000000000..ee5dab3c561 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/client.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdbMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA0TJ19WUFfGqdkus7kOqFQkR570sncfM0oLWK3Jzrqf0LO18BKv3LKQ/N +vVVkKa4IXkGo+oopzb/I5GQuCkp0LFz+lIfPioiEF1DQ+MJAHBEvNerwSgezlYbh +2cg/NS+f7CTe98AaEIiA+UtXDCWq2mttBLSckkvDzFpB++WL5HjonUyzE03ijAli +CJvMZmVFY7Q/uP00S0tyvgskHkeQXxVQ7rBlg43OYKRs0lXyEOYypv+2i7vxb2NM +rZciZa9wNxdWHPukeMsY2HEpZPEAochE3zppfomjc2T+B2F3uBkB0YcK0K4ugO2+ +KuzpIhoQpmdFwXjmnLjaYUZ7s+XUrwIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQB6 +II9PnbwfjQPn5vx6vEnoe6HQV0V+xnh5zfD8DJ6hM42HbGSBAqD64z6odkx7jKdj +B0tdxgx9eN0/tp15ss3h5BRksVMf1k4fFG0MY/jS5GDX4V8G3e/4SbrkNjXdA2UR +i0QMB2nyPObkpCVIRDwtdv0E416Rpm1GDtcjjuyBRAfODkj/LZ3nmwzEtXwo2XG3 +hthyC/4x6LmK0g4siA0ru8vtwUHh7d7A7rcZDPGajA+B9ByBQT3GzCND8NVqbyiq +G/XpRVQ4XmE2Vdg05hDVpHzgmyii6eIrDnQd4XrHBWLV6JuUMGu1goQDTxKlyt0p +gPm/gT00VmSUUh4QLX91 +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/client.key b/internal/otel_collector/config/confighttp/testdata/client.key new file mode 100644 index 00000000000..e5876e89835 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA0TJ19WUFfGqdkus7kOqFQkR570sncfM0oLWK3Jzrqf0LO18B +Kv3LKQ/NvVVkKa4IXkGo+oopzb/I5GQuCkp0LFz+lIfPioiEF1DQ+MJAHBEvNerw 
+SgezlYbh2cg/NS+f7CTe98AaEIiA+UtXDCWq2mttBLSckkvDzFpB++WL5HjonUyz +E03ijAliCJvMZmVFY7Q/uP00S0tyvgskHkeQXxVQ7rBlg43OYKRs0lXyEOYypv+2 +i7vxb2NMrZciZa9wNxdWHPukeMsY2HEpZPEAochE3zppfomjc2T+B2F3uBkB0YcK +0K4ugO2+KuzpIhoQpmdFwXjmnLjaYUZ7s+XUrwIDAQABAoIBAEH+CxwIbDydXWv1 +bOsAMF2BQH3uVVkrAZUY7988WVNckeh+xd2MBkTDyYFKqLhFQDqLuAShBSL0tyjl +OWjhp9g+1ciBN0VaX2EDi4iNrq+r9BqsLHUODObEkAalltruVSKnVvcM0Kwag6Ug +0Srxzv3sGY38c8/quq+CYYJXHVRLBX8vQ70rwp+BKEipce18/kVxywaYGYqjzwcA +iiG6QJlyl7BrOUK8KPpyvC0OhnDpDdCO875MuBLzvulFcqvGbGNHcgRqAEk1xH5b +SQoiZBaZGWK1Ns7+bwCtcVBhvxsWqIffIvXAG74DQqpIdgmR1hVLx1e4HxVBHpDQ +Z096yVECgYEA+9M2xKbzyUJCc4MfOjRG0V1lzECrBt0sv6GMY2o8PFj1+MRONmHV +G556oxeK1NT9r6KRxK8NKSxkR775HDgSFd3VdFLpmCDQu/z/PSWoSo+0jmToOX9t +eykF4MCLhU8ck2AiDne4MB7MNKqPesbGsmK2IwPkHLGQ8Sz0367AqFMCgYEA1KpT +tafR5D/yq4iC51o6PjQ4gMn7vpiGvkU9VVEzZQRGaP5W3ssTEh9b58wlMTOxQE3Z +cpoVNRXAg1jOkKa0NZm5SOOz1PpdNINIbGpVVrx/cUkhKHDEj+uDt72fS8cyU14n +52jlh+3LpG1UyLNX7eod/xv+Wo5oLe3fvJAzprUCgYEA5PtBqb9FnZOqaO6pznsK +igWrMvb6jNtAfV+gECXhb95Ui0e09q4u4VZRnUsi6jRiGPpyIa4rAW1kIfj8+zPg +/hEgrw1VawcrxkResnMze9kADRqkLuQ34O2EcsGiHC27hia70Pv7d4YJmToeDT4C +HuKzS1OWcKDlcue2Ik780BECgYAVwsACDIQLqQd5yeQrLC5dgxZtBz39SLow6gDW +pBJwObnCsJPPBFSVPCQ5WchMeo+eltizQ1T8M5eZWRL59jTmby5oaPRTzLKQ1wYo +IdFNqMgZnXQJIVDbsSuvN3X/WQirQy0uHqut9wUpdA6C4ucSbyxWmFS0i3HZkUed +kdvXKQKBgGpQ7jpDzNh3zXO7AU4m4FlAN+N+dVXCP0C8DwcyRM69Sioyybh5lVww +QfQoSs3/m6/XZ5r6l9OvM6oUGfovk9Ja7NN8aqXQ4lwrET4SblRroKXQqzvp0aJ2 +XwHVfW5N9DcAQtQQzyWHxcKLyNOZLYQmBRsKuw4wu2I/rn8gWsy5 +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/confighttp/testdata/server.crt b/internal/otel_collector/config/confighttp/testdata/server.crt new file mode 100644 index 00000000000..b1346ea87b4 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIJANACN0VTlMdaMA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5leTESMBAG +A1UECgwJTXlPcmdOYW1lMRUwEwYDVQQDDAxNeUNvbW1vbk5hbWUwHhcNMjAwOTIy +MDUyMjEwWhcNMzAwOTIwMDUyMjEwWjBdMQswCQYDVQQGEwJBVTESMBAGA1UECAwJ +QXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoMCU15T3JnTmFtZTEV +MBMGA1UEAwwMTXlDb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAzIHy8CASiF6aI3CsI6RGlJBZExPk/Utvlp89ga42g+e1YxZUZtMm79A4 +uVOXnNsRvFtgiRA8xHrdcNcgCDBhBA7p5vQC/KgJymM6cdiNTStQbhvl7qpgyU8d +PYQNqKaaHo5ceW/AQM2z5XZRnak2HhI7VhO4QLOfp7CB0XvpFGG2lWpZ/xEHGIit +PcUQUmiROPremupF7mB04HQVH3TxWTtmHwvfWICbjO6gMfIT3me/4HrECA/WX2hj +ffP1HPfPz3ZU8UMWmodQif2/aX7auh1CfqpJbVVYMCtMr7WCmKXiYkrMK6osaoku +eCgM+ouNf1rXnzxdX6ApwZXrx9t/3QIDAQABoy4wLDAUBgNVHREEDTALgglsb2Nh +bGhvc3QwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBi +K8J0Vy3/CHbYcQTg2UK0OCo08auuUUy3j9nSM2KfeeIKdM0DlRgLyIH63VwZNWFI +0tk27MONgsnXPFuEfUg4QG0QXx+rny10ckzI74ff1tOvu3LFkKRYafz3a1LfWW2Q +WDnta3lC+OsSKMEze1Bd4mvHZoiqTvkLbpmAudoWF7n+VSNXoOjizoMissqxy8iD +uZ6ChBWvJ1V+MtttXP0D7rSJuB0bkVwtcyEMNkUh7GrZVl61EcMQfjg5Vwsntdrv +cIIubS1F8uzT7ABLOGhiYm6gr3HPHsQp/t8sXNWIbjTmoueYBK215rvY3FQrzvAW +hNltkVRow5h+FyA/WVDN +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/confighttp/testdata/server.key b/internal/otel_collector/config/confighttp/testdata/server.key new file mode 100644 index 00000000000..cef03655b40 --- /dev/null +++ b/internal/otel_collector/config/confighttp/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzIHy8CASiF6aI3CsI6RGlJBZExPk/Utvlp89ga42g+e1YxZU +ZtMm79A4uVOXnNsRvFtgiRA8xHrdcNcgCDBhBA7p5vQC/KgJymM6cdiNTStQbhvl 
+7qpgyU8dPYQNqKaaHo5ceW/AQM2z5XZRnak2HhI7VhO4QLOfp7CB0XvpFGG2lWpZ +/xEHGIitPcUQUmiROPremupF7mB04HQVH3TxWTtmHwvfWICbjO6gMfIT3me/4HrE +CA/WX2hjffP1HPfPz3ZU8UMWmodQif2/aX7auh1CfqpJbVVYMCtMr7WCmKXiYkrM +K6osaokueCgM+ouNf1rXnzxdX6ApwZXrx9t/3QIDAQABAoIBABRe3VQN3cq3oaLm +Fj92nZEuz7CWyrhwSy01r2q7b7Kz4d182+tiHP7GPuA282MsbxfUAkmk1Gi91FDp +HMe0CfXdhm7631FLa649NBUi/PAy4FAXd0/OqNVkjAUUokeqUK+6fnuaJgxOcRzq +LDcII9va9Q4d6LyJJ94MNuIm9ZCR/Yg3S3X6TnW+fh6CWw0NL0MV9/7JLPLUZglT +UsFayjNUUxXqrL1OuQ6yyEEVxPtu0rBD9n6s3LGf7iWrmltRaPOkq6feaU741PMV +uF7YUB5oNOVSJNWDFg9cxxJfpO+5I05YA0oiahrrd1jLu+j/1LdKvDSXBy2bLnIu +m3VbigECgYEA5qZdBj/id/HMCoX/Pw/jXzv0OEkOIULc1Jh1il6GUavCYjwPI0KE +tzJUjYfEa7ToymZtcrRYg4qoG7diWHndW4J7hmxj17b+PlwCsoU/TMkxLgw9mmc0 +Qp6fn8VOdGZ4ysTGn80Pn9zRDApy5f29b070cIHjFBXZnREuE0o8hOsCgYEA4vwK +C7JoHFNnxzpDj2aW6JDmNxMRpNOeQkC5rRR6uDjTHdaGq3WI0aGqvc6l47kIcb9w +MJiapHWCzJNc56jqmb/lgDku4sGRs5g6meOYENCYf9aKZzG9fkG/gGZf3eg2Yp2z +KwfKsk4g1HUdwIcC6dTQTgsGoPMYReP44R6Z/FcCgYBeb4Us9uExvPWO5XgxiL7O +kkyW8wpvAeJKxTVy9urF665F7FNCW4zdOSU3YXxBoSujGzb6vO50xUO5PWdt1E+W +lSEgU6a5frowLBoKn9XgCYwyT161pkXWdP3kO7O4ovAYDWNJsHsSOCX7aRfMJQz3 +0vrwSa4A3kVgMtWLnlyTCwKBgQDKfpLvsG9Upcu1XnMbISiLvYjDpU1eQDO1Y0zB +7b01T+x3eASYPbibW6CYyBwSNeYko+aQU/PRt8vCecyuFnGETD+PznPXc1xqXeoZ +k4L7rTv/AARk32jvk/Qlti7cJuctvwYx4zefLjf3kavDMC8XL/XNSeTV/UiwQRqs +qsIw7QKBgDSHMszYPoSaihjPFwIlEDqjk6QNUm4MuISV9/rdRuA+RzBVUOhtWnI0 +Oxh71iELCWxxKn73G0DqIMUfMLvnR7IMBFPS7wn0T13eF9f7LanJYCEXPAJU5OsZ +RNgLchEoy8xRALwfPxYncMEEzOcvexZWXs+76vZc30BXfvEWuXbS +-----END RSA PRIVATE KEY----- diff --git a/internal/otel_collector/config/confignet/README.md b/internal/otel_collector/config/confignet/README.md new file mode 100644 index 00000000000..579564cbff4 --- /dev/null +++ b/internal/otel_collector/config/confignet/README.md @@ -0,0 +1,18 @@ +# Network Configuration Settings + +[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md) +leverage network configuration to set connection and transport information. + +- `endpoint`: Configures the address for this network connection. For TCP and + UDP networks, the address has the form "host:port". The host must be a + literal IP address, or a host name that can be resolved to IP addresses. The + port must be a literal port number or a service name. If the host is a + literal IPv6 address it must be enclosed in square brackets, as in + "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of + the literal IPv6 address as defined in RFC 4007. +- `transport`: Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" + (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" + (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". + +Note that for TCP receivers only the `endpoint` configuration setting is +required. diff --git a/internal/otel_collector/config/confignet/confignet.go b/internal/otel_collector/config/confignet/confignet.go new file mode 100644 index 00000000000..067812f1301 --- /dev/null +++ b/internal/otel_collector/config/confignet/confignet.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confignet + +import ( + "net" +) + +// NetAddr represents a network endpoint address. +type NetAddr struct { + // Endpoint configures the address for this network connection. + // For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, + // or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. + // If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or + // "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. + Endpoint string `mapstructure:"endpoint"` + + // Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), + // "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". + Transport string `mapstructure:"transport"` +} + +// Dial equivalent with net.Dial for this address. +func (na *NetAddr) Dial() (net.Conn, error) { + return net.Dial(na.Transport, na.Endpoint) +} + +// Listen equivalent with net.Listen for this address. +func (na *NetAddr) Listen() (net.Listener, error) { + return net.Listen(na.Transport, na.Endpoint) +} + +// TCPAddr represents a TCP endpoint address. +type TCPAddr struct { + // Endpoint configures the address for this network connection. + // The address has the form "host:port". The host must be a literal IP address, or a host name that can be + // resolved to IP addresses. The port must be a literal port number or a service name. + // If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or + // "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. + Endpoint string `mapstructure:"endpoint"` +} + +// Dial equivalent with net.Dial for this address. +func (na *TCPAddr) Dial() (net.Conn, error) { + return net.Dial("tcp", na.Endpoint) +} + +// Listen equivalent with net.Listen for this address. +func (na *TCPAddr) Listen() (net.Listener, error) { + return net.Listen("tcp", na.Endpoint) +} diff --git a/internal/otel_collector/config/confignet/doc.go b/internal/otel_collector/config/confignet/doc.go new file mode 100644 index 00000000000..1a37c602d6d --- /dev/null +++ b/internal/otel_collector/config/confignet/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package confignet implements the configuration settings for protocols to +// connect and transport data information. 
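+//
+// Example (illustrative; the address is a placeholder):
+//
+//  addr := confignet.TCPAddr{Endpoint: "localhost:55690"}
+//  ln, err := addr.Listen() // equivalent to net.Listen("tcp", "localhost:55690")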
+package confignet diff --git a/internal/otel_collector/config/configparser/parser.go b/internal/otel_collector/config/configparser/parser.go new file mode 100644 index 00000000000..6400ad439df --- /dev/null +++ b/internal/otel_collector/config/configparser/parser.go @@ -0,0 +1,214 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configparser + +import ( + "fmt" + "io" + "io/ioutil" + "reflect" + + "github.com/knadh/koanf" + "github.com/knadh/koanf/maps" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/confmap" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/providers/rawbytes" + "github.com/mitchellh/mapstructure" + "github.com/spf13/cast" +) + +const ( + // KeyDelimiter is used as the default key delimiter in the default koanf instance. + KeyDelimiter = "::" +) + +// NewParser creates a new empty Parser instance. +func NewParser() *Parser { + return &Parser{k: koanf.New(KeyDelimiter)} +} + +// NewParserFromFile creates a new Parser by reading the given file. +func NewParserFromFile(fileName string) (*Parser, error) { + // Read yaml config from file. + p := NewParser() + if err := p.k.Load(file.Provider(fileName), yaml.Parser()); err != nil { + return nil, fmt.Errorf("unable to read the file %v: %w", fileName, err) + } + return p, nil +} + +// NewParserFromBuffer creates a new Parser by reading the given yaml buffer. +func NewParserFromBuffer(buf io.Reader) (*Parser, error) { + content, err := ioutil.ReadAll(buf) + if err != nil { + return nil, err + } + + p := NewParser() + if err := p.k.Load(rawbytes.Provider(content), yaml.Parser()); err != nil { + return nil, err + } + + return p, nil +} + +// NewParserFromStringMap creates a parser from a map[string]interface{}. +func NewParserFromStringMap(data map[string]interface{}) *Parser { + p := NewParser() + // Cannot return error because the koanf instance is empty. + _ = p.k.Load(confmap.Provider(data, KeyDelimiter), nil) + return p +} + +// Parser loads configuration. +type Parser struct { + k *koanf.Koanf +} + +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a KeyDelimiter separator. +func (l *Parser) AllKeys() []string { + return l.k.Keys() +} + +// Unmarshal unmarshalls the config into a struct. +// Tags on the fields of the structure must be properly set. +func (l *Parser) Unmarshal(rawVal interface{}) error { + decoder, err := mapstructure.NewDecoder(decoderConfig(rawVal)) + if err != nil { + return err + } + return decoder.Decode(l.ToStringMap()) +} + +// UnmarshalExact unmarshalls the config into a struct, erroring if a field is nonexistent. 
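+// For example (illustrative), given the yaml `endpoint: localhost:55690`:
+//
+//  var cfg struct {
+//      Endpoint string `mapstructure:"endpoint"`
+//  }
+//  err := parser.UnmarshalExact(&cfg) // errors if the config has keys not present in cfg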
+func (l *Parser) UnmarshalExact(intoCfg interface{}) error {
+ dc := decoderConfig(intoCfg)
+ dc.ErrorUnused = true
+ decoder, err := mapstructure.NewDecoder(dc)
+ if err != nil {
+ return err
+ }
+ return decoder.Decode(l.ToStringMap())
+}
+
+// Get can retrieve any value given the key to use.
+func (l *Parser) Get(key string) interface{} {
+ return l.k.Get(key)
+}
+
+// Set sets the value for the key.
+func (l *Parser) Set(key string, value interface{}) {
+ // koanf doesn't offer a direct setting mechanism so merging is required.
+ merged := koanf.New(KeyDelimiter)
+ merged.Load(confmap.Provider(map[string]interface{}{key: value}, KeyDelimiter), nil)
+ l.k.Merge(merged)
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func (l *Parser) IsSet(key string) bool {
+ return l.k.Exists(key)
+}
+
+// MergeStringMap merges the configuration from the given map with the existing config.
+// Note that the given map may be modified.
+func (l *Parser) MergeStringMap(cfg map[string]interface{}) error {
+ toMerge := koanf.New(KeyDelimiter)
+ toMerge.Load(confmap.Provider(cfg, KeyDelimiter), nil)
+ return l.k.Merge(toMerge)
+}
+
+// Sub returns a new Parser instance representing a sub-config of this instance.
+// It returns an error if the sub-config is not a map (use Get() instead), and an
+// empty Parser if none exists.
+func (l *Parser) Sub(key string) (*Parser, error) {
+ data := l.Get(key)
+ if data == nil {
+ return NewParser(), nil
+ }
+
+ if reflect.TypeOf(data).Kind() == reflect.Map {
+ subParser := NewParser()
+ // Cannot return an error because the new sub-parser is empty.
+ _ = subParser.MergeStringMap(cast.ToStringMap(data))
+ return subParser, nil
+ }
+
+ return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind())
+}
+
+// ToStringMap creates a map[string]interface{} from a Parser.
+func (l *Parser) ToStringMap() map[string]interface{} {
+ return maps.Unflatten(l.k.All(), KeyDelimiter)
+}
+
+// decoderConfig returns a default mapstructure.DecoderConfig capable of parsing time.Duration
+// and weakly converting config field values to primitive types. It also ensures that maps
+// whose values are nil struct pointers resolve to the zero value of the target struct (see
+// expandNilStructPointers). A decoder created from this mapstructure.DecoderConfig will decode
+// its contents to the result argument.
+func decoderConfig(result interface{}) *mapstructure.DecoderConfig {
+ return &mapstructure.DecoderConfig{
+ Result: result,
+ Metadata: nil,
+ TagName: "mapstructure",
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ expandNilStructPointers(),
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ ),
+ }
+}
+
+// In cases where a config has a mapping of something to struct pointers,
+// we want nil values to resolve to a pointer to the zero value of the
+// underlying struct just as we want nil values of a mapping of something
+// to a struct to resolve to the zero value of that struct.
+//
+// e.g.
given a config type: +// type Config struct { Thing *SomeStruct `mapstructure:"thing"` } +// +// and yaml of: +// config: +// thing: +// +// we want an unmarshalled Config to be equivalent to +// Config{Thing: &SomeStruct{}} instead of Config{Thing: nil} +func expandNilStructPointers() mapstructure.DecodeHookFunc { + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + // ensure we are dealing with map to map comparison + if from.Kind() == reflect.Map && to.Kind() == reflect.Map { + toElem := to.Type().Elem() + // ensure that map values are pointers to a struct + // (that may be nil and require manual setting w/ zero value) + if toElem.Kind() == reflect.Ptr && toElem.Elem().Kind() == reflect.Struct { + fromRange := from.MapRange() + for fromRange.Next() { + fromKey := fromRange.Key() + fromValue := fromRange.Value() + // ensure that we've run into a nil pointer instance + if fromValue.IsNil() { + newFromValue := reflect.New(toElem.Elem()) + from.SetMapIndex(fromKey, newFromValue) + } + } + } + } + return from.Interface(), nil + } +} diff --git a/internal/otel_collector/config/configparser/testdata/basic_types.yaml b/internal/otel_collector/config/configparser/testdata/basic_types.yaml new file mode 100644 index 00000000000..ff4c384e9b5 --- /dev/null +++ b/internal/otel_collector/config/configparser/testdata/basic_types.yaml @@ -0,0 +1,6 @@ +typed.options: + floating.point.example: 3.14 + integer.example: 1234 + bool.example: false + string.example: this is a string + nil.example: diff --git a/internal/otel_collector/config/configparser/testdata/config.yaml b/internal/otel_collector/config/configparser/testdata/config.yaml new file mode 100644 index 00000000000..38227d7a68b --- /dev/null +++ b/internal/otel_collector/config/configparser/testdata/config.yaml @@ -0,0 +1,23 @@ +receivers: + nop: + nop/myreceiver: + +processors: + nop: + nop/myprocessor: + +exporters: + nop: + nop/myexporter: + +extensions: + nop: + nop/myextension: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/config/configparser/testdata/embedded_keys.yaml b/internal/otel_collector/config/configparser/testdata/embedded_keys.yaml new file mode 100644 index 00000000000..db5c7a12655 --- /dev/null +++ b/internal/otel_collector/config/configparser/testdata/embedded_keys.yaml @@ -0,0 +1,6 @@ +typed::options: + floating::point::example: 3.14 + integer::example: 1234 + bool::example: false + string::example: this is a string + nil::example: diff --git a/internal/otel_collector/config/configtelemetry/configtelemetry.go b/internal/otel_collector/config/configtelemetry/configtelemetry.go new file mode 100644 index 00000000000..b8bd1e60078 --- /dev/null +++ b/internal/otel_collector/config/configtelemetry/configtelemetry.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package configtelemetry
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+)
+
+const (
+ // LevelNone indicates that no telemetry data should be collected.
+ LevelNone Level = iota - 1
+ // LevelBasic is the recommended level; it covers the basics of service telemetry.
+ LevelBasic
+ // LevelNormal adds some other indicators on top of basic.
+ LevelNormal
+ // LevelDetailed adds dimensions and views to the previous levels.
+ LevelDetailed
+
+ levelNoneStr = "none"
+ levelBasicStr = "basic"
+ levelNormalStr = "normal"
+ levelDetailedStr = "detailed"
+
+ metricsLevelCfg = "metrics-level"
+)
+
+var metricsLevelPtr = new(Level)
+
+// Flags is a helper function that registers the telemetry config flags
+// with the application's flag set.
+func Flags(flags *flag.FlagSet) {
+ flags.Var(
+ metricsLevelPtr,
+ metricsLevelCfg,
+ "Output level of telemetry metrics (none, basic, normal, detailed)")
+}
+
+// Level is the level of internal telemetry (metrics, logs, traces about the component itself)
+// that every component should generate.
+type Level int8
+
+var _ flag.Value = (*Level)(nil)
+
+func (l *Level) String() string {
+ switch *l {
+ case LevelNone:
+ return levelNoneStr
+ case LevelBasic:
+ return levelBasicStr
+ case LevelNormal:
+ return levelNormalStr
+ case LevelDetailed:
+ return levelDetailedStr
+ }
+ return "unknown"
+}
+
+// Set sets the telemetry level.
+func (l *Level) Set(s string) error {
+ lvl, err := parseLevel(s)
+ if err != nil {
+ return err
+ }
+ *l = lvl
+ return nil
+}
+
+// GetMetricsLevelFlagValue returns the value of the "--metrics-level" flag.
+// IMPORTANT: This must be used only in the core collector code for the moment.
+func GetMetricsLevelFlagValue() Level {
+ return *metricsLevelPtr
+}
+
+// TelemetrySetting exposes the common Telemetry configuration for one component.
+type TelemetrySetting struct {
+ // MetricsLevelStr is the level of telemetry metrics; the possible values are:
+ // - "none" indicates that no telemetry data should be collected;
+ // - "basic" is the recommended level and covers the basics of service telemetry.
+ // - "normal" adds some other indicators on top of basic.
+ // - "detailed" adds dimensions and views to the previous levels.
+ MetricsLevelStr string `mapstructure:"metrics_level"`
+}
+
+// DefaultTelemetrySetting returns the default TelemetrySetting.
+// The level is taken from the "--metrics-level" flag if set, otherwise the default "basic" level is used.
+func DefaultTelemetrySetting() TelemetrySetting {
+ return TelemetrySetting{
+ MetricsLevelStr: metricsLevelPtr.String(),
+ }
+}
+
+// GetMetricsLevel returns the parsed level, or an error if the value is unknown.
+// An empty string is considered an unknown value.
+func (ts TelemetrySetting) GetMetricsLevel() (Level, error) {
+ return parseLevel(ts.MetricsLevelStr)
+}
+
+// parseLevel returns the Level represented by the string. The parsing is case-insensitive
+// and it returns an error if the string value is unknown.
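+// For example (illustrative), parseLevel("DETAILED") returns LevelDetailed, while
+// parseLevel("") or any other unrecognized value returns an error.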
+func parseLevel(str string) (Level, error) { + str = strings.ToLower(str) + + switch str { + case levelNoneStr: + return LevelNone, nil + case levelBasicStr: + return LevelBasic, nil + case levelNormalStr: + return LevelNormal, nil + case levelDetailedStr: + return LevelDetailed, nil + } + return LevelNone, fmt.Errorf("unknown metrics level %q", str) +} diff --git a/internal/otel_collector/config/configtelemetry/doc.go b/internal/otel_collector/config/configtelemetry/doc.go new file mode 100644 index 00000000000..36fe369e957 --- /dev/null +++ b/internal/otel_collector/config/configtelemetry/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configtelemetry defines various telemetry level for configuration. +// It enables every component to have access to telemetry level +// to enable metrics only when necessary. +package configtelemetry diff --git a/internal/otel_collector/config/configtest/configtest.go b/internal/otel_collector/config/configtest/configtest.go new file mode 100644 index 00000000000..c27ecc96d4b --- /dev/null +++ b/internal/otel_collector/config/configtest/configtest.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configtest + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configparser" + "go.opentelemetry.io/collector/config/configunmarshaler" +) + +// LoadConfig loads a config from file, and does NOT validate the configuration. +func LoadConfig(fileName string, factories component.Factories) (*config.Config, error) { + // Read yaml config from file + cp, err := configparser.NewParserFromFile(fileName) + if err != nil { + return nil, err + } + // Unmarshal the config using the given factories. + return configunmarshaler.NewDefault().Unmarshal(cp, factories) +} + +// LoadConfigAndValidate loads a config from the file, and validates the configuration. 
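+// For example (illustrative; the path and factories value are placeholders):
+//
+//  cfg, err := configtest.LoadConfigAndValidate("testdata/config.yaml", factories)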
+func LoadConfigAndValidate(fileName string, factories component.Factories) (*config.Config, error) { + cfg, err := LoadConfig(fileName, factories) + if err != nil { + return nil, err + } + return cfg, cfg.Validate() +} diff --git a/internal/otel_collector/config/configtest/doc.go b/internal/otel_collector/config/configtest/doc.go new file mode 100644 index 00000000000..a76c6c8e717 --- /dev/null +++ b/internal/otel_collector/config/configtest/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configtest loads the configuration to test packages +// implementing the config package interfaces. +package configtest diff --git a/internal/otel_collector/config/configtest/testdata/config.yaml b/internal/otel_collector/config/configtest/testdata/config.yaml new file mode 100644 index 00000000000..38227d7a68b --- /dev/null +++ b/internal/otel_collector/config/configtest/testdata/config.yaml @@ -0,0 +1,23 @@ +receivers: + nop: + nop/myreceiver: + +processors: + nop: + nop/myprocessor: + +exporters: + nop: + nop/myexporter: + +extensions: + nop: + nop/myextension: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/config/configtls/README.md b/internal/otel_collector/config/configtls/README.md new file mode 100644 index 00000000000..60be861dcf0 --- /dev/null +++ b/internal/otel_collector/config/configtls/README.md @@ -0,0 +1,119 @@ +# TLS Configuration Settings + +Crypto TLS exposes a [variety of settings](https://godoc.org/crypto/tls). +Several of these settings are available for configuration within individual +receivers or exporters. + +Note that mutual TLS (mTLS) is also supported. + +## TLS / mTLS Configuration + +By default, TLS is enabled: + +- `insecure` (default = false): whether to enable client transport security for + the exporter's gRPC connection. See + [grpc.WithInsecure()](https://godoc.org/google.golang.org/grpc#WithInsecure). + +As a result, the following parameters are also required: + +- `cert_file`: Path to the TLS cert to use for TLS required connections. Should + only be used if `insecure` is set to false. +- `key_file`: Path to the TLS key to use for TLS required connections. Should + only be used if `insecure` is set to false. + +A certificate authority may also need to be defined: + +- `ca_file`: Path to the CA cert. For a client this verifies the server + certificate. For a server this verifies client certificates. If empty uses + system root CA. Should only be used if `insecure` is set to false. + +Additionally you can configure TLS to be enabled but skip verifying the server's +certificate chain. This cannot be combined with `insecure` since `insecure` +won't use TLS at all. + +- `insecure_skip_verify` (default = false): whether to skip verifying the + certificate or not. + +Minimum and maximum TLS version can be set: + +- `min_version` (default = "1.0"): Minimum acceptable TLS version. 
+
+- `max_version` (default = "1.3"): Maximum acceptable TLS version.
+
+How TLS/mTLS is configured depends on whether you are configuring a client or
+a server. See below for examples.
+
+## Client Configuration
+
+[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md)
+leverage client configuration.
+
+Note that client configuration supports TLS; however, its parameters are not
+defined under `tls_settings` as they are for server configuration. See the
+examples below.
+
+Beyond TLS configuration, the following setting can optionally be configured:
+
+- `server_name_override`: If set to a non-empty string, it will override the
+  virtual host name of authority (e.g. :authority header field) in requests
+  (typically used for testing).
+
+Example:
+
+```yaml
+exporters:
+  otlp:
+    endpoint: myserver.local:55690
+    insecure: false
+    ca_file: server.crt
+    cert_file: client.crt
+    key_file: client.key
+    min_version: "1.1"
+    max_version: "1.2"
+  otlp/insecure:
+    endpoint: myserver.local:55690
+    insecure: true
+  otlp/secure_no_verify:
+    endpoint: myserver.local:55690
+    insecure: false
+    insecure_skip_verify: true
+```
+
+## Server Configuration
+
+[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md)
+leverage server configuration.
+
+Beyond TLS configuration, the following setting can optionally be configured
+(required for mTLS):
+
+- `client_ca_file`: Path to the TLS cert to use by the server to verify a
+  client certificate. This sets the ClientCAs and ClientAuth to
+  RequireAndVerifyClientCert in the TLSConfig. Please refer to
+  https://godoc.org/crypto/tls#Config for more information.
+
+Example:
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: mysite.local:55690
+        tls_settings:
+          cert_file: server.crt
+          key_file: server.key
+  otlp/mtls:
+    protocols:
+      grpc:
+        endpoint: mysite.local:55690
+        tls_settings:
+          client_ca_file: client.pem
+          cert_file: server.crt
+          key_file: server.key
+  otlp/notls:
+    protocols:
+      grpc:
+        endpoint: mysite.local:55690
+```
diff --git a/internal/otel_collector/config/configtls/configtls.go b/internal/otel_collector/config/configtls/configtls.go
new file mode 100644
index 00000000000..7b6c215d3b8
--- /dev/null
+++ b/internal/otel_collector/config/configtls/configtls.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configtls
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+)
+
+// TLSSetting exposes the common client and server TLS configurations.
+// Note: Since there isn't anything specific to a server connection, components
+// with server connections should use TLSSetting directly.
+type TLSSetting struct {
+ // Path to the CA cert. For a client this verifies the server certificate.
+ // For a server this verifies client certificates. If empty, uses the system root CA.
+ // (optional)
+ CAFile string `mapstructure:"ca_file"`
+
+ // Path to the TLS cert to use for TLS required connections. (optional)
+ CertFile string `mapstructure:"cert_file"`
+
+ // Path to the TLS key to use for TLS required connections. (optional)
+ KeyFile string `mapstructure:"key_file"`
+
+ // MinVersion sets the minimum TLS version that is acceptable.
+ // If not set, TLS 1.0 is used. (optional)
+ MinVersion string `mapstructure:"min_version"`
+
+ // MaxVersion sets the maximum TLS version that is acceptable.
+ // If not set, TLS 1.3 is used. (optional)
+ MaxVersion string `mapstructure:"max_version"`
+}
+
+// TLSClientSetting contains TLS configurations that are specific to client
+// connections in addition to the common configurations. This should be used by
+// components configuring TLS client connections.
+type TLSClientSetting struct {
+ // squash ensures fields are correctly decoded in embedded struct.
+ TLSSetting `mapstructure:",squash"`
+
+ // These are config options specific to client connections.
+
+ // In gRPC when set to true, this is used to disable the client transport security.
+ // See https://godoc.org/google.golang.org/grpc#WithInsecure.
+ // In HTTP, this disables verifying the server's certificate chain and host name
+ // (InsecureSkipVerify in the tls Config). Please refer to
+ // https://godoc.org/crypto/tls#Config for more information.
+ // (optional, default false)
+ Insecure bool `mapstructure:"insecure"`
+ // InsecureSkipVerify will enable TLS but not verify the certificate.
+ InsecureSkipVerify bool `mapstructure:"insecure_skip_verify"`
+ // ServerName requested by client for virtual hosting.
+ // This sets the ServerName in the TLSConfig. Please refer to
+ // https://godoc.org/crypto/tls#Config for more information. (optional)
+ ServerName string `mapstructure:"server_name_override"`
+}
+
+// TLSServerSetting contains TLS configurations that are specific to server
+// connections in addition to the common configurations. This should be used by
+// components configuring TLS server connections.
+type TLSServerSetting struct {
+ // squash ensures fields are correctly decoded in embedded struct.
+ TLSSetting `mapstructure:",squash"`
+
+ // These are config options specific to server connections.
+
+ // Path to the TLS cert to use by the server to verify a client certificate. (optional)
+ // This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig.
+ // Please refer to https://godoc.org/crypto/tls#Config for more information.
+ ClientCAFile string `mapstructure:"client_ca_file"`
+}
+
+// loadTLSConfig loads TLS certificates and returns a tls.Config.
+// This will set the RootCAs and Certificates of a tls.Config.
+func (c TLSSetting) loadTLSConfig() (*tls.Config, error) {
+ // There is no need to load the system certs for RootCAs because
+ // if the value is nil, it will default to checking against the system certs.
+ var err error
+ var certPool *x509.CertPool
+ if len(c.CAFile) != 0 {
+ // Set up the user-specified truststore.
+ certPool, err = c.loadCert(c.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to load CA CertPool: %w", err) + } + } + + if (c.CertFile == "" && c.KeyFile != "") || (c.CertFile != "" && c.KeyFile == "") { + return nil, fmt.Errorf("for auth via TLS, either both certificate and key must be supplied, or neither") + } + + var certificates []tls.Certificate + if c.CertFile != "" && c.KeyFile != "" { + var tlsCert tls.Certificate + tlsCert, err = tls.LoadX509KeyPair(filepath.Clean(c.CertFile), filepath.Clean(c.KeyFile)) + if err != nil { + return nil, fmt.Errorf("failed to load TLS cert and key: %w", err) + } + certificates = append(certificates, tlsCert) + } + + minTLS, err := convertVersion(c.MinVersion) + if err != nil { + return nil, fmt.Errorf("invalid TLS min_version: %w", err) + } + maxTLS, err := convertVersion(c.MaxVersion) + if err != nil { + return nil, fmt.Errorf("invalid TLS max_version: %w", err) + } + + return &tls.Config{ + RootCAs: certPool, + Certificates: certificates, + MinVersion: minTLS, + MaxVersion: maxTLS, + }, nil +} + +func (c TLSSetting) loadCert(caPath string) (*x509.CertPool, error) { + caPEM, err := ioutil.ReadFile(filepath.Clean(caPath)) + if err != nil { + return nil, fmt.Errorf("failed to load CA %s: %w", caPath, err) + } + + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(caPEM) { + return nil, fmt.Errorf("failed to parse CA %s", caPath) + } + return certPool, nil +} + +// LoadTLSConfig loads the TLS configuration. +func (c TLSClientSetting) LoadTLSConfig() (*tls.Config, error) { + if c.Insecure && c.CAFile == "" { + return nil, nil + } + + tlsCfg, err := c.TLSSetting.loadTLSConfig() + if err != nil { + return nil, fmt.Errorf("failed to load TLS config: %w", err) + } + tlsCfg.ServerName = c.ServerName + tlsCfg.InsecureSkipVerify = c.InsecureSkipVerify + return tlsCfg, nil +} + +// LoadTLSConfig loads the TLS configuration. +func (c TLSServerSetting) LoadTLSConfig() (*tls.Config, error) { + tlsCfg, err := c.loadTLSConfig() + if err != nil { + return nil, fmt.Errorf("failed to load TLS config: %w", err) + } + if c.ClientCAFile != "" { + certPool, err := c.loadCert(c.ClientCAFile) + if err != nil { + return nil, fmt.Errorf("failed to load TLS config: failed to load client CA CertPool: %w", err) + } + tlsCfg.ClientCAs = certPool + tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert + } + return tlsCfg, nil +} + +func convertVersion(v string) (uint16, error) { + if v == "" { + return 0, nil // default + } + val, ok := tlsVersions[v] + if !ok { + return 0, fmt.Errorf("unsupported TLS version: %q", v) + } + return val, nil +} + +var tlsVersions = map[string]uint16{ + "1.0": tls.VersionTLS10, + "1.1": tls.VersionTLS11, + "1.2": tls.VersionTLS12, + "1.3": tls.VersionTLS13, +} diff --git a/internal/otel_collector/config/configtls/doc.go b/internal/otel_collector/config/configtls/doc.go new file mode 100644 index 00000000000..93a167500c4 --- /dev/null +++ b/internal/otel_collector/config/configtls/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configtls implements the TLS settings to load and +// configure TLS clients and servers. +package configtls diff --git a/internal/otel_collector/config/configtls/testdata/test-cert.pem b/internal/otel_collector/config/configtls/testdata/test-cert.pem new file mode 100644 index 00000000000..627628866fa --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/test-cert.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEHjCCAoagAwIBAgIQTUSsPHdq9Uhu2bMD29ThkDANBgkqhkiG9w0BAQsFADBd +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExGTAXBgNVBAsMEHRyaXN0 +YW5AdHJpc3RhbnMxIDAeBgNVBAMMF21rY2VydCB0cmlzdGFuQHRyaXN0YW5zMB4X +DTE5MDYxMTA3NTA0NloXDTI5MDYxMTA3NTA0NlowRDEnMCUGA1UEChMebWtjZXJ0 +IGRldmVsb3BtZW50IGNlcnRpZmljYXRlMRkwFwYDVQQLDBB0cmlzdGFuQHRyaXN0 +YW5zMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6zf2JlSdKTdiYDZV +i1yPnP65/CgqlxMflTP9N2P1W7F1SbvQgiGiSfUyc7NicffLGqqDbK3Q4hvANkRC +wOYc+nXZLL6IAxsZ/QBfud3GG2XhuETT2p84Wlqo55I3wFF+Efb89FRp+IiAy2gj +c275hmie6zDRYNJticmZwBIXfnYvwY66V8Y2jKEAjtf6BEmB8yPxWLhxdgY3FjWR +y3kRLfr6BhxVM2qYtl/gXbyGTFjAv7LgFQa/25OXRevs+VjBWFQiQ89b+YIZPpJB +y8y+02nsRLt9Oy9lWMq1/pEqySDV6T3rrw5rV7TLj2RGNkxbnjk+qmf5mWxYzO5X +QaBqeQIDAQABo3MwcTAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUH +AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQbdcIL3c/Yr+lR9wLU2FLPJSfk +ZjAbBgNVHREEFDASghBjbGllbnQuamFlZ2VyLmlvMA0GCSqGSIb3DQEBCwUAA4IB +gQBx/tQKqGLQGv90TyQOdKFPPOQ0iU/pXrM0t1Gn55UuvSz6G66IufPQuV+MeW1B +CGcSm12QAjwIvFVPFiBurygQ9eYU/tZW1NCaTSoSRa8KzYMBuDlfqYdS3/7gq2+L +L3b9QZt4rLgaQp0StTlpgCZuKa6N4aK25HQpu+lZ/ZxL1cvLlTGtI2VEWrx9hZ9q +5ImLy063iSc/YqD51XR0LJTkjSdep4sBEGtl5V582ncZAGZQim90hiaPrf3TXVnN +HQuzJNE5pwS637nCcyzmXn07Wh4qcT5vWDmySeN9eDJjfrbM9il11mkGZ9JQYf8Z +S+1562KvxjVVlsegnXaR27tAGkJ40X/OZRC28jLEXIjManDhClZD3uwqlSRtb6/M +ux4+8kqL90msVRlZR5VnUCR5/rZr4ji07NMDVJijI99lRQ5rDbf7Z9CMUpLTXcfd +jJBweUKlFEe3HZ9BfZOU3tLbAdQa2/I420lFVo8mEdu6cpKQpW8fITDvl/71OpQu +FsI= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configtls/testdata/test-key.pem b/internal/otel_collector/config/configtls/testdata/test-key.pem new file mode 100644 index 00000000000..dc7766f9363 --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/test-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDrN/YmVJ0pN2Jg +NlWLXI+c/rn8KCqXEx+VM/03Y/VbsXVJu9CCIaJJ9TJzs2Jx98saqoNsrdDiG8A2 +RELA5hz6ddksvogDGxn9AF+53cYbZeG4RNPanzhaWqjnkjfAUX4R9vz0VGn4iIDL +aCNzbvmGaJ7rMNFg0m2JyZnAEhd+di/BjrpXxjaMoQCO1/oESYHzI/FYuHF2BjcW +NZHLeREt+voGHFUzapi2X+BdvIZMWMC/suAVBr/bk5dF6+z5WMFYVCJDz1v5ghk+ +kkHLzL7TaexEu307L2VYyrX+kSrJINXpPeuvDmtXtMuPZEY2TFueOT6qZ/mZbFjM +7ldBoGp5AgMBAAECgf854ouw4yHKAtcy1iw3H5A4Eneyli/k/c/H6ANonjDDX+h9 +PLsTSzOk/7JqxrpzUYeqCExPcnb1Ld8fe6zxy69V86p+WGUgXosGuBDWrL0UAP6L +WmTIaGZ11dm7I0CVE3jy8tVNS3jIsM8BP595yNWfPh/dwSXFrgNG5VXw7oLZm8Nd +q4+yybeRT/1dhlz+toV44x1GjfKkxqhnTPZvnyqvg8jYMVQmbsnUlvAyfRr3fh3g +zEnzuBW0KPPkNbMyL1Q3QU/8LVf4lQ37pI1887edJmlXtbEuh8QjTVDB/5oi7O5/ +2wxdGDTGIad4kXYG2vsuTVunZZq15BfMVyGkHoECgYEA9h1ROB6AfoxkykvQyJEb +1rOxQVz0tAgwzb25aThkSEXVZ6GgdZabgH4aArCGOEXrVt5hlDoDHC8ZEcyQ+yuF ++wFa2C6SorUkGnBJH9J9umWW+bOa5XigqgMHnpjM9yhNH34UnMSm+VarqczQhVx5 +QqIsbCsT+hbAkhwAgJo64kkCgYEA9KqbQ8YTRMc58n3juX8PIFYrOXsUGhWPG2jo +YoiUXgHSZDvxAsp6AtU2jUXjzjTCaF+h4zhxND3FD2yBLRt/Xx/GYXzmDf+Wx68B +4G0ZW4a+huoIEhsM6WGs7oT/sQxluMFb6G/rOaZEWDNzhYtVGNZTxnxCsd4eWj1j +9zy6RrECgYEA4qWTAyxLxr6Bny58ogfH7Evk4723+AdG8mFS2ww8hbYR1fKpM0C0 
+CXuXdnybzjzNgl0e3YMjFBRncNXDehrVspbH0yfokABitBpNrQmKEVq201NMRSB2 +TLqnjK1IrB+oDmVslAYhgqMHSUK9kOLdJLj2UdLF/dxwEN3KtKPTsEkCgYEAhPPU +rY6MV/qfDZvFTL6z3JGWqYStVsNSYcWvSiQH49G/n4JHJIocpT9xhnFtKlfXMNqO +4SeBtK7AT/JZe8aOf4WHyuARL5gtOlNqhKckeW0OScgRHK2gZY4TaAXT4ETpXe2M +4RE4VLp6Nye2ZeJiGr4VBi3uHDOkcMsdcHOKkfECgYEAwEizw5kfhQ79bl9SwPbl +euE0wxUyEu+1lNqqAr6ty+BtfGufOxupzejNKghdpgB/bmuK77G8ikbDh6Ya6pQ1 +++Oes8NSFNiKq7pZOpjOeXRRo/OncBFKRDOX/i4ARWeJ/ZvjYz1fPyQuQiylaeDx +IYDJ4/DyVeyPiVrSQKJ5YLk= +-----END PRIVATE KEY----- diff --git a/internal/otel_collector/config/configtls/testdata/testCA-bad.txt b/internal/otel_collector/config/configtls/testdata/testCA-bad.txt new file mode 100644 index 00000000000..2f4aad65c7f --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/testCA-bad.txt @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +bad certificate +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configtls/testdata/testCA.pem b/internal/otel_collector/config/configtls/testdata/testCA.pem new file mode 100644 index 00000000000..0a986020542 --- /dev/null +++ b/internal/otel_collector/config/configtls/testdata/testCA.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICBzCCAXCgAwIBAgIQNkTaUtOczDHvL2YT/kqScTANBgkqhkiG9w0BAQsFADAX +MRUwEwYDVQQKEwxqYWdlcnRyYWNpbmcwHhcNMTkwMjA4MDYyODAyWhcNMTkwMjA4 +MDcyODAyWjAXMRUwEwYDVQQKEwxqYWdlcnRyYWNpbmcwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBAMcOLYflHGbqC1f7+tbnsdfcpd0rEuX65+ab0WzelAgvo988 +yD+j7LDLPIE8IPk/tfqaETZ8h0LRUUTn8F2rW/wgrl/G8Onz0utog38N0elfTifG +Mu7GJCr/+aYM5xbQMDj4Brb4vhnkJF8UBe49fWILhIltUcm1SeKqVX3d1FvpAgMB +AAGjVDBSMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNV +HRMBAf8EBTADAQH/MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG +9w0BAQsFAAOBgQCreFjwpAn1HqJT812JOwoWKrt1NjOKGcz7pvIs1k3DfQVLH2aZ +iPKnCkzNgxMzQtwdgpAOXIAqXyNibvyOAv1C+3QSMLKbuPEHaIxlCuvl1suX/g25 +17x1o3Q64AnPCWOLpN2wjkfZqX7gZ84nsxpqb9Sbw1+2+kqX7dSZ3mfVxQ== +-----END CERTIFICATE----- diff --git a/internal/otel_collector/config/configunmarshaler/defaultunmarshaler.go b/internal/otel_collector/config/configunmarshaler/defaultunmarshaler.go new file mode 100644 index 00000000000..50919199897 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/defaultunmarshaler.go @@ -0,0 +1,536 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configunmarshaler + +import ( + "fmt" + "os" + "reflect" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configparser" +) + +// These are errors that can be returned by Unmarshal(). Note that error codes are not part +// of Unmarshal()'s public API, they are for internal unit testing only. +type configErrorCode int + +const ( + // Skip 0, start errors codes from 1. + _ configErrorCode = iota + + errInvalidTypeAndNameKey + errUnknownType + errDuplicateName + errUnmarshalTopLevelStructureError +) + +type configError struct { + // Human readable error message. 
+	msg string
+
+	// Internal error code.
+	code configErrorCode
+}
+
+func (e *configError) Error() string {
+	return e.msg
+}
+
+// YAML top-level configuration keys.
+const (
+	// extensionsKeyName is the configuration key name for the extensions section.
+	extensionsKeyName = "extensions"
+
+	// receiversKeyName is the configuration key name for the receivers section.
+	receiversKeyName = "receivers"
+
+	// exportersKeyName is the configuration key name for the exporters section.
+	exportersKeyName = "exporters"
+
+	// processorsKeyName is the configuration key name for the processors section.
+	processorsKeyName = "processors"
+
+	// pipelinesKeyName is the configuration key name for the pipelines section.
+	pipelinesKeyName = "pipelines"
+)
+
+type configSettings struct {
+	Receivers  map[string]map[string]interface{} `mapstructure:"receivers"`
+	Processors map[string]map[string]interface{} `mapstructure:"processors"`
+	Exporters  map[string]map[string]interface{} `mapstructure:"exporters"`
+	Extensions map[string]map[string]interface{} `mapstructure:"extensions"`
+	Service    serviceSettings                   `mapstructure:"service"`
+}
+
+type serviceSettings struct {
+	Extensions []string                    `mapstructure:"extensions"`
+	Pipelines  map[string]pipelineSettings `mapstructure:"pipelines"`
+}
+
+type pipelineSettings struct {
+	Receivers  []string `mapstructure:"receivers"`
+	Processors []string `mapstructure:"processors"`
+	Exporters  []string `mapstructure:"exporters"`
+}
+
+type defaultUnmarshaler struct{}
+
+// NewDefault returns a default ConfigUnmarshaler that unmarshals each configuration
+// using its custom unmarshaler if present, falling back to strict unmarshaling otherwise.
+func NewDefault() ConfigUnmarshaler {
+	return &defaultUnmarshaler{}
+}
+
+// Unmarshal the Config from a Parser.
+// After the config is unmarshaled, `Validate()` must be called to validate.
+func (*defaultUnmarshaler) Unmarshal(v *configparser.Parser, factories component.Factories) (*config.Config, error) {
+	var cfg config.Config
+
+	// Unmarshal the config.
+
+	// Struct to validate top-level sections.
+	var rawCfg configSettings
+	if err := v.UnmarshalExact(&rawCfg); err != nil {
+		return nil, &configError{
+			code: errUnmarshalTopLevelStructureError,
+			msg:  fmt.Sprintf("error reading top level configuration sections: %s", err.Error()),
+		}
+	}
+
+	// Start with the service extensions.
+
+	extensions, err := unmarshalExtensions(rawCfg.Extensions, factories.Extensions)
+	if err != nil {
+		return nil, err
+	}
+	cfg.Extensions = extensions
+
+	// Unmarshal data components (receivers, exporters, and processors).
+
+	receivers, err := unmarshalReceivers(rawCfg.Receivers, factories.Receivers)
+	if err != nil {
+		return nil, err
+	}
+	cfg.Receivers = receivers
+
+	exporters, err := unmarshalExporters(rawCfg.Exporters, factories.Exporters)
+	if err != nil {
+		return nil, err
+	}
+	cfg.Exporters = exporters
+
+	processors, err := unmarshalProcessors(rawCfg.Processors, factories.Processors)
+	if err != nil {
+		return nil, err
+	}
+	cfg.Processors = processors
+
+	// Unmarshal the service and its data pipelines.
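+	// (Pipelines reference the receivers/processors/exporters unmarshaled above
+	// by their "type[/name]" IDs; unmarshalService rejects IDs that fail to parse.)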
+	service, err := unmarshalService(rawCfg.Service)
+	if err != nil {
+		return nil, err
+	}
+	cfg.Service = service
+
+	return &cfg, nil
+}
+
+func errorInvalidTypeAndNameKey(component, key string, err error) error {
+	return &configError{
+		code: errInvalidTypeAndNameKey,
+		msg:  fmt.Sprintf("invalid %s type and name key %q: %v", component, key, err),
+	}
+}
+
+func errorUnknownType(component string, id config.ComponentID) error {
+	return &configError{
+		code: errUnknownType,
+		msg:  fmt.Sprintf("unknown %s type %q for %v", component, id.Type(), id),
+	}
+}
+
+func errorUnmarshalError(component string, id config.ComponentID, err error) error {
+	return &configError{
+		code: errUnmarshalTopLevelStructureError,
+		msg:  fmt.Sprintf("error reading %s configuration for %v: %v", component, id, err),
+	}
+}
+
+func errorDuplicateName(component string, id config.ComponentID) error {
+	return &configError{
+		code: errDuplicateName,
+		msg:  fmt.Sprintf("duplicate %s name %v", component, id),
+	}
+}
+
+func unmarshalExtensions(exts map[string]map[string]interface{}, factories map[config.Type]component.ExtensionFactory) (config.Extensions, error) {
+	// Prepare resulting map.
+	extensions := make(config.Extensions)
+
+	// Iterate over extensions and create a config for each.
+	for key, value := range exts {
+		componentConfig := configparser.NewParserFromStringMap(value)
+		expandEnvConfig(componentConfig)
+
+		// Decode the key into type and fullName components.
+		id, err := config.NewIDFromString(key)
+		if err != nil {
+			return nil, errorInvalidTypeAndNameKey(extensionsKeyName, key, err)
+		}
+
+		// Find extension factory based on "type" that we read from config source.
+		factory := factories[id.Type()]
+		if factory == nil {
+			return nil, errorUnknownType(extensionsKeyName, id)
+		}
+
+		// Create the default config for this extension.
+		extensionCfg := factory.CreateDefaultConfig()
+		extensionCfg.SetIDName(id.Name())
+		expandEnvLoadedConfig(extensionCfg)
+
+		// Now that the default config struct is created we can Unmarshal into it,
+		// and it will apply user-defined config on top of the default.
+		if err = unmarshal(componentConfig, extensionCfg); err != nil {
+			return nil, errorUnmarshalError(extensionsKeyName, id, err)
+		}
+
+		if extensions[id] != nil {
+			return nil, errorDuplicateName(extensionsKeyName, id)
+		}
+
+		extensions[id] = extensionCfg
+	}
+
+	return extensions, nil
+}
+
+func unmarshalService(rawService serviceSettings) (config.Service, error) {
+	var ret config.Service
+	ret.Extensions = make([]config.ComponentID, 0, len(rawService.Extensions))
+	for _, extIDStr := range rawService.Extensions {
+		id, err := config.NewIDFromString(extIDStr)
+		if err != nil {
+			return ret, err
+		}
+		ret.Extensions = append(ret.Extensions, id)
+	}
+
+	// Process the pipelines first so that any error in them can be properly
+	// reported.
+	pipelines, err := unmarshalPipelines(rawService.Pipelines)
+	ret.Pipelines = pipelines
+
+	return ret, err
+}
+
+// LoadReceiver loads a receiver config from componentConfig using the provided factory.
+func LoadReceiver(componentConfig *configparser.Parser, id config.ComponentID, factory component.ReceiverFactory) (config.Receiver, error) {
+	// Create the default config for this receiver.
+	receiverCfg := factory.CreateDefaultConfig()
+	receiverCfg.SetIDName(id.Name())
+	expandEnvLoadedConfig(receiverCfg)
+
+	// Now that the default config struct is created we can Unmarshal into it,
+	// and it will apply user-defined config on top of the default.
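+	// (unmarshal, defined at the bottom of this file, prefers the component's own
+	// Unmarshal method when the config implements config.Unmarshallable, and falls
+	// back to strict unmarshaling otherwise.)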
+ if err := unmarshal(componentConfig, receiverCfg); err != nil { + return nil, errorUnmarshalError(receiversKeyName, id, err) + } + + return receiverCfg, nil +} + +func unmarshalReceivers(recvs map[string]map[string]interface{}, factories map[config.Type]component.ReceiverFactory) (config.Receivers, error) { + // Prepare resulting map. + receivers := make(config.Receivers) + + // Iterate over input map and create a config for each. + for key, value := range recvs { + componentConfig := configparser.NewParserFromStringMap(value) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + id, err := config.NewIDFromString(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(receiversKeyName, key, err) + } + + // Find receiver factory based on "type" that we read from config source. + factory := factories[id.Type()] + if factory == nil { + return nil, errorUnknownType(receiversKeyName, id) + } + + receiverCfg, err := LoadReceiver(componentConfig, id, factory) + + if err != nil { + // LoadReceiver already wraps the error. + return nil, err + } + + if receivers[id] != nil { + return nil, errorDuplicateName(receiversKeyName, id) + } + receivers[id] = receiverCfg + } + + return receivers, nil +} + +func unmarshalExporters(exps map[string]map[string]interface{}, factories map[config.Type]component.ExporterFactory) (config.Exporters, error) { + // Prepare resulting map. + exporters := make(config.Exporters) + + // Iterate over Exporters and create a config for each. + for key, value := range exps { + componentConfig := configparser.NewParserFromStringMap(value) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + id, err := config.NewIDFromString(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(exportersKeyName, key, err) + } + + // Find exporter factory based on "type" that we read from config source. + factory := factories[id.Type()] + if factory == nil { + return nil, errorUnknownType(exportersKeyName, id) + } + + // Create the default config for this exporter. + exporterCfg := factory.CreateDefaultConfig() + exporterCfg.SetIDName(id.Name()) + expandEnvLoadedConfig(exporterCfg) + + // Now that the default config struct is created we can Unmarshal into it, + // and it will apply user-defined config on top of the default. + if err = unmarshal(componentConfig, exporterCfg); err != nil { + return nil, errorUnmarshalError(exportersKeyName, id, err) + } + + if exporters[id] != nil { + return nil, errorDuplicateName(exportersKeyName, id) + } + + exporters[id] = exporterCfg + } + + return exporters, nil +} + +func unmarshalProcessors(procs map[string]map[string]interface{}, factories map[config.Type]component.ProcessorFactory) (config.Processors, error) { + // Prepare resulting map. + processors := make(config.Processors) + + // Iterate over processors and create a config for each. + for key, value := range procs { + componentConfig := configparser.NewParserFromStringMap(value) + expandEnvConfig(componentConfig) + + // Decode the key into type and fullName components. + id, err := config.NewIDFromString(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(processorsKeyName, key, err) + } + + // Find processor factory based on "type" that we read from config source. + factory := factories[id.Type()] + if factory == nil { + return nil, errorUnknownType(processorsKeyName, id) + } + + // Create the default config for this processor. 
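+		// (The sequence below mirrors the extension/receiver/exporter paths: build
+		// the default config, set the ID name, expand env vars in the defaults,
+		// then merge the user-supplied section on top.)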
+ processorCfg := factory.CreateDefaultConfig() + processorCfg.SetIDName(id.Name()) + expandEnvLoadedConfig(processorCfg) + + // Now that the default config struct is created we can Unmarshal into it, + // and it will apply user-defined config on top of the default. + if err = unmarshal(componentConfig, processorCfg); err != nil { + return nil, errorUnmarshalError(processorsKeyName, id, err) + } + + if processors[id] != nil { + return nil, errorDuplicateName(processorsKeyName, id) + } + + processors[id] = processorCfg + } + + return processors, nil +} + +func unmarshalPipelines(pipelinesConfig map[string]pipelineSettings) (config.Pipelines, error) { + // Prepare resulting map. + pipelines := make(config.Pipelines) + + // Iterate over input map and create a config for each. + for key, rawPipeline := range pipelinesConfig { + // Decode the key into type and name components. + id, err := config.NewIDFromString(key) + if err != nil { + return nil, errorInvalidTypeAndNameKey(pipelinesKeyName, key, err) + } + fullName := id.String() + + // Create the config for this pipeline. + var pipelineCfg config.Pipeline + + // Set the type. + pipelineCfg.InputType = config.DataType(id.Type()) + switch pipelineCfg.InputType { + case config.TracesDataType: + case config.MetricsDataType: + case config.LogsDataType: + default: + return nil, errorUnknownType(pipelinesKeyName, id) + } + + pipelineCfg.Name = fullName + if pipelineCfg.Receivers, err = parseIDNames(id, receiversKeyName, rawPipeline.Receivers); err != nil { + return nil, err + } + if pipelineCfg.Processors, err = parseIDNames(id, processorsKeyName, rawPipeline.Processors); err != nil { + return nil, err + } + if pipelineCfg.Exporters, err = parseIDNames(id, exportersKeyName, rawPipeline.Exporters); err != nil { + return nil, err + } + + if pipelines[fullName] != nil { + return nil, errorDuplicateName(pipelinesKeyName, id) + } + + pipelines[fullName] = &pipelineCfg + } + + return pipelines, nil +} + +func parseIDNames(pipelineID config.ComponentID, componentType string, names []string) ([]config.ComponentID, error) { + var ret []config.ComponentID + for _, idProcStr := range names { + idRecv, err := config.NewIDFromString(idProcStr) + if err != nil { + return nil, fmt.Errorf("pipelines: config for %v contains invalid %s name %s : %w", pipelineID, componentType, idProcStr, err) + } + ret = append(ret, idRecv) + } + return ret, nil +} + +// expandEnvConfig updates a configparser.Parser with expanded values for all the values (simple, list or map value). +// It does not expand the keys. +func expandEnvConfig(v *configparser.Parser) { + for _, k := range v.AllKeys() { + v.Set(k, expandStringValues(v.Get(k))) + } +} + +func expandStringValues(value interface{}) interface{} { + switch v := value.(type) { + default: + return v + case string: + return expandEnv(v) + case []interface{}: + nslice := make([]interface{}, 0, len(v)) + for _, vint := range v { + nslice = append(nslice, expandStringValues(vint)) + } + return nslice + case map[string]interface{}: + nmap := make(map[interface{}]interface{}, len(v)) + for k, vint := range v { + nmap[k] = expandStringValues(vint) + } + return nmap + case map[interface{}]interface{}: + nmap := make(map[interface{}]interface{}, len(v)) + for k, vint := range v { + nmap[k] = expandStringValues(vint) + } + return nmap + } +} + +// expandEnvLoadedConfig is a utility function that goes recursively through a config object +// and tries to expand environment variables in its string fields. 
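+//
+// A minimal illustrative sketch (the struct and environment variable are
+// hypothetical, not part of this package); assuming HOST=localhost is set:
+//
+//	cfg := &struct{ Endpoint string }{Endpoint: "$HOST:4317"}
+//	expandEnvLoadedConfig(cfg) // cfg.Endpoint is now "localhost:4317"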
+func expandEnvLoadedConfig(s interface{}) {
+	expandEnvLoadedConfigPointer(s)
+}
+
+func expandEnvLoadedConfigPointer(s interface{}) {
+	// Check that the value given is indeed a pointer; otherwise safely stop the search here.
+	value := reflect.ValueOf(s)
+	if value.Kind() != reflect.Ptr {
+		return
+	}
+	// Run expandEnvLoadedConfigValue on the value behind the pointer.
+	expandEnvLoadedConfigValue(value.Elem())
+}
+
+func expandEnvLoadedConfigValue(value reflect.Value) {
+	// The value given is a string; expand it (if allowed).
+	if value.Kind() == reflect.String && value.CanSet() {
+		value.SetString(expandEnv(value.String()))
+	}
+	// The value given is a struct; go through its fields.
+	if value.Kind() == reflect.Struct {
+		for i := 0; i < value.NumField(); i++ {
+			// Returns the content of the field.
+			field := value.Field(i)
+
+			// Only try to modify a field if it can be modified (e.g., skip unexported fields).
+			if field.CanSet() {
+				switch field.Kind() {
+				case reflect.String:
+					// The current field is a string; expand env variables in the string.
+					field.SetString(expandEnv(field.String()))
+				case reflect.Ptr:
+					// The current field is a pointer; run the expansion function on the pointer.
+					expandEnvLoadedConfigPointer(field.Interface())
+				case reflect.Struct:
+					// The current field is a nested struct; go through the nested struct.
+					expandEnvLoadedConfigValue(field)
+				}
+			}
+		}
+	}
+}
+
+func expandEnv(s string) string {
+	return os.Expand(s, func(str string) string {
+		// This allows escaping environment variable substitution via $$, e.g.:
+		// - $FOO will be substituted with the env var FOO
+		// - $$FOO will be replaced with $FOO
+		// - $$$FOO will be replaced with $ + the substituted env var FOO
+		if str == "$" {
+			return "$"
+		}
+		return os.Getenv(str)
+	})
+}
+
+func unmarshal(componentSection *configparser.Parser, intoCfg interface{}) error {
+	if cu, ok := intoCfg.(config.Unmarshallable); ok {
+		return cu.Unmarshal(componentSection)
+	}
+
+	return componentSection.UnmarshalExact(intoCfg)
+}
diff --git a/internal/otel_collector/config/configunmarshaler/doc.go b/internal/otel_collector/config/configunmarshaler/doc.go
new file mode 100644
index 00000000000..7ca58b87eb8
--- /dev/null
+++ b/internal/otel_collector/config/configunmarshaler/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package configunmarshaler implements configuration unmarshaling from a configparser.Parser.
+// The implementation relies on registered factories that allow creating
+// the default configuration for each type of receiver/exporter/processor.
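+//
+// A hedged usage sketch (obtaining the parser and factories is left out; the
+// variable names are illustrative only):
+//
+//	cfg, err := configunmarshaler.NewDefault().Unmarshal(parser, factories)
+//	if err != nil {
+//		// handle the unmarshaling error
+//	}
+//	// Per Unmarshal's documentation, Validate() must be called afterwards.
+//	err = cfg.Validate()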
+package configunmarshaler diff --git a/internal/otel_collector/config/configunmarshaler/testdata/duplicate-exporter.yaml b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-exporter.yaml new file mode 100644 index 00000000000..399be0b7bb5 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-exporter.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter/exp: + exampleexporter/ exp : +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/duplicate-extension.yaml b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-extension.yaml new file mode 100644 index 00000000000..854d146723e --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-extension.yaml @@ -0,0 +1,3 @@ +extensions: + exampleextension/ext: + exampleextension/ ext: diff --git a/internal/otel_collector/config/configunmarshaler/testdata/duplicate-pipeline.yaml b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-pipeline.yaml new file mode 100644 index 00000000000..671991a79d5 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-pipeline.yaml @@ -0,0 +1,16 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces/default: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] + traces/ default: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/duplicate-processor.yaml b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-processor.yaml new file mode 100644 index 00000000000..5345cec5926 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-processor.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor/ abc: + exampleprocessor/abc: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/duplicate-receiver.yaml b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-receiver.yaml new file mode 100644 index 00000000000..275054e6772 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/duplicate-receiver.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver/ 1: + examplereceiver/1: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/empty-all-sections.yaml b/internal/otel_collector/config/configunmarshaler/testdata/empty-all-sections.yaml new file mode 100644 index 00000000000..05341a02188 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/empty-all-sections.yaml @@ -0,0 +1,5 @@ +receivers: +exporters: +processors: +service: + pipelines: diff --git a/internal/otel_collector/config/configunmarshaler/testdata/empty-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/empty-config.yaml new file mode 100644 index 
00000000000..e69de29bb2d diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-name-after-slash.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-name-after-slash.yaml new file mode 100644 index 00000000000..b6a03029a3e --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-name-after-slash.yaml @@ -0,0 +1,11 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: + exampleexporter/: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [somereceiver] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-section.yaml new file mode 100644 index 00000000000..fa5e0257b9a --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-section.yaml @@ -0,0 +1,20 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + unknown_section: exporter +extensions: + exampleextension: +service: + extensions: + - exampleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-sub-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-sub-config.yaml new file mode 100644 index 00000000000..a965a9e9e20 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: + tests +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-type.yaml new file mode 100644 index 00000000000..9f497c228e6 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-exporter-type.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: + /custom: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-name-after-slash.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-name-after-slash.yaml new file mode 100644 index 00000000000..7361f28073b --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-name-after-slash.yaml @@ -0,0 +1,14 @@ +extensions: + exampleextension/: +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] \ No newline at end of file diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-section.yaml new file mode 100644 index 00000000000..1e0a809d9a3 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: 
+ exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: + unknown_section: + a_num: 2 +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-sub-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-sub-config.yaml new file mode 100644 index 00000000000..0859a71cfeb --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-sub-config.yaml @@ -0,0 +1,16 @@ +extensions: + exampleextension: + tests +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-type.yaml new file mode 100644 index 00000000000..95bca7040e6 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-extension-type.yaml @@ -0,0 +1,14 @@ +extensions: + /custom: +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] \ No newline at end of file diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-name-after-slash.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-name-after-slash.yaml new file mode 100644 index 00000000000..1f4718a2d94 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-name-after-slash.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + pipelines: + metrics/: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-section.yaml new file mode 100644 index 00000000000..760edb27db4 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-section.yaml @@ -0,0 +1,20 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - exampleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter + unknown_section: 1 diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-sub-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-sub-config.yaml new file mode 100644 index 00000000000..ddf95f3b9bd --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-sub-config.yaml @@ -0,0 +1,9 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-type.yaml 
b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-type.yaml new file mode 100644 index 00000000000..889dd6b1b51 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-pipeline-type.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + pipelines: + /metrics: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-name-after-slash.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-name-after-slash.yaml new file mode 100644 index 00000000000..7b0fe077c32 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-name-after-slash.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: + exampleprocessor/: +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-section.yaml new file mode 100644 index 00000000000..01d2a086ed4 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: + unknown_section: + a_num: 2 +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-sub-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-sub-config.yaml new file mode 100644 index 00000000000..862fd86ab8e --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: + tests +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-type.yaml new file mode 100644 index 00000000000..69d87aac4bb --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-processor-type.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: + /custom: +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-name-after-slash.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-name-after-slash.yaml new file mode 100644 index 00000000000..e2d74fd5e03 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-name-after-slash.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: + examplereceiver/: +exporters: + exampleexporter: +processors: + exampleprocessor: 
+service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-section.yaml new file mode 100644 index 00000000000..558f17a4682 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: + unknown_section: + a_num: 2 +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extensions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-sub-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-sub-config.yaml new file mode 100644 index 00000000000..2fb171b21fe --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-sub-config.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: + tests +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-type.yaml new file mode 100644 index 00000000000..29ffaffac33 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-receiver-type.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: + /custom: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-sequence-value.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-sequence-value.yaml new file mode 100644 index 00000000000..7f7a0a385ee --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-sequence-value.yaml @@ -0,0 +1,14 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: + examplereceiver: + some: config + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-extensions-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-extensions-section.yaml new file mode 100644 index 00000000000..00b123c1931 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-extensions-section.yaml @@ -0,0 +1,18 @@ +extensions: + exampleextension: +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + extensions: + exampleextension: + error: true + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-section.yaml new file mode 100644 index 
00000000000..6a3b8f92772 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-service-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extenstions: + - examapleextension + unknown_section: + a_num: 2 + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter diff --git a/internal/otel_collector/config/configunmarshaler/testdata/invalid-top-level-section.yaml b/internal/otel_collector/config/configunmarshaler/testdata/invalid-top-level-section.yaml new file mode 100644 index 00000000000..0b9819d17de --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/invalid-top-level-section.yaml @@ -0,0 +1,21 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: +extensions: + exampleextension: +service: + extenstions: + - examapleextension + pipelines: + traces: + receivers: + - examplereceiver + processors: + - exampleprocessor + exporters: + - exampleexporter +unknown_section: + a_num: 2 diff --git a/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-all-env.yaml b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-all-env.yaml new file mode 100644 index 00000000000..f3428d7eda4 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-all-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "$RECEIVERS_EXAMPLERECEIVER_EXTRA" + extra_map: + recv.1: "$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1" + recv.2: "$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2" + extra_list: + - "$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1" + - "$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2" + + +processors: + exampleprocessor: + extra: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2" + extra_list: + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2" + +exporters: + exampleexporter: + extra_int: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT}" + extra: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1}" + exp_2: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2}" + extra_list: + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + extra_map: + ext-1: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1}" + - "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2}" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-escaped-env.yaml b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-escaped-env.yaml new file mode 100644 index 00000000000..8870e6baf77 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-escaped-env.yaml @@ -0,0 +1,62 @@ 
+receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "$$RECEIVERS_EXAMPLERECEIVER_EXTRA" + extra_map: + # $$ -> escaped $ + recv.1: "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_1" + # $$$ -> escaped $ + substituted env var + recv.2: "$$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_2" + # $$$$ -> two escaped $ + recv.3: "$$$$RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_3" + # escaped $ in the middle + recv.4: "some$${RECEIVERS_EXAMPLERECEIVER_EXTRA_MAP_RECV_VALUE_4}text" + # two escaped $ + recv.5: "$${ONE}$${TWO}" + # trailing escaped $ + recv.6: "text$$" + # escaped $ alone + recv.7: "$$" + extra_list: + - "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_1" + - "$$RECEIVERS_EXAMPLERECEIVER_EXTRA_LIST_VALUE_2" + + +processors: + exampleprocessor: + extra: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_2" + extra_list: + - "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "$$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_2" + +exporters: + exampleexporter: + extra: "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_1}" + exp_2: "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_MAP_EXP_VALUE_2}" + extra_list: + - "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "$${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + extra_map: + ext-1: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_1}" + - "$${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_LIST_VALUE_2}" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-no-env.yaml b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-no-env.yaml new file mode 100644 index 00000000000..f9e1d7708b3 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-no-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "some receiver string" + extra_map: + recv.1: "some receiver map value_1" + recv.2: "some receiver map value_2" + extra_list: + - "some receiver list value_1" + - "some receiver list value_2" + + +processors: + exampleprocessor: + extra: "some processor string" + extra_map: + proc_1: "some processor map value_1" + proc_2: "some processor map value_2" + extra_list: + - "some processor list value_1" + - "some processor list value_2" + +exporters: + exampleexporter: + extra_int: 65 + extra: "some exporter string" + extra_map: + exp_1: "some exporter map value_1" + exp_2: "some exporter map value_2" + extra_list: + - "some exporter list value_1" + - "some exporter list value_2" + +extensions: + exampleextension: + extra: "some extension string" + extra_map: + ext-1: "some extension map value_1" + ext-2: "some extension map value_2" + extra_list: + - "some extension list value_1" + - "some extension list value_2" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git 
a/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-partial-env.yaml b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-partial-env.yaml new file mode 100644 index 00000000000..7d3087b4a50 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/simple-config-with-partial-env.yaml @@ -0,0 +1,51 @@ +receivers: + examplereceiver: + endpoint: "localhost:1234" + extra: "some receiver string" + extra_map: + recv.1: "some receiver map value_1" + recv.2: "some receiver map value_2" + extra_list: + - "some receiver list value_1" + - "some receiver list value_2" + + +processors: + exampleprocessor: + extra: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA" + extra_map: + proc_1: "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_MAP_PROC_VALUE_1" + proc_2: "some processor map value_2" + extra_list: + - "$PROCESSORS_EXAMPLEPROCESSOR_EXTRA_LIST_VALUE_1" + - "some processor list value_2" + +exporters: + exampleexporter: + extra_int: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_INT}" + extra: "${EXPORTERS_EXAMPLEEXPORTER_EXTRA}" + extra_map: + exp_1: "some exporter map value_1" + exp_2: "some exporter map value_2" + extra_list: + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_1}" + - "${EXPORTERS_EXAMPLEEXPORTER_EXTRA_LIST_VALUE_2}" + +extensions: + exampleextension: + extra: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA}" + extra_map: + ext-1: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_1}" + ext-2: "${EXTENSIONS_EXAMPLEEXTENSION_EXTRA_MAP_EXT_VALUE_2}" + extra_list: + - "some extension list value_1" + - "some extension list value_2" + +service: + extensions: [exampleextension] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/configunmarshaler/testdata/unknown-exporter-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/unknown-exporter-type.yaml new file mode 100644 index 00000000000..df98a241f30 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/unknown-exporter-type.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: +exporters: + nosuchexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/unknown-extension-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/unknown-extension-type.yaml new file mode 100644 index 00000000000..1ee2ca759c5 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/unknown-extension-type.yaml @@ -0,0 +1,2 @@ +extensions: + nosuchextension: diff --git a/internal/otel_collector/config/configunmarshaler/testdata/unknown-pipeline-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/unknown-pipeline-type.yaml new file mode 100644 index 00000000000..7c88405a947 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/unknown-pipeline-type.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + exampleprocessor: +exporters: + exampleexporter: + +service: + pipelines: + wrongdatatype: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/unknown-processor-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/unknown-processor-type.yaml new file mode 100644 index 00000000000..02230c9d565 
--- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/unknown-processor-type.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: +exporters: + exampleexporter: +processors: + nosuchprocessor: +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] + processors: [exampleprocessor] diff --git a/internal/otel_collector/config/configunmarshaler/testdata/unknown-receiver-type.yaml b/internal/otel_collector/config/configunmarshaler/testdata/unknown-receiver-type.yaml new file mode 100644 index 00000000000..75c777f2bb0 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/unknown-receiver-type.yaml @@ -0,0 +1,15 @@ +receivers: + nosuchreceiver: +exporters: + exampleexporter: +processors: + exampleprocessor: +service: + pipelines: + traces: + receivers: + - examplereceiver + exporters: + - exampleexporter + processors: + - exampleprocessor diff --git a/internal/otel_collector/config/configunmarshaler/testdata/valid-config.yaml b/internal/otel_collector/config/configunmarshaler/testdata/valid-config.yaml new file mode 100644 index 00000000000..986f6ea1049 --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/testdata/valid-config.yaml @@ -0,0 +1,29 @@ +receivers: + examplereceiver: + examplereceiver/myreceiver: + endpoint: "localhost:12345" + extra: "some string" + +processors: + exampleprocessor: + +exporters: + exampleexporter/myexporter: + extra: "some export string 2" + exampleexporter: + +extensions: + exampleextension/0: + exampleextension/disabled: + extra: "not present in the service" + exampleextension/1: + extra: "some string" + +service: + extensions: [exampleextension/0, exampleextension/1] + pipelines: + traces: + receivers: [examplereceiver] + processors: [exampleprocessor] + exporters: [exampleexporter] + diff --git a/internal/otel_collector/config/configunmarshaler/unmarshaler.go b/internal/otel_collector/config/configunmarshaler/unmarshaler.go new file mode 100644 index 00000000000..123ab4c96ad --- /dev/null +++ b/internal/otel_collector/config/configunmarshaler/unmarshaler.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configunmarshaler + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configparser" +) + +// ConfigUnmarshaler is the interface that unmarshalls the collector configuration from the configparser.Parser. +type ConfigUnmarshaler interface { + // Unmarshal the configuration from the given parser and factories. 
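+	// The returned configuration is not validated; per the default
+	// unmarshaler's contract, callers must invoke Validate() on it afterwards.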
+	Unmarshal(v *configparser.Parser, factories component.Factories) (*config.Config, error)
+}
diff --git a/internal/otel_collector/config/doc.go b/internal/otel_collector/config/doc.go
new file mode 100644
index 00000000000..10f21a33f54
--- /dev/null
+++ b/internal/otel_collector/config/doc.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// Package config defines the data models for entities. This file defines the
+// models for the configuration format. The defined entities are:
+// Config (the top-level structure), Receivers, Exporters, Processors, Pipelines.
+//
+// Receivers, Exporters and Processors typically have common configuration settings; however,
+// specific implementations will sometimes have extra configuration settings.
+// This requires the configuration data for these entities to be polymorphic.
+//
+// To satisfy these requirements we declare the interfaces Receiver, Exporter, Processor,
+// which define the behavior. We also provide the helper structs ReceiverSettings, ExporterSettings,
+// and ProcessorSettings, which define the common settings and unmarshaling from config files.
+//
+// Specific Receivers/Exporters/Processors are expected to implement, at a minimum, the
+// corresponding interface; if they have additional settings they must also extend
+// the corresponding common settings struct (the easiest approach is to embed the common struct).
+package config
diff --git a/internal/otel_collector/config/experimental/config/config.go b/internal/otel_collector/config/experimental/config/config.go
new file mode 100644
index 00000000000..8b5596fc3a6
--- /dev/null
+++ b/internal/otel_collector/config/experimental/config/config.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"go.opentelemetry.io/collector/config"
+)
+
+// SourceSettings defines common settings of a Source configuration.
+// Specific config sources can embed this struct and extend it with more fields if needed.
+// When embedded, it must use the `mapstructure:",squash"` tag.
+type SourceSettings struct {
+	config.ComponentID `mapstructure:"-"`
+}
+
+// ID returns the ID of the component that this configuration belongs to.
+func (s *SourceSettings) ID() config.ComponentID {
+	return s.ComponentID
+}
+
+// SetIDName updates the name part of the ID for the component that this configuration belongs to.
+func (s *SourceSettings) SetIDName(idName string) {
+	s.ComponentID = config.NewIDWithName(s.ComponentID.Type(), idName)
+}
+
+// NewSourceSettings returns a new SourceSettings struct with the given ComponentID.
+func NewSourceSettings(id config.ComponentID) SourceSettings {
+	return SourceSettings{id}
+}
+
+// Source is the configuration of a config source. Specific config sources must implement this
+// interface and will typically embed the SourceSettings struct, or a struct that extends it.
+type Source interface {
+	// TODO: While config sources are experimental and not in the config package, they can't
+	// reference the private interfaces config.identifiable and config.validatable.
+	// The required methods of those interfaces are defined here temporarily.
+
+	// From config.identifiable:
+
+	// ID returns the ID of the component that this configuration belongs to.
+	ID() config.ComponentID
+	// SetIDName updates the name part of the ID for the component that this configuration belongs to.
+	SetIDName(idName string)
+
+	// From config.validatable:
+
+	// Validate validates the configuration and returns an error if invalid.
+	Validate() error
+}
diff --git a/internal/otel_collector/config/experimental/config/doc.go b/internal/otel_collector/config/experimental/config/doc.go
new file mode 100644
index 00000000000..0b7985886b8
--- /dev/null
+++ b/internal/otel_collector/config/experimental/config/doc.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package config under config/experimental contains configuration-related types and interfaces
+// that would typically live under the "go.opentelemetry.io/collector/config" package but aren't
+// yet stable enough to be published there.
+// ATTENTION: the package is still experimental and subject to change without advance notice.
+package config
diff --git a/internal/otel_collector/config/experimental/configsource/component.go b/internal/otel_collector/config/experimental/configsource/component.go
new file mode 100644
index 00000000000..cae3f87be5f
--- /dev/null
+++ b/internal/otel_collector/config/experimental/configsource/component.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configsource
+
+import (
+	"context"
+	"errors"
+)
+
+// ErrSessionClosed is returned by WatchForUpdate functions when the parent Session
+// object is closed.
+// This error can be wrapped with additional information. Callers trying to identify this
+// specific error must use errors.Is.
+var ErrSessionClosed = errors.New("parent session was closed")
+
+// ErrValueUpdated is returned by WatchForUpdate functions when the value being watched
+// was changed or expired and needs to be retrieved again.
+// This error can be wrapped with additional information. Callers trying to identify this
+// specific error must use errors.Is.
+var ErrValueUpdated = errors.New("configuration must retrieve the updated value")
+
+// ConfigSource is the interface to be implemented by objects used by the collector
+// to retrieve external configuration information.
+//
+// A ConfigSource object will be used to retrieve full configuration or data to be
+// injected into a configuration.
+//
+// A ConfigSource implementation should perform any source-specific setup at creation time:
+// lock resources, open connections, etc. An implementation, for instance,
+// can use the creation time to prevent torn configurations, by acquiring a lock
+// (or some other mechanism) that prevents concurrent changes to the configuration
+// while data is being retrieved from the source.
+//
+// The code managing the ConfigSource instance must guarantee that the object is not used concurrently.
+type ConfigSource interface {
+    // Retrieve goes to the configuration source and retrieves the selected data, which
+    // contains the value to be injected into the configuration and the corresponding watcher that
+    // will be used to monitor for updates of the retrieved value. The retrieved value is selected
+    // according to the selector and the params passed in the call to Retrieve.
+    //
+    // The selector is a string that is required on all invocations; the params are optional. Each
+    // implementation handles the generic params according to its requirements.
+    Retrieve(ctx context.Context, selector string, params interface{}) (Retrieved, error)
+
+    // RetrieveEnd signals that the Session must not be used to retrieve any new values from the
+    // source, i.e., all values from this source were already retrieved for the configuration. It should
+    // be used to release resources that are only needed to retrieve configuration data.
+    RetrieveEnd(ctx context.Context) error
+
+    // Close signals that the configuration for which it was used to retrieve values is no longer in use
+    // and the object should close and release any watchers that it may have created.
+    // This method must be called when the configuration session ends, either in case of success
+    // or error. Each Session object should use this call according to its needs: release resources,
+    // close communication channels, etc.
+    Close(ctx context.Context) error
+}
+
+// Retrieved holds the result of a call to the Retrieve method of a Session object.
+type Retrieved interface {
+    // Value is the retrieved data that will be injected into the configuration.
+    Value() interface{}
+}
+
+// Watchable is an optional interface that Retrieved can implement if the given source
+// supports monitoring for updates.
+type Watchable interface {
+    // WatchForUpdate is used to monitor for updates on the retrieved value.
+    // It must not return until one of the following happens:
+    //
+    // 1. An update is detected for the monitored value. In this case the function should
+    // return ErrValueUpdated or an error wrapping it.
+    //
+    // 2. The parent Session object is closed, in which case the method should return
+    // ErrSessionClosed or an error wrapping it.
+    //
+    // 3. An error happens while watching for updates. The method should not return
+    // on first instances of transient errors; optionally there should be
+    // configurable thresholds to control for how long such errors can be ignored.
+    //
+    // This method must only be called when the RetrieveEnd method of the Session that
+    // retrieved the value was successfully completed.
+    WatchForUpdate() error
+}
diff --git a/internal/otel_collector/config/experimental/configsource/doc.go b/internal/otel_collector/config/experimental/configsource/doc.go
new file mode 100644
index 00000000000..73b0b772145
--- /dev/null
+++ b/internal/otel_collector/config/experimental/configsource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package configsource is an experimental package that defines the interface of
+// "configuration sources," e.g., Vault, ZooKeeper, etcd2, and others. Configuration
+// sources retrieve values from their respective storages. A configuration parser/loader
+// can inject these values into the configuration data used by the collector.
+// ATTENTION: the package is still experimental and subject to change without advance notice.
+package configsource
diff --git a/internal/otel_collector/config/exporter.go b/internal/otel_collector/config/exporter.go
new file mode 100644
index 00000000000..fd68703e78f
--- /dev/null
+++ b/internal/otel_collector/config/exporter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+// Exporter is the configuration of a component.Exporter. Specific exporters must implement
+// this interface and must embed the ExporterSettings struct or a struct that extends it.
+type Exporter interface {
+    identifiable
+    validatable
+
+    privateConfigExporter()
+}
+
+// Exporters is a map of names to Exporters.
+type Exporters map[ComponentID]Exporter
+
+// ExporterSettings defines common settings for a component.Exporter configuration.
+// Specific exporters can embed this struct and extend it with more fields if needed.
+//
+// It is highly recommended to "override" the Validate() function.
+//
+// When embedded in the exporter config, it must carry the `mapstructure:",squash"` tag.
+type ExporterSettings struct {
+    id ComponentID `mapstructure:"-"`
+}
+
+// NewExporterSettings returns a new ExporterSettings with the given ComponentID.
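+//
+// A minimal embedding sketch (hypothetical exporter config, for illustration only):
+//
+//   type myExporterConfig struct {
+//       ExporterSettings `mapstructure:",squash"`
+//       Endpoint string  `mapstructure:"endpoint"`
+//   }
+//
+//   cfg := myExporterConfig{ExporterSettings: NewExporterSettings(NewID("myexporter"))}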
+func NewExporterSettings(id ComponentID) ExporterSettings {
+    return ExporterSettings{id: ComponentID{typeVal: id.Type(), nameVal: id.Name()}}
+}
+
+var _ Exporter = (*ExporterSettings)(nil)
+
+// ID returns the exporter ComponentID.
+func (rs *ExporterSettings) ID() ComponentID {
+    return rs.id
+}
+
+// SetIDName sets the exporter name.
+func (rs *ExporterSettings) SetIDName(idName string) {
+    rs.id.nameVal = idName
+}
+
+// Validate validates the configuration and returns an error if invalid.
+func (rs *ExporterSettings) Validate() error {
+    return nil
+}
+
+func (rs *ExporterSettings) privateConfigExporter() {}
diff --git a/internal/otel_collector/config/extension.go b/internal/otel_collector/config/extension.go
new file mode 100644
index 00000000000..ad0a4a473f8
--- /dev/null
+++ b/internal/otel_collector/config/extension.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+// Extension is the configuration of a component.Extension. Specific extensions must implement
+// this interface and must embed the ExtensionSettings struct or a struct that extends it.
+type Extension interface {
+    identifiable
+    validatable
+
+    privateConfigExtension()
+}
+
+// Extensions is a map of names to extensions.
+type Extensions map[ComponentID]Extension
+
+// ExtensionSettings defines common settings for a component.Extension configuration.
+// Specific extensions can embed this struct and extend it with more fields if needed.
+//
+// It is highly recommended to "override" the Validate() function.
+//
+// When embedded in the extension config, it must carry the `mapstructure:",squash"` tag.
+type ExtensionSettings struct {
+    id ComponentID `mapstructure:"-"`
+}
+
+// NewExtensionSettings returns a new ExtensionSettings with the given ComponentID.
+func NewExtensionSettings(id ComponentID) ExtensionSettings {
+    return ExtensionSettings{id: ComponentID{typeVal: id.Type(), nameVal: id.Name()}}
+}
+
+var _ Extension = (*ExtensionSettings)(nil)
+
+// ID returns the extension ComponentID.
+func (rs *ExtensionSettings) ID() ComponentID {
+    return rs.id
+}
+
+// SetIDName sets the extension name.
+func (rs *ExtensionSettings) SetIDName(idName string) {
+    rs.id.nameVal = idName
+}
+
+// Validate validates the configuration and returns an error if invalid.
+func (rs *ExtensionSettings) Validate() error {
+    return nil
+}
+
+func (rs *ExtensionSettings) privateConfigExtension() {}
diff --git a/internal/otel_collector/config/identifiable.go b/internal/otel_collector/config/identifiable.go
new file mode 100644
index 00000000000..cde3bc1158d
--- /dev/null
+++ b/internal/otel_collector/config/identifiable.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+    "errors"
+    "strings"
+)
+
+// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys.
+const typeAndNameSeparator = "/"
+
+// identifiable is an interface that all component configurations MUST embed.
+type identifiable interface {
+    // ID returns the ID of the component that this configuration belongs to.
+    ID() ComponentID
+    // SetIDName updates the name part of the ID for the component that this configuration belongs to.
+    SetIDName(idName string)
+}
+
+// ComponentID represents the identity for a component. It combines two values:
+// * type - the Type of the component.
+// * name - the name of that component.
+// The ComponentID (type + name combination) is unique for a given component.Kind.
+type ComponentID struct {
+    typeVal Type   `mapstructure:"-"`
+    nameVal string `mapstructure:"-"`
+}
+
+// NewID returns a new ComponentID with the given Type and empty name.
+func NewID(typeVal Type) ComponentID {
+    return ComponentID{typeVal: typeVal}
+}
+
+// NewIDWithName returns a new ComponentID with the given Type and name.
+func NewIDWithName(typeVal Type, nameVal string) ComponentID {
+    return ComponentID{typeVal: typeVal, nameVal: nameVal}
+}
+
+// NewIDFromString decodes a string in type[/name] format into ComponentID.
+// The type and name components will have spaces trimmed; the "type" part must be present,
+// while the forward slash and "name" are optional.
+// The returned ComponentID will be invalid if err is non-nil.
+func NewIDFromString(idStr string) (ComponentID, error) {
+    items := strings.SplitN(idStr, typeAndNameSeparator, 2)
+
+    id := ComponentID{}
+    if len(items) >= 1 {
+        id.typeVal = Type(strings.TrimSpace(items[0]))
+    }
+
+    if len(items) == 0 || id.typeVal == "" {
+        return id, errors.New("idStr must have non empty type")
+    }
+
+    if len(items) > 1 {
+        // "name" part is present.
+        id.nameVal = strings.TrimSpace(items[1])
+        if id.nameVal == "" {
+            return id, errors.New("name part must be specified after " + typeAndNameSeparator + " in type/name key")
+        }
+    }
+
+    return id, nil
+}
+
+// Type returns the type of the component.
+func (id ComponentID) Type() Type {
+    return id.typeVal
+}
+
+// Name returns the custom name of the component.
+func (id ComponentID) Name() string {
+    return id.nameVal
+}
+
+// String returns the ComponentID string representation in "type[/name]" format.
+func (id ComponentID) String() string {
+    if id.nameVal == "" {
+        return string(id.typeVal)
+    }
+
+    return string(id.typeVal) + typeAndNameSeparator + id.nameVal
+}
diff --git a/internal/otel_collector/config/internal/configsource/doc.go b/internal/otel_collector/config/internal/configsource/doc.go
new file mode 100644
index 00000000000..f7fa73f46c4
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package configsource is an internal package that implements methods
+// for injecting, watching, and updating data from ConfigSource into
+// configuration.
+package configsource
diff --git a/internal/otel_collector/config/internal/configsource/manager.go b/internal/otel_collector/config/internal/configsource/manager.go
new file mode 100644
index 00000000000..c7e6affdb28
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/manager.go
@@ -0,0 +1,626 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package configsource
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "net/url"
+    "os"
+    "strings"
+    "sync"
+
+    "gopkg.in/yaml.v2"
+
+    "go.opentelemetry.io/collector/config/configparser"
+    "go.opentelemetry.io/collector/config/experimental/configsource"
+    "go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+const (
+    // expandPrefixChar is the char used to prefix strings that can be expanded,
+    // either environment variables or config sources.
+    expandPrefixChar = '$'
+    // configSourceNameDelimChar is the char used to terminate the name of a config source
+    // when it is used to retrieve values to inject into the configuration.
+    configSourceNameDelimChar = ':'
+)
+
+// private error types to help with testability
+type (
+    errUnknownConfigSource struct{ error }
+)
+
+// Manager is used to inject data from config sources into a configuration and also
+// to monitor for updates on the items injected into the configuration. All methods
+// of a Manager must be called only once and have an expected sequence:
+//
+// 1. NewManager to create a new instance;
+// 2. Resolve to inject the data from config sources into a configuration;
+// 3. WatchForUpdate in a goroutine to wait for configuration updates;
+// 4. WaitForWatcher to wait until the watchers are in place;
+// 5. Close to close the instance;
+//
+// The current syntax to reference a config source in a YAML is provisional. Currently
+// single-line:
+//
+//    param_to_be_retrieved: $<cfgSrcName>:<selector>[?<params>]
+//
+// bracketed single-line:
+//
+//    param_to_be_retrieved: ${<cfgSrcName>:<selector>[?<params>]}
+//
+// and multi-line are supported:
+//
+//    param_to_be_retrieved: |
+//      $<cfgSrcName>: <selector>
+//      [<params>]
+//
+// The <cfgSrcName> is a name string used to identify the config source instance to be used
+// to retrieve the value.
+//
+// The <selector> is the mandatory parameter required when retrieving data from a config source.
+//
+// Not all config sources need the optional parameters; they are used to provide extra control when
+// retrieving and preparing the data to be injected into the configuration.
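+//
+// For illustration, assuming a hypothetical config source named "env", the three forms
+// could reference the same value as:
+//
+//    plain: $env:HOME
+//    bracketed: ${env:HOME}/logs
+//    multiline: |
+//      $env: HOME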
+//
+// For the single-line format, <params> uses the same syntax as URL query parameters.
+// Hypothetical example in a YAML file:
+//
+// component:
+//   config_field: $file:/etc/secret.bin?binary=true
+//
+// For the multi-line format, <params> uses YAML-inside-YAML syntax. Possible usage
+// example in a YAML file:
+//
+// component:
+//   config_field: |
+//     $yamltemplate: /etc/log_template.yaml
+//     logs_path: /var/logs/
+//     timeout: 10s
+//
+// Not all config sources need these optional parameters; they are used to provide extra control when
+// retrieving and preparing the data to be injected into the configuration.
+//
+// Assuming a config source named "env" that retrieves environment variables and one named "file" that
+// retrieves contents from individual files, here are some examples:
+//
+// component:
+//   # Retrieves the value of the environment variable LOGS_DIR.
+//   logs_dir: $env:LOGS_DIR
+//
+//   # Retrieves the value from the file /etc/secret.bin and injects its contents as a []byte.
+//   bytes_from_file: $file:/etc/secret.bin?binary=true
+//
+//   # Retrieves the value from the file /etc/text.txt and injects its contents as a string.
+//   # Hypothetically the "file" config source by default tries to inject the file contents
+//   # as a string if params doesn't specify that "binary" is true.
+//   text_from_file: $file:/etc/text.txt
+//
+// Bracketed single-line should be used when concatenating a suffix to the value retrieved by
+// the config source. Example:
+//
+// component:
+//   # Retrieves the value of the environment variable LOGS_DIR and appends /component.log to it.
+//   log_file_fullname: ${env:LOGS_DIR}/component.log
+//
+// Environment variables are expanded before being passed to the config source when used in the selector or
+// the optional parameters. Example:
+//
+// component:
+//   # Retrieves the value from the file text.txt located on the path specified by the environment
+//   # variable DATA_PATH. The name of the environment variable is the string after the delimiter
+//   # until the first character that is neither '_' nor alphanumeric.
+//   text_from_file: $file:$DATA_PATH/text.txt
+//
+// Since environment variables and config sources both use the '$', with or without brackets, as a prefix
+// for their expansion it is necessary to have a way to distinguish between them. For the non-bracketed
+// syntax the code will peek at the first character other than alphanumeric and '_' after the '$'. If
+// that character is a ':' it will treat it as a config source, and as an environment variable otherwise.
+// For example:
+//
+// component:
+//   field_0: $PATH:/etc/logs # Injects the data from a config source named "PATH" using the selector "/etc/logs".
+//   field_1: $PATH/etc/logs  # Expands the environment variable "PATH" and adds the suffix "/etc/logs" to it.
+//
+// So if you need to include an environment variable followed by ':' the bracketed syntax must be used instead:
+//
+// component:
+//   field_0: ${PATH}:/etc/logs # Expands the environment variable "PATH" and adds the suffix ":/etc/logs" to it.
+//
+// For the bracketed syntax the presence of ':' inside the brackets indicates that the code will treat the bracketed
+// contents as a config source. For example:
+//
+// component:
+//   field_0: ${file:/var/secret.txt} # Injects the data from a config source named "file" using the selector "/var/secret.txt".
+//   field_1: ${file}:/var/secret.txt # Expands the environment variable "file" and adds the suffix ":/var/secret.txt" to it.
+//
+// If the character following the '$' is in the set {'*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+// the code will consider it to be the name of an environment variable to expand, or a config source if followed by ':'. Do not use any of these
+// characters as the first char in the name of a config source or an environment variable (even if allowed by the system) to avoid unexpected
+// results.
+type Manager struct {
+    // configSources is a map from ConfigSource names (as defined in the configuration)
+    // to the respective instances.
+    configSources map[string]configsource.ConfigSource
+    // watchers keeps track of all WatchForUpdate functions for retrieved values.
+    watchers []configsource.Watchable
+    // watchersWG is used to ensure that Close waits for all WatchForUpdate calls
+    // to complete.
+    watchersWG sync.WaitGroup
+    // watchingCh is used to notify users of the Manager that the WatchForUpdate function
+    // is ready and waiting for notifications.
+    watchingCh chan struct{}
+    // closeCh is used to notify the Manager WatchForUpdate function that the manager
+    // is being closed.
+    closeCh chan struct{}
+}
+
+// NewManager creates a new instance of a Manager to be used to inject data from
+// ConfigSource objects into a configuration and watch for updates on the injected
+// data.
+func NewManager(_ *configparser.Parser) (*Manager, error) {
+    // TODO: Config sources should be extracted from the config itself, need Factories for that.
+
+    return &Manager{
+        watchingCh: make(chan struct{}),
+        closeCh:    make(chan struct{}),
+    }, nil
+}
+
+// Resolve inspects the given config.Parser and resolves all config sources referenced
+// in the configuration, returning a fully resolved config.Parser. This must be called only
+// once per lifetime of a Manager object.
+func (m *Manager) Resolve(ctx context.Context, parser *configparser.Parser) (*configparser.Parser, error) {
+    res := configparser.NewParser()
+    allKeys := parser.AllKeys()
+    for _, k := range allKeys {
+        value, err := m.expandStringValues(ctx, parser.Get(k))
+        if err != nil {
+            // Call RetrieveEnd for all sources used so far but don't record any errors.
+            _ = m.retrieveEnd(ctx)
+            return nil, err
+        }
+        res.Set(k, value)
+    }
+
+    if errs := m.retrieveEnd(ctx); len(errs) > 0 {
+        return nil, consumererror.Combine(errs)
+    }
+
+    return res, nil
+}
+
+// WatchForUpdate must watch for updates on any of the values retrieved from config sources
+// and injected into the configuration. Typically this method is launched in a goroutine; the
+// WaitForWatcher method blocks until the WatchForUpdate goroutine is running and ready.
+func (m *Manager) WatchForUpdate() error {
+    // Use a channel to capture the first error returned by any watcher and another one
+    // to ensure completion of any remaining watcher also trying to report an error.
+    errChannel := make(chan error, 1)
+    doneCh := make(chan struct{})
+    defer close(doneCh)
+
+    for i := range m.watchers {
+        watcher := m.watchers[i]
+        m.watchersWG.Add(1)
+        go func() {
+            defer m.watchersWG.Done()
+            err := watcher.WatchForUpdate()
+            switch {
+            case errors.Is(err, configsource.ErrSessionClosed):
+                // The Session from which this watcher was retrieved is being closed.
+                // There is no error to report, just exit from the goroutine.
+                return
+            default:
+                select {
+                case errChannel <- err:
+                    // Try to report any other error.
+                case <-doneCh:
+                    // There was either one error published or the watcher was closed.
+                    // This channel was closed and any goroutines waiting on it
+                    // should simply return.
+                }
+            }
+        }()
+    }
+
+    // All goroutines were created; they may not be running yet, but the manager WatchForUpdate
+    // is only waiting for any of the watchers to terminate.
+    close(m.watchingCh)
+
+    select {
+    case err := <-errChannel:
+        // Return the first error that reaches the channel and ignore any other error.
+        return err
+    case <-m.closeCh:
+        // This covers the case that all watchers returned ErrWatcherNotSupported.
+        return configsource.ErrSessionClosed
+    }
+}
+
+// WaitForWatcher blocks until the watchers used by WatchForUpdate are all ready.
+// This is used to ensure that the watchers are in place before proceeding.
+func (m *Manager) WaitForWatcher() {
+    <-m.watchingCh
+}
+
+// Close terminates the WatchForUpdate function and closes all Session objects used
+// in the configuration. It should be called when the configuration resolved by this
+// Manager is no longer in use.
+func (m *Manager) Close(ctx context.Context) error {
+    var errs []error
+    for _, source := range m.configSources {
+        if err := source.Close(ctx); err != nil {
+            errs = append(errs, err)
+        }
+    }
+
+    close(m.closeCh)
+    m.watchersWG.Wait()
+
+    return consumererror.Combine(errs)
+}
+
+func (m *Manager) retrieveEnd(ctx context.Context) []error {
+    var errs []error
+    for _, source := range m.configSources {
+        if err := source.RetrieveEnd(ctx); err != nil {
+            errs = append(errs, err)
+        }
+    }
+    return errs
+}
+
+func (m *Manager) expandStringValues(ctx context.Context, value interface{}) (interface{}, error) {
+    switch v := value.(type) {
+    case string:
+        return m.expandString(ctx, v)
+    case []interface{}:
+        nslice := make([]interface{}, 0, len(v))
+        for _, vint := range v {
+            value, err := m.expandStringValues(ctx, vint)
+            if err != nil {
+                return nil, err
+            }
+            nslice = append(nslice, value)
+        }
+        return nslice, nil
+    case map[string]interface{}:
+        nmap := make(map[interface{}]interface{}, len(v))
+        for k, vint := range v {
+            value, err := m.expandStringValues(ctx, vint)
+            if err != nil {
+                return nil, err
+            }
+            nmap[k] = value
+        }
+        return nmap, nil
+    case map[interface{}]interface{}:
+        nmap := make(map[interface{}]interface{}, len(v))
+        for k, vint := range v {
+            value, err := m.expandStringValues(ctx, vint)
+            if err != nil {
+                return nil, err
+            }
+            nmap[k] = value
+        }
+        return nmap, nil
+    default:
+        return v, nil
+    }
+}
+
+// expandConfigSource retrieves data from the specified config source and injects it into
+// the configuration. The Manager tracks sessions and watcher objects as needed.
+func (m *Manager) expandConfigSource(ctx context.Context, cfgSrc configsource.ConfigSource, s string) (interface{}, error) {
+    cfgSrcName, selector, params, err := parseCfgSrc(s)
+    if err != nil {
+        return nil, err
+    }
+
+    retrieved, err := cfgSrc.Retrieve(ctx, selector, params)
+    if err != nil {
+        return nil, fmt.Errorf("config source %q failed to retrieve value: %w", cfgSrcName, err)
+    }
+
+    if watcher, okWatcher := retrieved.(configsource.Watchable); okWatcher {
+        m.watchers = append(m.watchers, watcher)
+    }
+
+    return retrieved.Value(), nil
+}
+
+// expandString expands environment variables and config sources that are specified on the string.
+func (m *Manager) expandString(ctx context.Context, s string) (interface{}, error) {
+    // Code based on os.Expand function. All delimiters that are checked against are
+    // ASCII so bytes are fine for this operation.
+    var buf []byte
+
+    // Using i, j, and w variables to keep correspondence with os.Expand code.
+    // i tracks the index in s from which a slice to be appended to buf should start.
+    // j tracks the char being currently checked and also the end of the slice to be appended to buf.
+    // w tracks the number of characters being consumed after a prefix identifying env vars or config sources.
+    i := 0
+    for j := 0; j < len(s); j++ {
+        if s[j] == expandPrefixChar && j+1 < len(s) {
+            if buf == nil {
+                // Assuming that the length of the string will double after expansion of env vars and config sources.
+                buf = make([]byte, 0, 2*len(s))
+            }
+
+            // Append everything consumed up to the prefix char (but not including the prefix char) to the result.
+            buf = append(buf, s[i:j]...)
+
+            var expandableContent, cfgSrcName string
+            w := 0 // number of bytes consumed on this pass
+
+            switch {
+            case s[j+1] == expandPrefixChar:
+                // Escaping the prefix so $$ becomes a single $ without attempting
+                // to treat the string after it as a config source or env var.
+                expandableContent = string(expandPrefixChar)
+                w = 1 // consumed a single char
+
+            case s[j+1] == '{':
+                // Bracketed usage, consume everything until the first '}' exactly as os.Expand.
+                expandableContent, w = getShellName(s[j+1:])
+                // Allow for some spaces.
+                expandableContent = strings.Trim(expandableContent, " ")
+                if len(expandableContent) > 1 && strings.Contains(expandableContent, string(configSourceNameDelimChar)) {
+                    // Bracketed expandableContent contains ':': treat it as a config source.
+                    cfgSrcName, _ = getShellName(expandableContent)
+                }
+
+            default:
+                // Non-bracketed usage, i.e., found the prefix char; it can be either a config
+                // source or an environment variable.
+                var name string
+                name, w = getShellName(s[j+1:])
+                expandableContent = name // Assume for now that it is an env var.
+
+                // Peek at the next char after name; if it is a config source name delimiter, treat the
+                // remainder of the string as a config source.
+                if j+w+1 < len(s) && s[j+w+1] == configSourceNameDelimChar {
+                    // This is a config source; since it is not delimited it will consume until the end of the string.
+                    cfgSrcName = name
+                    expandableContent = s[j+1:]
+                    w = len(expandableContent) // Set consumed bytes to the length of expandableContent.
+                }
+            }
+
+            switch {
+            case cfgSrcName == "":
+                // Not a config source, expand as os.ExpandEnv.
+                buf = osExpandEnv(buf, expandableContent, w)
+
+            default:
+                // A config source, retrieve and apply results.
+                retrieved, err := m.retrieveConfigSourceData(ctx, cfgSrcName, expandableContent)
+                if err != nil {
+                    return nil, err
+                }
+
+                consumedAll := j+w+1 == len(s)
+                if consumedAll && len(buf) == 0 {
+                    // This is the only expandableContent on the string, config
+                    // source is free to return interface{}.
+                    return retrieved, nil
+                }
+
+                // Either there was a prefix already or there are still
+                // characters to be processed.
+                buf = append(buf, fmt.Sprintf("%v", retrieved)...)
+            }
+
+            j += w    // move the index of the char being checked (j) by the number of characters consumed (w) on this iteration.
+            i = j + 1 // update start index (i) of next slice of bytes to be copied.
+        }
+    }
+
+    if buf == nil {
+        // No changes to original string, just return it.
+        return s, nil
+    }
+
+    // Return whatever was accumulated on the buffer plus the remainder of the original string.
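+    // For example (illustrative): expanding "$FOO/bar" with FOO=x accumulates "x" in buf
+    // and leaves s[i:] == "/bar", so the final result is "x/bar".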
+    return string(buf) + s[i:], nil
+}
+
+func (m *Manager) retrieveConfigSourceData(ctx context.Context, name, cfgSrcInvoke string) (interface{}, error) {
+    cfgSrc, ok := m.configSources[name]
+    if !ok {
+        return nil, newErrUnknownConfigSource(name)
+    }
+
+    // Expand any env vars on the selector and parameters. Nested config source usage
+    // is not supported.
+    cfgSrcInvoke = expandEnvVars(cfgSrcInvoke)
+    retrieved, err := m.expandConfigSource(ctx, cfgSrc, cfgSrcInvoke)
+    if err != nil {
+        return nil, err
+    }
+
+    return retrieved, nil
+}
+
+func newErrUnknownConfigSource(cfgSrcName string) error {
+    return &errUnknownConfigSource{
+        fmt.Errorf(`config source %q not found, if this was intended to be an environment variable use "${%s}" instead`, cfgSrcName, cfgSrcName),
+    }
+}
+
+// parseCfgSrc extracts the reference to a config source from a string value.
+// The caller should check for the error explicitly since it is possible for the
+// other values to have been partially set.
+func parseCfgSrc(s string) (cfgSrcName, selector string, params interface{}, err error) {
+    parts := strings.SplitN(s, string(configSourceNameDelimChar), 2)
+    if len(parts) != 2 {
+        err = fmt.Errorf("invalid config source syntax at %q, it must have at least the config source name and a selector", s)
+        return
+    }
+    cfgSrcName = strings.Trim(parts[0], " ")
+
+    // Separate the multi-line and single-line cases.
+    afterCfgSrcName := parts[1]
+    switch {
+    case strings.Contains(afterCfgSrcName, "\n"):
+        // Multi-line: up to the first \n is the selector; everything after is parsed as YAML.
+        parts = strings.SplitN(afterCfgSrcName, "\n", 2)
+        selector = strings.Trim(parts[0], " ")
+
+        if len(parts) > 1 && len(parts[1]) > 0 {
+            var cp *configparser.Parser
+            cp, err = configparser.NewParserFromBuffer(bytes.NewReader([]byte(parts[1])))
+            if err != nil {
+                return
+            }
+            params = cp.ToStringMap()
+        }
+
+    default:
+        // Single line: parameters as a URL query.
+        const selectorDelim string = "?"
+        parts = strings.SplitN(parts[1], selectorDelim, 2)
+        selector = strings.Trim(parts[0], " ")
+
+        if len(parts) == 2 {
+            paramsPart := parts[1]
+            params, err = parseParamsAsURLQuery(paramsPart)
+            if err != nil {
+                err = fmt.Errorf("invalid parameters syntax at %q: %w", s, err)
+                return
+            }
+        }
+    }
+
+    return cfgSrcName, selector, params, err
+}
+
+func parseParamsAsURLQuery(s string) (interface{}, error) {
+    values, err := url.ParseQuery(s)
+    if err != nil {
+        return nil, err
+    }
+
+    // Transform single-element array values into scalars.
+    params := make(map[string]interface{})
+    for k, v := range values {
+        switch len(v) {
+        case 0:
+            params[k] = nil
+        case 1:
+            var iface interface{}
+            if err = yaml.Unmarshal([]byte(v[0]), &iface); err != nil {
+                return nil, err
+            }
+            params[k] = iface
+        default:
+            // It is a slice; add elements one by one.
+            elemSlice := make([]interface{}, 0, len(v))
+            for _, elem := range v {
+                var iface interface{}
+                if err = yaml.Unmarshal([]byte(elem), &iface); err != nil {
+                    return nil, err
+                }
+                elemSlice = append(elemSlice, iface)
+            }
+            params[k] = elemSlice
+        }
+    }
+    return params, err
+}
+
+// expandEnvVars is used to expand environment variables with the same syntax used
+// by config.Parser.
+func expandEnvVars(s string) string {
+    return os.Expand(s, func(str string) string {
+        // This allows escaping environment variable substitution via $$, e.g.
+        // - $FOO will be substituted with env var FOO
+        // - $$FOO will be replaced with $FOO
+        // - $$$FOO will be replaced with $ + substituted env var FOO
+        if str == "$" {
+            return "$"
+        }
+        return os.Getenv(str)
+    })
+}
+
+// osExpandEnv replicates the internal behavior of os.ExpandEnv when handling env
+// vars, updating the buffer accordingly.
+func osExpandEnv(buf []byte, name string, w int) []byte {
+    switch {
+    case name == "" && w > 0:
+        // Encountered invalid syntax; eat the
+        // characters.
+    case name == "" || name == "$":
+        // Valid syntax, but $ was not followed by a
+        // name. Leave the dollar character untouched.
+        buf = append(buf, expandPrefixChar)
+    default:
+        buf = append(buf, os.Getenv(name)...)
+    }
+
+    return buf
+}
+
+// Below are helper functions used by os.Expand, copied without changes from original sources (env.go).
+
+// isShellSpecialVar reports whether the character identifies a special
+// shell variable such as $*.
+func isShellSpecialVar(c uint8) bool {
+    switch c {
+    case '*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+        return true
+    }
+    return false
+}
+
+// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore.
+func isAlphaNum(c uint8) bool {
+    return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
+}
+
+// getShellName returns the name that begins the string and the number of bytes
+// consumed to extract it. If the name is enclosed in {}, it's part of a ${}
+// expansion and two more bytes are needed than the length of the name.
+func getShellName(s string) (string, int) {
+    switch {
+    case s[0] == '{':
+        if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' {
+            return s[1:2], 3
+        }
+        // Scan to closing brace.
+        for i := 1; i < len(s); i++ {
+            if s[i] == '}' {
+                if i == 1 {
+                    // Bad syntax; eat "${}"
+                    return "", 2
+                }
+                return s[1:i], i + 1
+            }
+        }
+        // Bad syntax; eat "${"
+        return "", 1
+    case isShellSpecialVar(s[0]):
+        return s[0:1], 1
+    }
+    // Scan alphanumerics.
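+    // For example (illustrative): getShellName("FOO/bar") scans "FOO" and returns ("FOO", 3).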
+    var i int
+    for i = 0; i < len(s) && isAlphaNum(s[i]); i++ {
+    }
+    return s[:i], i
+}
diff --git a/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps.yaml b/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps.yaml
new file mode 100644
index 00000000000..7a52ac88023
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps.yaml
@@ -0,0 +1,12 @@
+top0:
+  array0:
+    - $tstcfgsrc:elem0
+    - $tstcfgsrc:elem1
+  array1:
+    - entry:
+        str: $tstcfgsrc:elem0
+    - entry:
+        str: $tstcfgsrc:elem1
+  map0:
+    k0: $tstcfgsrc:k0
+    k1: $tstcfgsrc:k1
diff --git a/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps_expected.yaml b/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps_expected.yaml
new file mode 100644
index 00000000000..9abf036675d
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/arrays_and_maps_expected.yaml
@@ -0,0 +1,12 @@
+top0:
+  array0:
+    - elem0_value
+    - elem1_value
+  array1:
+    - entry:
+        str: elem0_value
+    - entry:
+        str: elem1_value
+  map0:
+    k0: k0_value
+    k1: k1_value
diff --git a/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix.yaml b/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix.yaml
new file mode 100644
index 00000000000..6560b52de07
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix.yaml
@@ -0,0 +1,19 @@
+envvar: $envvar
+escapedDelim: $$envvar
+envvar_bracketed: ${envvar}tests
+envvar_legacy_00: $/not/valid$
+envvar_legacy_01: $not_found_envvar/test
+envvar_legacy_02: ${}/test
+envvar_legacy_03: ${/test
+envvar_legacy_04: ${1}/test
+envvar_legacy_05: $1/test
+cfgsrc_suffix: prefix-$tstcfgsrc:int_key
+cfgsrc_middle: prefix-${tstcfgsrc:int_key}-suffix
+cfgsrc_in_str: integer ${tstcfgsrc:int_key} injected as string
+cfgsrc_params0: ${tstcfgsrc:params_key?p0=true&p1=$envvar&p2=42}
+cfgsrc_params1: "${tstcfgsrc:params_key?p0=false&p1=42&p2=$envvar}"
+cfgsrc_params2: $tstcfgsrc:params_key?p0=$$envvar
+multi_line_envvars: |
+  $tstcfgsrc: params_key
+  p0: $envvar
+  p1: $${envvar}
diff --git a/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix_expected.yaml b/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix_expected.yaml
new file mode 100644
index 00000000000..e9e9ba862c6
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/envvar_cfgsrc_mix_expected.yaml
@@ -0,0 +1,25 @@
+envvar: envvar_value
+escapedDelim: $envvar
+envvar_bracketed: envvar_valuetests
+envvar_legacy_00: $/not/valid$
+envvar_legacy_01: /test
+envvar_legacy_02: /test
+envvar_legacy_03: /test
+envvar_legacy_04: /test
+envvar_legacy_05: /test
+cfgsrc_suffix: prefix-42
+cfgsrc_middle: prefix-42-suffix
+cfgsrc_in_str: integer 42 injected as string
+cfgsrc_params0:
+  p0: true
+  p1: envvar_value
+  p2: 42
+cfgsrc_params1:
+  p0: false
+  p1: 42
+  p2: envvar_value
+cfgsrc_params2:
+  p0: $envvar
+multi_line_envvars:
+  p0: envvar_value
+  p1: ${envvar}
diff --git a/internal/otel_collector/config/internal/configsource/testdata/params_handling.yaml b/internal/otel_collector/config/internal/configsource/testdata/params_handling.yaml
new file mode 100644
index 00000000000..5543d2ee268
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/params_handling.yaml
@@ -0,0 +1,13 @@
+single_line:
+  ex0: $tstcfgsrc:elem0
+  ex1: $tstcfgsrc:elem1?p0=true&p1=a string with spaces&p3=42
+multi_line:
+  k0: |
+    $tstcfgsrc: k0
+  k1: |
+    $tstcfgsrc: k1
+    p0: true
+    p1: a string with spaces
+    p2:
+      p2_0: a nested map0
+      p2_1: true
diff --git a/internal/otel_collector/config/internal/configsource/testdata/params_handling_expected.yaml b/internal/otel_collector/config/internal/configsource/testdata/params_handling_expected.yaml
new file mode 100644
index 00000000000..ce6fc9c55a0
--- /dev/null
+++ b/internal/otel_collector/config/internal/configsource/testdata/params_handling_expected.yaml
@@ -0,0 +1,14 @@
+single_line:
+  ex0:
+  ex1:
+    p0: true
+    p1: a string with spaces
+    p3: 42
+multi_line:
+  k0:
+  k1:
+    p0: true
+    p1: a string with spaces
+    p2:
+      p2_0: a nested map0
+      p2_1: true
diff --git a/internal/otel_collector/config/processor.go b/internal/otel_collector/config/processor.go
new file mode 100644
index 00000000000..75639d66cd5
--- /dev/null
+++ b/internal/otel_collector/config/processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+// Processor is the configuration of a component.Processor. Specific processors must implement
+// this interface and must embed the ProcessorSettings struct or a struct that extends it.
+type Processor interface {
+    identifiable
+    validatable
+
+    privateConfigProcessor()
+}
+
+// Processors is a map of names to Processors.
+type Processors map[ComponentID]Processor
+
+// ProcessorSettings defines common settings for a component.Processor configuration.
+// Specific processors can embed this struct and extend it with more fields if needed.
+//
+// It is highly recommended to "override" the Validate() function.
+//
+// When embedded in the processor config it must carry the `mapstructure:",squash"` tag.
+type ProcessorSettings struct {
+    id ComponentID `mapstructure:"-"`
+}
+
+// NewProcessorSettings returns a new ProcessorSettings with the given ComponentID.
+func NewProcessorSettings(id ComponentID) ProcessorSettings {
+    return ProcessorSettings{id: ComponentID{typeVal: id.Type(), nameVal: id.Name()}}
+}
+
+var _ Processor = (*ProcessorSettings)(nil)
+
+// ID returns the processor ComponentID.
+func (rs *ProcessorSettings) ID() ComponentID {
+    return rs.id
+}
+
+// SetIDName sets the processor name.
+func (rs *ProcessorSettings) SetIDName(idName string) {
+    rs.id.nameVal = idName
+}
+
+// Validate validates the configuration and returns an error if invalid.
+func (rs *ProcessorSettings) Validate() error {
+    return nil
+}
+
+func (rs *ProcessorSettings) privateConfigProcessor() {}
diff --git a/internal/otel_collector/config/receiver.go b/internal/otel_collector/config/receiver.go
new file mode 100644
index 00000000000..1ca35e6105f
--- /dev/null
+++ b/internal/otel_collector/config/receiver.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+// Receiver is the configuration of a component.Receiver. Specific receivers must implement
+// this interface and must embed the ReceiverSettings struct or a struct that extends it.
+type Receiver interface {
+    identifiable
+    validatable
+
+    privateConfigReceiver()
+}
+
+// Receivers is a map of names to Receivers.
+type Receivers map[ComponentID]Receiver
+
+// ReceiverSettings defines common settings for a component.Receiver configuration.
+// Specific receivers can embed this struct and extend it with more fields if needed.
+//
+// It is highly recommended to "override" the Validate() function.
+//
+// When embedded in the receiver config it must carry the `mapstructure:",squash"` tag.
+type ReceiverSettings struct {
+    id ComponentID `mapstructure:"-"`
+}
+
+// NewReceiverSettings returns a new ReceiverSettings with the given ComponentID.
+func NewReceiverSettings(id ComponentID) ReceiverSettings {
+    return ReceiverSettings{id: ComponentID{typeVal: id.Type(), nameVal: id.Name()}}
+}
+
+var _ Receiver = (*ReceiverSettings)(nil)
+
+// ID returns the receiver ComponentID.
+func (rs *ReceiverSettings) ID() ComponentID {
+    return rs.id
+}
+
+// SetIDName sets the receiver name.
+func (rs *ReceiverSettings) SetIDName(idName string) {
+    rs.id.nameVal = idName
+}
+
+// Validate validates the configuration and returns an error if invalid.
+func (rs *ReceiverSettings) Validate() error {
+    return nil
+}
+
+func (rs *ReceiverSettings) privateConfigReceiver() {}
diff --git a/internal/otel_collector/consumer/consumer.go b/internal/otel_collector/consumer/consumer.go
new file mode 100644
index 00000000000..b079981c07a
--- /dev/null
+++ b/internal/otel_collector/consumer/consumer.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumer
+
+import (
+    "context"
+
+    "go.opentelemetry.io/collector/model/pdata"
+)
+
+// Capabilities describes the capabilities of a Processor.
+type Capabilities struct {
+    // MutatesData is set to true if the Consume* function of the
+    // processor modifies the input TraceData or MetricsData argument.
+    // Processors which modify the input data MUST set this flag to true. If the processor
+    // does not modify the data it MUST set this flag to false. If the processor creates
+    // a copy of the data before modifying it, then this flag can be safely set to false.
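+    // For example (illustrative): a processor that redacts span attributes in place
+    // mutates its input and must set MutatesData to true; a consumer that only counts
+    // spans can leave it false.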
+    MutatesData bool
+}
+
+type baseConsumer interface {
+    Capabilities() Capabilities
+}
+
+// Metrics is an interface that receives pdata.Metrics, processes it
+// as needed, and sends it to the next processing node, if any, or to the destination.
+type Metrics interface {
+    baseConsumer
+    // ConsumeMetrics receives pdata.Metrics for consumption.
+    ConsumeMetrics(ctx context.Context, md pdata.Metrics) error
+}
+
+// Traces is an interface that receives pdata.Traces, processes it
+// as needed, and sends it to the next processing node, if any, or to the destination.
+type Traces interface {
+    baseConsumer
+    // ConsumeTraces receives pdata.Traces for consumption.
+    ConsumeTraces(ctx context.Context, td pdata.Traces) error
+}
+
+// Logs is an interface that receives pdata.Logs, processes it
+// as needed, and sends it to the next processing node, if any, or to the destination.
+type Logs interface {
+    baseConsumer
+    // ConsumeLogs receives pdata.Logs for consumption.
+    ConsumeLogs(ctx context.Context, ld pdata.Logs) error
+}
diff --git a/internal/otel_collector/consumer/consumererror/combine.go b/internal/otel_collector/consumer/consumererror/combine.go
new file mode 100644
index 00000000000..5b78d7d8c1a
--- /dev/null
+++ b/internal/otel_collector/consumer/consumererror/combine.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumererror
+
+import (
+    "fmt"
+    "strings"
+)
+
+// Combine converts a list of errors into one error.
+//
+// If any of the errors in errs are Permanent then the returned
+// error will also be Permanent.
+//
+// Any signal data associated with an error from this package
+// will be discarded.
+func Combine(errs []error) error {
+    numErrors := len(errs)
+    if numErrors == 0 {
+        // No errors
+        return nil
+    }
+
+    if numErrors == 1 {
+        return errs[0]
+    }
+
+    errMsgs := make([]string, 0, numErrors)
+    permanent := false
+    for _, err := range errs {
+        if !permanent && IsPermanent(err) {
+            permanent = true
+        }
+        errMsgs = append(errMsgs, err.Error())
+    }
+    err := fmt.Errorf("[%s]", strings.Join(errMsgs, "; "))
+    if permanent {
+        err = Permanent(err)
+    }
+    return err
+}
diff --git a/internal/otel_collector/consumer/consumererror/doc.go b/internal/otel_collector/consumer/consumererror/doc.go
new file mode 100644
index 00000000000..c0ec2958402
--- /dev/null
+++ b/internal/otel_collector/consumer/consumererror/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package consumererror provides wrappers to easily classify errors. This allows
+// appropriate action by error handlers without the need to know each individual
+// error type/instance.
+package consumererror
diff --git a/internal/otel_collector/consumer/consumererror/permanent.go b/internal/otel_collector/consumer/consumererror/permanent.go
new file mode 100644
index 00000000000..c4b2c4fc0a9
--- /dev/null
+++ b/internal/otel_collector/consumer/consumererror/permanent.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumererror
+
+import "errors"
+
+// permanent is an error that will always be returned if its source
+// receives the same inputs.
+type permanent struct {
+    err error
+}
+
+// Permanent wraps an error to indicate that it is a permanent error, i.e. an
+// error that will always be returned if its source receives the same inputs.
+func Permanent(err error) error {
+    return permanent{err: err}
+}
+
+func (p permanent) Error() string {
+    return "Permanent error: " + p.err.Error()
+}
+
+// Unwrap returns the wrapped error for functions Is and As in standard package errors.
+func (p permanent) Unwrap() error {
+    return p.err
+}
+
+// IsPermanent checks if an error was wrapped with the Permanent function, which
+// is used to indicate that a given error will always be returned in the case
+// that its source receives the same input.
+func IsPermanent(err error) bool {
+    if err == nil {
+        return false
+    }
+    return errors.As(err, &permanent{})
+}
diff --git a/internal/otel_collector/consumer/consumererror/signalerrors.go b/internal/otel_collector/consumer/consumererror/signalerrors.go
new file mode 100644
index 00000000000..a1b1f47c130
--- /dev/null
+++ b/internal/otel_collector/consumer/consumererror/signalerrors.go
@@ -0,0 +1,123 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumererror
+
+import (
+    "errors"
+
+    "go.opentelemetry.io/collector/model/pdata"
+)
+
+// Traces is an error that may carry associated Trace data for a subset of received data
+// that failed to be processed or sent.
+type Traces struct {
+    error
+    failed pdata.Traces
+}
+
+// NewTraces creates a Traces that can encapsulate received data that failed to be processed or sent.
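+// A typical usage sketch (illustrative; sendBatch is a hypothetical export call):
+//
+//   if err := sendBatch(td); err != nil {
+//       return consumererror.NewTraces(err, td)
+//   }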
+func NewTraces(err error, failed pdata.Traces) error {
+    return Traces{
+        error:  err,
+        failed: failed,
+    }
+}
+
+// AsTraces finds the first error in err's chain that can be assigned to target. If such an error is found,
+// it is assigned to target and true is returned, otherwise false is returned.
+func AsTraces(err error, target *Traces) bool {
+    if err == nil {
+        return false
+    }
+    return errors.As(err, target)
+}
+
+// GetTraces returns failed traces from the associated error.
+func (err Traces) GetTraces() pdata.Traces {
+    return err.failed
+}
+
+// Unwrap returns the wrapped error for functions Is and As in standard package errors.
+func (err Traces) Unwrap() error {
+    return err.error
+}
+
+// Logs is an error that may carry associated Log data for a subset of received data
+// that failed to be processed or sent.
+type Logs struct {
+    error
+    failed pdata.Logs
+}
+
+// NewLogs creates a Logs that can encapsulate received data that failed to be processed or sent.
+func NewLogs(err error, failed pdata.Logs) error {
+    return Logs{
+        error:  err,
+        failed: failed,
+    }
+}
+
+// AsLogs finds the first error in err's chain that can be assigned to target. If such an error is found,
+// it is assigned to target and true is returned, otherwise false is returned.
+func AsLogs(err error, target *Logs) bool {
+    if err == nil {
+        return false
+    }
+    return errors.As(err, target)
+}
+
+// GetLogs returns failed logs from the associated error.
+func (err Logs) GetLogs() pdata.Logs {
+    return err.failed
+}
+
+// Unwrap returns the wrapped error for functions Is and As in standard package errors.
+func (err Logs) Unwrap() error {
+    return err.error
+}
+
+// Metrics is an error that may carry associated Metrics data for a subset of received data
+// that failed to be processed or sent.
+type Metrics struct {
+    error
+    failed pdata.Metrics
+}
+
+// NewMetrics creates a Metrics that can encapsulate received data that failed to be processed or sent.
+func NewMetrics(err error, failed pdata.Metrics) error {
+    return Metrics{
+        error:  err,
+        failed: failed,
+    }
+}
+
+// AsMetrics finds the first error in err's chain that can be assigned to target. If such an error is found,
+// it is assigned to target and true is returned, otherwise false is returned.
+func AsMetrics(err error, target *Metrics) bool {
+    if err == nil {
+        return false
+    }
+    return errors.As(err, target)
+}
+
+// GetMetrics returns failed metrics from the associated error.
+func (err Metrics) GetMetrics() pdata.Metrics {
+    return err.failed
+}
+
+// Unwrap returns the wrapped error for functions Is and As in standard package errors.
+func (err Metrics) Unwrap() error {
+    return err.error
+}
diff --git a/internal/otel_collector/consumer/consumerhelper/common.go b/internal/otel_collector/consumer/consumerhelper/common.go
new file mode 100644
index 00000000000..bce9aba07a7
--- /dev/null
+++ b/internal/otel_collector/consumer/consumerhelper/common.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumerhelper
+
+import (
+	"errors"
+
+	"go.opentelemetry.io/collector/consumer"
+)
+
+var errNilFunc = errors.New("nil consumer func")
+
+type baseConsumer struct {
+	capabilities consumer.Capabilities
+}
+
+// Option applies a configuration change to a baseConsumer.
+type Option func(*baseConsumer)
+
+// WithCapabilities overrides the default Capabilities reported by a consumer.
+// By default, consumers built by this package report non-mutating capabilities
+// (MutatesData: false).
+func WithCapabilities(capabilities consumer.Capabilities) Option {
+	return func(o *baseConsumer) {
+		o.capabilities = capabilities
+	}
+}
+
+// Capabilities implementation of the base Consumer.
+func (bs baseConsumer) Capabilities() consumer.Capabilities {
+	return bs.capabilities
+}
+
+func newBaseConsumer(options ...Option) *baseConsumer {
+	bs := &baseConsumer{
+		capabilities: consumer.Capabilities{MutatesData: false},
+	}
+
+	for _, op := range options {
+		op(bs)
+	}
+
+	return bs
+}
diff --git a/internal/otel_collector/consumer/consumerhelper/doc.go b/internal/otel_collector/consumer/consumerhelper/doc.go
new file mode 100644
index 00000000000..7dd8fa6fb8f
--- /dev/null
+++ b/internal/otel_collector/consumer/consumerhelper/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package consumerhelper defines types and functions used to create consumer
+// Logs, Metrics, and Traces.
+package consumerhelper
diff --git a/internal/otel_collector/consumer/consumerhelper/logs.go b/internal/otel_collector/consumer/consumerhelper/logs.go
new file mode 100644
index 00000000000..3b29b3338d5
--- /dev/null
+++ b/internal/otel_collector/consumer/consumerhelper/logs.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consumerhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/model/pdata"
+)
+
+// ConsumeLogsFunc is a helper function that is similar to ConsumeLogs.
+type ConsumeLogsFunc func(ctx context.Context, ld pdata.Logs) error
+
+// ConsumeLogs calls f(ctx, ld).
+func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld pdata.Logs) error {
+	return f(ctx, ld)
+}
+
+type baseLogs struct {
+	*baseConsumer
+	ConsumeLogsFunc
+}
+
+// NewLogs returns a consumer.Logs configured with the provided options.
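+//
+// Minimal illustrative sketch (the inline function is an example, any
+// ConsumeLogsFunc works):
+//
+//	lc, err := consumerhelper.NewLogs(func(_ context.Context, ld pdata.Logs) error {
+//		return nil // inspect or forward ld here
+//	})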
+func NewLogs(consume ConsumeLogsFunc, options ...Option) (consumer.Logs, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseLogs{ + baseConsumer: newBaseConsumer(options...), + ConsumeLogsFunc: consume, + }, nil +} diff --git a/internal/otel_collector/consumer/consumerhelper/metrics.go b/internal/otel_collector/consumer/consumerhelper/metrics.go new file mode 100644 index 00000000000..6db7931807d --- /dev/null +++ b/internal/otel_collector/consumer/consumerhelper/metrics.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumerhelper + +import ( + "context" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/pdata" +) + +// ConsumeMetricsFunc is a helper function that is similar to ConsumeMetrics. +type ConsumeMetricsFunc func(ctx context.Context, ld pdata.Metrics) error + +// ConsumeMetrics calls f(ctx, ld). +func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, ld pdata.Metrics) error { + return f(ctx, ld) +} + +type baseMetrics struct { + *baseConsumer + ConsumeMetricsFunc +} + +// NewMetrics returns a consumer.Metrics configured with the provided options. +func NewMetrics(consume ConsumeMetricsFunc, options ...Option) (consumer.Metrics, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseMetrics{ + baseConsumer: newBaseConsumer(options...), + ConsumeMetricsFunc: consume, + }, nil +} diff --git a/internal/otel_collector/consumer/consumerhelper/traces.go b/internal/otel_collector/consumer/consumerhelper/traces.go new file mode 100644 index 00000000000..4756fdfeded --- /dev/null +++ b/internal/otel_collector/consumer/consumerhelper/traces.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumerhelper + +import ( + "context" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/pdata" +) + +// ConsumeTracesFunc is a helper function that is similar to ConsumeTraces. +type ConsumeTracesFunc func(ctx context.Context, ld pdata.Traces) error + +// ConsumeTraces calls f(ctx, ld). +func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, ld pdata.Traces) error { + return f(ctx, ld) +} + +type baseTraces struct { + *baseConsumer + ConsumeTracesFunc +} + +// NewTraces returns a consumer.Traces configured with the provided options. 
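+//
+// Illustrative sketch of building a consumer that mutates data (consumeFn is
+// a hypothetical ConsumeTracesFunc):
+//
+//	tc, err := consumerhelper.NewTraces(consumeFn,
+//		consumerhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))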
+func NewTraces(consume ConsumeTracesFunc, options ...Option) (consumer.Traces, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseTraces{ + baseConsumer: newBaseConsumer(options...), + ConsumeTracesFunc: consume, + }, nil +} diff --git a/internal/otel_collector/consumer/consumertest/base_consumer.go b/internal/otel_collector/consumer/consumertest/base_consumer.go new file mode 100644 index 00000000000..420b205d919 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/base_consumer.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "go.opentelemetry.io/collector/consumer" +) + +type nonMutatingConsumer struct{} + +// Capabilities returns the base consumer capabilities. +func (bc nonMutatingConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} diff --git a/internal/otel_collector/consumer/consumertest/consumer.go b/internal/otel_collector/consumer/consumertest/consumer.go new file mode 100644 index 00000000000..ee5125b4ba5 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/consumer.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/pdata" +) + +// Consumer is a convenience interface that implements all consumer interfaces. +// It has a private function on it to forbid external users from implementing it +// and, as a result, to allow us to add extra functions without breaking +// compatibility. +type Consumer interface { + // Capabilities to implement the base consumer functionality. + Capabilities() consumer.Capabilities + // ConsumeTraces to implement the consumer.Traces. + ConsumeTraces(context.Context, pdata.Traces) error + // ConsumeMetrics to implement the consumer.Metrics. + ConsumeMetrics(context.Context, pdata.Metrics) error + // ConsumeLogs to implement the consumer.Logs. 
+ ConsumeLogs(context.Context, pdata.Logs) error + unexported() +} + +var _ consumer.Logs = (Consumer)(nil) +var _ consumer.Metrics = (Consumer)(nil) +var _ consumer.Traces = (Consumer)(nil) diff --git a/internal/otel_collector/consumer/consumertest/doc.go b/internal/otel_collector/consumer/consumertest/doc.go new file mode 100644 index 00000000000..78a0f4aaf4f --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package consumertest defines types and functions used to help test packages +// implementing the consumer package interfaces. +package consumertest diff --git a/internal/otel_collector/consumer/consumertest/err.go b/internal/otel_collector/consumer/consumertest/err.go new file mode 100644 index 00000000000..21d5a2d5fb0 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/err.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + + "go.opentelemetry.io/collector/model/pdata" +) + +type errConsumer struct { + nonMutatingConsumer + err error +} + +func (er *errConsumer) unexported() {} + +func (er *errConsumer) ConsumeTraces(context.Context, pdata.Traces) error { + return er.err +} + +func (er *errConsumer) ConsumeMetrics(context.Context, pdata.Metrics) error { + return er.err +} + +func (er *errConsumer) ConsumeLogs(context.Context, pdata.Logs) error { + return er.err +} + +// NewErr returns a Consumer that just drops all received data and returns the specified error to Consume* callers. +func NewErr(err error) Consumer { + return &errConsumer{err: err} +} diff --git a/internal/otel_collector/consumer/consumertest/nop.go b/internal/otel_collector/consumer/consumertest/nop.go new file mode 100644 index 00000000000..106b1c090b3 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/nop.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + + "go.opentelemetry.io/collector/model/pdata" +) + +var ( + nopInstance = &nopConsumer{} +) + +type nopConsumer struct { + nonMutatingConsumer +} + +func (nc *nopConsumer) unexported() {} + +func (nc *nopConsumer) ConsumeTraces(context.Context, pdata.Traces) error { + return nil +} + +func (nc *nopConsumer) ConsumeMetrics(context.Context, pdata.Metrics) error { + return nil +} + +func (nc *nopConsumer) ConsumeLogs(context.Context, pdata.Logs) error { + return nil +} + +// NewNop returns a Consumer that just drops all received data and returns no error. +func NewNop() Consumer { + return nopInstance +} diff --git a/internal/otel_collector/consumer/consumertest/sink.go b/internal/otel_collector/consumer/consumertest/sink.go new file mode 100644 index 00000000000..172417d90f0 --- /dev/null +++ b/internal/otel_collector/consumer/consumertest/sink.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumertest + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/pdata" +) + +// TracesSink is a consumer.Traces that acts like a sink that +// stores all traces and allows querying them for testing. +type TracesSink struct { + nonMutatingConsumer + mu sync.Mutex + traces []pdata.Traces + spanCount int +} + +var _ consumer.Traces = (*TracesSink)(nil) + +// ConsumeTraces stores traces to this sink. +func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.traces = append(ste.traces, td) + ste.spanCount += td.SpanCount() + + return nil +} + +// AllTraces returns the traces stored by this sink since last Reset. +func (ste *TracesSink) AllTraces() []pdata.Traces { + ste.mu.Lock() + defer ste.mu.Unlock() + + copyTraces := make([]pdata.Traces, len(ste.traces)) + copy(copyTraces, ste.traces) + return copyTraces +} + +// SpanCount returns the number of spans sent to this sink. +func (ste *TracesSink) SpanCount() int { + ste.mu.Lock() + defer ste.mu.Unlock() + return ste.spanCount +} + +// Reset deletes any stored data. +func (ste *TracesSink) Reset() { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.traces = nil + ste.spanCount = 0 +} + +// MetricsSink is a consumer.Metrics that acts like a sink that +// stores all metrics and allows querying them for testing. +type MetricsSink struct { + nonMutatingConsumer + mu sync.Mutex + metrics []pdata.Metrics + dataPointCount int +} + +var _ consumer.Metrics = (*MetricsSink)(nil) + +// ConsumeMetrics stores metrics to this sink. 
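+//
+// Typical test usage (illustrative; md is any pdata.Metrics value):
+//
+//	var sink consumertest.MetricsSink
+//	_ = sink.ConsumeMetrics(context.Background(), md)
+//	all := sink.AllMetrics() // includes md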
+func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) error {
+	sme.mu.Lock()
+	defer sme.mu.Unlock()
+
+	sme.metrics = append(sme.metrics, md)
+	sme.dataPointCount += md.DataPointCount()
+
+	return nil
+}
+
+// AllMetrics returns the metrics stored by this sink since last Reset.
+func (sme *MetricsSink) AllMetrics() []pdata.Metrics {
+	sme.mu.Lock()
+	defer sme.mu.Unlock()
+
+	copyMetrics := make([]pdata.Metrics, len(sme.metrics))
+	copy(copyMetrics, sme.metrics)
+	return copyMetrics
+}
+
+// DataPointCount returns the number of metrics stored by this sink since last Reset.
+func (sme *MetricsSink) DataPointCount() int {
+	sme.mu.Lock()
+	defer sme.mu.Unlock()
+	return sme.dataPointCount
+}
+
+// Reset deletes any stored data.
+func (sme *MetricsSink) Reset() {
+	sme.mu.Lock()
+	defer sme.mu.Unlock()
+
+	sme.metrics = nil
+	sme.dataPointCount = 0
+}
+
+// LogsSink is a consumer.Logs that acts like a sink that
+// stores all logs and allows querying them for testing.
+type LogsSink struct {
+	nonMutatingConsumer
+	mu             sync.Mutex
+	logs           []pdata.Logs
+	logRecordCount int
+}
+
+var _ consumer.Logs = (*LogsSink)(nil)
+
+// ConsumeLogs stores logs to this sink.
+func (sle *LogsSink) ConsumeLogs(_ context.Context, ld pdata.Logs) error {
+	sle.mu.Lock()
+	defer sle.mu.Unlock()
+
+	sle.logs = append(sle.logs, ld)
+	sle.logRecordCount += ld.LogRecordCount()
+
+	return nil
+}
+
+// AllLogs returns the logs stored by this sink since last Reset.
+func (sle *LogsSink) AllLogs() []pdata.Logs {
+	sle.mu.Lock()
+	defer sle.mu.Unlock()
+
+	copyLogs := make([]pdata.Logs, len(sle.logs))
+	copy(copyLogs, sle.logs)
+	return copyLogs
+}
+
+// LogRecordCount returns the number of log records stored by this sink since last Reset.
+func (sle *LogsSink) LogRecordCount() int {
+	sle.mu.Lock()
+	defer sle.mu.Unlock()
+	return sle.logRecordCount
+}
+
+// Reset deletes any stored data.
+func (sle *LogsSink) Reset() {
+	sle.mu.Lock()
+	defer sle.mu.Unlock()
+
+	sle.logs = nil
+	sle.logRecordCount = 0
+}
diff --git a/internal/otel_collector/consumer/doc.go b/internal/otel_collector/consumer/doc.go
new file mode 100644
index 00000000000..4dea0ed3b99
--- /dev/null
+++ b/internal/otel_collector/consumer/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package consumer contains interfaces that receive and process consumerdata.
+package consumer
diff --git a/internal/otel_collector/docs/design.md b/internal/otel_collector/docs/design.md
new file mode 100644
index 00000000000..119249fd966
--- /dev/null
+++ b/internal/otel_collector/docs/design.md
@@ -0,0 +1,232 @@
+# OpenTelemetry Collector Architecture
+
+This document describes the architecture design and implementation of
+OpenTelemetry Collector.
+
+## Summary
+
+OpenTelemetry Collector is an executable that can receive telemetry data, optionally transform it, and send the data further.
+
+The Collector supports several popular open-source protocols for receiving and sending telemetry data, and offers a pluggable architecture for adding more protocols.
+
+Data receiving, transformation, and sending are done using Pipelines. The Collector can be configured to have one or more Pipelines. Each Pipeline includes a set of Receivers that receive the data, a series of optional Processors that get the data from receivers and transform it, and a set of Exporters which get the data from the Processors and send it further outside the Collector. The same receiver can feed data to multiple Pipelines, and multiple Pipelines can feed data into the same Exporter.
+
+## Pipelines
+
+A Pipeline defines the path the data follows in the Collector: starting from reception, then further processing or modification, and finally exiting the Collector via exporters.
+
+Pipelines can operate on two telemetry data types: traces and metrics. The data type is a property of the pipeline defined by its configuration. Receivers, exporters, and processors used in a pipeline must support the particular data type; otherwise `ErrDataTypeIsNotSupported` will be reported when the configuration is loaded. A pipeline can be depicted the following way:
+
+![Pipelines](images/design-pipelines.png)
+
+There can be one or more receivers in a pipeline. Data from all receivers is pushed to the first processor, which performs processing on it and then pushes it to the next processor (or it may drop the data, e.g. if it is a “sampling” processor), and so on until the last processor in the pipeline pushes the data to the exporters. Each exporter gets a copy of each data element. The last processor uses a `FanOutConnector` to fan out the data to multiple exporters.
+
+The pipeline is constructed during Collector startup based on the pipeline definitions in the config file.
+
+A pipeline configuration typically looks like this:
+
+```yaml
+service:
+  pipelines: # section that can contain multiple subsections, one per pipeline
+    traces:  # type of the pipeline
+      receivers: [otlp, jaeger, zipkin]
+      processors: [memory_limiter, batch]
+      exporters: [otlp, jaeger, zipkin]
+```
+
+The above example defines a pipeline for the “traces” type of telemetry data, with 3 receivers, 2 processors, and 3 exporters.
+
+For details of the config file format, see [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#).
+
+### Receivers
+
+Receivers typically listen on a network port and receive telemetry data. Usually one receiver is configured to send received data to one pipeline; however, it is also possible to configure the same receiver to send the same received data to multiple pipelines. This can be done by simply listing the same receiver in the “receivers” key of several pipelines:
+
+```yaml
+receivers:
+  opencensus:
+    endpoint: "0.0.0.0:55678"
+
+service:
+  pipelines:
+    traces:  # a pipeline of “traces” type
+      receivers: [opencensus]
+      processors: [memory_limiter, batch]
+      exporters: [jaeger]
+    traces/2:  # another pipeline of “traces” type
+      receivers: [opencensus]
+      processors: [batch]
+      exporters: [opencensus]
+```
+
+In the above example the “opencensus” receiver will send the same data to pipeline “traces” and to pipeline “traces/2”. (Note: the configuration uses composite key names in the form of `type[/name]` as defined in [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#)).
+
+When the Collector loads this config, the result will look like this (part of the processors and exporters are omitted from the diagram for brevity):
+
+
+![Receivers](images/design-receivers.png)
+
+Important: when the same receiver is referenced in more than one pipeline, the Collector will create only one receiver instance at runtime that will send the data to a `FanOutConnector`, which in turn will send the data to the first processor of each pipeline. The data propagation from receiver to `FanOutConnector` and then to processors is via a synchronous function call. This means that if one processor blocks the call, the other pipelines that are attached to this receiver will be blocked from receiving the same data, and the receiver itself will stop processing and forwarding newly received data.
+
+### Exporters
+
+Exporters typically forward the data they get to a destination on a network (but they can also send it elsewhere; e.g. the “logging” exporter writes the telemetry data to a local file).
+
+The configuration allows multiple exporters of the same type, even in the same pipeline. For example, one can have two “opencensus” exporters defined, each one sending to a different OpenCensus endpoint, e.g.:
+
+```yaml
+exporters:
+  opencensus/1:
+    endpoint: "example.com:14250"
+  opencensus/2:
+    endpoint: "0.0.0.0:14250"
+```
+
+Usually an exporter gets the data from one pipeline; however, it is possible to configure multiple pipelines to send data to the same exporter, e.g.:
+
+```yaml
+exporters:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: "0.0.0.0:14250"
+
+service:
+  pipelines:
+    traces:  # a pipeline of “traces” type
+      receivers: [zipkin]
+      processors: [memory_limiter]
+      exporters: [jaeger]
+    traces/2:  # another pipeline of “traces” type
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [jaeger]
+```
+
+In the above example the “jaeger” exporter will get data from pipeline “traces” and from pipeline “traces/2”. When the Collector loads this config, the result will look like this (part of the processors and receivers are omitted from the diagram for brevity):
+
+
+![Exporters](images/design-exporters.png)
+
+### Processors
+
+A pipeline can contain sequentially connected processors. The first processor gets the data from one or more receivers that are configured for the pipeline, and the last processor sends the data to one or more exporters that are configured for the pipeline. All processors between the first and last receive the data strictly from one preceding processor and send data strictly to the succeeding processor.
+
+Processors can transform the data before forwarding it (e.g. add or remove attributes from spans), they can drop the data simply by deciding not to forward it (this is, for example, how a “sampling” processor works), and they can also generate new data (this is, for example, how a “persistent-queue” processor could work after a Collector restart, by reading previously saved data from a local file and forwarding it on the pipeline).
+
+The same processor name can be referenced in the “processors” key of multiple pipelines. In this case the same configuration will be used for each of these processors; however, each pipeline will always get its own instance of the processor. Each of these processors will have its own state; the processors are never shared between pipelines.
For example, if the “batch” processor is used in several pipelines, each pipeline will have its own batch processor (although each batch processor will be configured exactly the same way if they reference the same key in the config file). As an example, given the following config:
+
+```yaml
+processors:
+  batch:
+    send_batch_size: 10000
+    timeout: 10s
+
+service:
+  pipelines:
+    traces:  # a pipeline of “traces” type
+      receivers: [zipkin]
+      processors: [batch]
+      exporters: [jaeger]
+    traces/2:  # another pipeline of “traces” type
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+```
+
+When the Collector loads this config, the result will look like this:
+
+
+![Processors](images/design-processors.png)
+
+Note that each “batch” processor is an independent instance, although both are configured the same way, i.e. each has a send_batch_size of 10000.
+
+## Running as an Agent
+
+On a typical VM/container, there are user applications running in some
+processes/pods with OpenTelemetry Library (Library). Previously, Library did
+all the recording, collecting, sampling and aggregation on spans/stats/metrics,
+and exported them to other persistent storage backends via the Library
+exporters, or displayed them on local zpages. This pattern has several
+drawbacks, for example:
+
+1. For each OpenTelemetry Library, exporters/zpages need to be re-implemented
+   in native languages.
+2. In some programming languages (e.g. Ruby, PHP), it is difficult to do the
+   stats aggregation in process.
+3. To enable exporting OpenTelemetry spans/stats/metrics, application users
+   need to manually add library exporters and redeploy their binaries. This is
+   especially difficult when there’s already an incident and users want to use
+   OpenTelemetry to investigate what’s going on right away.
+4. Application users need to take the responsibility of configuring and
+   initializing exporters. This is error-prone (e.g. they may not set up the
+   correct credentials/monitored resources), and users may be reluctant to
+   “pollute” their code with OpenTelemetry.
+
+To resolve the issues above, you can run OpenTelemetry Collector as an Agent.
+The Agent runs as a daemon in the VM/container and can be deployed independently
+of Library. Once Agent is deployed and running, it should be able to retrieve
+spans/stats/metrics from Library and export them to other backends. We MAY also
+give Agent the ability to push configurations (e.g. sampling probability) to
+Library. For those languages that cannot do stats aggregation in process, they
+should also be able to send raw measurements and have Agent do the aggregation.
+
+
+![agent-architecture](images/design-collector-agent.png)
+
+For developers/maintainers of other libraries: Agent can also
+accept spans/stats/metrics from other tracing/monitoring libraries, such as
+Zipkin, Prometheus, etc. This is done by adding specific receivers. See
+[Receivers](#receivers) for details.
+
+## Running as a Standalone Collector
+
+The OpenTelemetry Collector can run as a standalone instance and receive spans
+and metrics exported by one or more Agents or Libraries, or by
+tasks/agents that emit in one of the supported protocols. The Collector is
+configured to send data to the configured exporter(s).
The following figure
+summarizes the deployment architecture:
+
+
+![OpenTelemetry Collector Architecture](images/design-collector-service.png "OpenTelemetry Collector Architecture")
+
+The OpenTelemetry Collector can also be deployed in other configurations, such
+as receiving data from other agents or clients in one of the formats supported
+by its receivers.
+
+
+### OpenCensus Protocol
+
+TODO: move this section somewhere else since this document is intended to describe non-protocol specific functionality.
+
+OpenCensus Protocol uses a bi-directional gRPC
+stream. Sender should initiate the connection, since there’s only one
+dedicated port for Agent, while there could be multiple instrumented processes. By default, the Collector is available on port 55678.
+
+#### Protocol Workflow
+
+1. Sender will try to directly establish connections for Config and Export
+   streams.
+2. As the first message in each stream, Sender must send its identifier. Each
+   identifier should uniquely identify Sender within the VM/container. If
+   there is no identifier in the first message, Collector should drop the whole
+   message and return an error to the client. In addition, the first message
+   MAY contain additional data (such as `Span`s). As long as it has a valid
+   identifier associated, Collector should handle the data properly, as if they
+   were sent in a subsequent message. Identifier is no longer needed once the
+   streams are established.
+3. On Sender side, if the connection to Collector fails, Sender should retry
+   indefinitely if possible, subject to available/configured memory buffer size.
+   (Reason: consider environments where the running applications are already
+   instrumented with OpenTelemetry Library but Collector is not deployed yet.
+   Sometime in the future, we can simply roll out the Collector to those
+   environments and Library would automatically connect to Collector with
+   indefinite retries. Zero changes are required to the applications.)
+   Depending on the language and implementation, retry can be done in either
+   background or a daemon thread. Retry should be performed at a fixed
+   frequency (rather than exponential backoff) to have a deterministic expected
+   connect time.
+4. On Collector side, if an established stream is disconnected, the identifier of
+   the corresponding Sender is considered expired. Sender needs to
+   start a new connection with a unique identifier (MAY be different than the
+   previous one).
diff --git a/internal/otel_collector/docs/ga-roadmap.md b/internal/otel_collector/docs/ga-roadmap.md
new file mode 100644
index 00000000000..5d2b3644125
--- /dev/null
+++ b/internal/otel_collector/docs/ga-roadmap.md
@@ -0,0 +1,172 @@
+# Collector GA Roadmap
+
+This document defines the roadmap followed by the OpenTelemetry Collector,
+along with tentative dates and requirements for GA (stability).
+
+In this document, the term “OpenTelemetry Collector packages” refers to all the golang
+modules and packages that are part of the “OpenTelemetry Collector” ecosystem, which
+includes [core](https://github.com/open-telemetry/opentelemetry-collector) and
+[contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib).
+
+In this document, the terms "OpenTelemetry Collector" and "Collector" both specifically
+refer to the entire OpenTelemetry Collector ecosystem, including core and contrib.
+These terms do not refer to the specification or the Client libraries in this document.
+
+## Current Status
+
+The OpenTelemetry Collector ecosystem right now has a lot of packages that are in different
+stages of stability (experimental, alpha, beta, etc.). All these packages have different
+public APIs/Interfaces (e.g. code API, configuration, etc.).
+
+A significant amount of legacy code was inherited from the Collector's ancestor
+[OpenCensus Service](https://github.com/census-instrumentation/opencensus-service); since then
+the Collector has changed its internal data model and made other significant changes.
+
+Trying to mark the entire ecosystem GA at the same moment would be a significant effort and
+would take a significant amount of time.
+
+## Proposal
+
+This document proposes a GA Roadmap based on multiple phases, where different parts of the
+Collector will be released as stable at different points in time.
+
+At this moment we are completely defining only the first two phases of the process; the
+next phases will be defined at a later stage, once the Collector maintainers have a
+better understanding of the process and its implications.
+
+The primary focus is on the tracing parts. When the other signals' data models (proto definitions)
+are marked as stable, the amount of work necessary to stabilize their APIs will be minimal:
+`pdata` is auto-generated so all changes that we do for traces will apply to all of them,
+`consumer` is a minimal interface, and so is `component`.
+
+Metrics components (such as `receiver/prometheus` and `exporter/prometheusremotewrite`) are
+explicitly left out of this roadmap document because the metrics data model is not complete.
+When that work finishes, we can add them to Phase 3, or later.
+
+### Phase 1
+
+**Tentative Date:** 2021-03-31
+
+**Key Results:** At the end of this phase the Collector’s core API will be marked as Stable.
+
+At the end of this phase we want to achieve core API stability: the APIs that allow developers
+to implement custom components and extend the Collector will be marked as stable.
+The complete list of the packages/modules will be finalized during the first action item of
+this phase, but the tentative list is:
+
+* `consumer`
+  * Official internal data model `pdata`.
+  * Interfaces and utils to build a Consumer (traces, metrics, logs).
+* `config`
+  * Core `config`, including the service definition and component definition, will be stabilized.
+  * To be determined which config helpers will be marked as stable (e.g. configgrpc, etc.).
+* `component`
+  * Interfaces and utils to build a Collector component (receiver, processor, exporter, extension).
+* `obsreport`
+  * Focus on the public API of this package. It is out of scope to ensure stability for the
+    metrics emitted (focus in phase 2).
+* `service`
+  * Public API to construct an OpenTelemetry Collector Service.
+
+**Action Items:**
+
+* Create a new milestone for this phase, create issues for all the other action items and add
+them to the milestone.
+* Agreement on all packages/modules that will be marked as stable during this phase.
+* Write a version doc as per [version and stability document](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/versioning-and-stability.md).
+  * Previously it was discussed that for the Collector it is fine to release stable golang modules
+    that contain APIs marked as experimental.
+  * Define the status schema (experimental/stable) and whether it is applicable to every module.
+* Investigate if splitting into smaller, more granular, modules is possible.
+  * Define the modules schema; try not to break everyone.
+    See [here](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository).
+  * Investigate how we can release multiple golang modules from the same repo, without asking
+    people that consume them to use a replace statement. See problem in contrib.
+  * Investigate how to release test-only utils.
+* Review all public APIs and godocs for modules that we want to release in this phase.
+  * Fix all critical issues, and remove unnecessary (“When in doubt leave it out”) public APIs.
+  * Remove all already deprecated code from the stable modules.
+* Transition to the opentelemetry-go trace library from opencensus?
+* Investigate if any config helper needs to be released as stable; if any do, review
+their public API, godoc and configuration.
+* Investigate tools that check API compatibility for go modules, and enable them for modules
+that we mark as stable.
+
+### Phase 2
+
+**Tentative Date:** 2021-04-30
+
+**Key Results:** At the end of this phase the Collector’s end-to-end support for OTLP traces
+only will be marked as GA.
+
+At the end of this phase we want to ensure that the Collector can be run in production: it can receive
+OTLP trace traffic and emit OTLP trace traffic. The complete list of the packages/modules will be
+finalized during the first part of this phase, but the tentative list is:
+
+* `receiver`
+  * `receiverhelper` - without scraper utils in this phase.
+  * `otlp`
+* `processor`
+  * `processorhelper`
+  * `batch`
+  * `memory_limiter`
+* `exporter`
+  * `exporterhelper`
+  * `otlp`
+  * `otlphttp`
+* `extension`
+  * `extensionhelper`
+  * `healthcheck`
+* `obsreport`
+  * Stabilize the observability metrics (user public metrics).
+
+**Action Items:**
+
+* Create a new milestone for this phase, create issues for all the other action items and add them
+to the milestone.
+* Agreement on all packages/modules that will be marked as stable during this phase.
+* Review all public APIs and godocs for modules that we want to release in this phase.
+  * Fix all critical issues, and remove unnecessary (“When in doubt leave it out”) public APIs.
+  * Remove all already deprecated code from the stable modules.
+* Review all public configuration for all the modules, fix issues.
+* Set up a proper loadtest environment and continuously publish results.
+* Ensure correctness tests produce the expected results, improve until confident that a binary
+that passes them is good to be shipped.
+* Enable security checks on every PR (currently some are ignored like `codeql`).
+
+### Phase 3
+
+**Tentative Date:** 2021-05-31
+
+**Key Results:** At the end of this phase all of the Collector’s core components (receivers,
+processors, exporters, extensions) for traces only will be marked as GA.
+
+At the end of this phase we want to ensure that the Collector can be run in production: it can receive
+trace traffic and emit OTLP trace traffic. The complete list of the packages/modules will be finalized
+during the first part of this phase, but the tentative list is:
+
+* `receiver`
+  * `jaeger`
+  * `opencensus`
+  * `zipkin`
+* `processor`
+  * `spantransformer` - there are good reasons to merge `attributes` and `span`.
+  * `resource`
+  * `filter` - we will consider offering a filter processor for all telemetry signals, not just for metrics.
+* `exporter`
+  * `jaeger`
+  * `opencensus`
+  * `zipkin`
+* `extension`
+  * `pprof`
+  * `zpages`
+
+TODO: Add action items list.
+
+### Phase N
+
+TODO: Add more phases if/when necessary.
+
+## Alternatives
+
+One alternative proposal is to try to GA all packages at the same time. This proposal was rejected
+because the complexity and size of the ecosystem could force the GA process to take too much time.
diff --git a/internal/otel_collector/docs/images/design-collector-agent.png b/internal/otel_collector/docs/images/design-collector-agent.png
new file mode 100644
index 00000000000..7f50eb66fa6
Binary files /dev/null and b/internal/otel_collector/docs/images/design-collector-agent.png differ
diff --git a/internal/otel_collector/docs/images/design-collector-service.png b/internal/otel_collector/docs/images/design-collector-service.png
new file mode 100644
index 00000000000..83f75e950f8
Binary files /dev/null and b/internal/otel_collector/docs/images/design-collector-service.png differ
diff --git a/internal/otel_collector/docs/images/design-exporters.png b/internal/otel_collector/docs/images/design-exporters.png
new file mode 100644
index 00000000000..d19d41d36d3
Binary files /dev/null and b/internal/otel_collector/docs/images/design-exporters.png differ
diff --git a/internal/otel_collector/docs/images/design-pipelines.png b/internal/otel_collector/docs/images/design-pipelines.png
new file mode 100644
index 00000000000..1b58d7fc0d9
Binary files /dev/null and b/internal/otel_collector/docs/images/design-pipelines.png differ
diff --git a/internal/otel_collector/docs/images/design-processors.png b/internal/otel_collector/docs/images/design-processors.png
new file mode 100644
index 00000000000..8026ada37b6
Binary files /dev/null and b/internal/otel_collector/docs/images/design-processors.png differ
diff --git a/internal/otel_collector/docs/images/design-receivers.png b/internal/otel_collector/docs/images/design-receivers.png
new file mode 100644
index 00000000000..ca1f647bb69
Binary files /dev/null and b/internal/otel_collector/docs/images/design-receivers.png differ
diff --git a/internal/otel_collector/docs/images/design-service-lifecycle.png b/internal/otel_collector/docs/images/design-service-lifecycle.png
new file mode 100644
index 00000000000..ad55af383f0
Binary files /dev/null and b/internal/otel_collector/docs/images/design-service-lifecycle.png differ
diff --git a/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png b/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png
new file mode 100644
index 00000000000..b977c0de015
Binary files /dev/null and b/internal/otel_collector/docs/images/opentelemetry-service-deployment-models.png differ
diff --git a/internal/otel_collector/docs/images/zpages-example.png b/internal/otel_collector/docs/images/zpages-example.png
new file mode 100644
index 00000000000..168004dd6fc
Binary files /dev/null and b/internal/otel_collector/docs/images/zpages-example.png differ
diff --git a/internal/otel_collector/docs/metric-metadata.md b/internal/otel_collector/docs/metric-metadata.md
new file mode 100644
index 00000000000..03b5d6dee20
--- /dev/null
+++ b/internal/otel_collector/docs/metric-metadata.md
@@ -0,0 +1,19 @@
+# Metric Receiver Metadata
+
+Receivers can contain a `metadata.yaml` file that documents the metrics that may be emitted by the receiver.
+
+Current examples:
+
+* [hostmetricsreceiver](../receiver/hostmetricsreceiver/metadata.yaml)
+
+See [metric-metadata.yaml](metric-metadata.yaml) for file format documentation.
+
+If adding a new receiver, a `codegen.go` file should also be added to trigger the generation; a sketch is shown below. See the Build section for details.
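+
+As a rough sketch, such a `codegen.go` file is typically just a package clause plus a `go:generate` directive (the package name below is hypothetical; match it to the actual receiver package):
+
+```go
+// Package samplereceiver is a placeholder name used here only to illustrate
+// where the go:generate directive lives in a receiver's codegen.go file.
+package samplereceiver
+
+//go:generate mdatagen metadata.yaml
+```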
+
+## Build
+
+When `go generate` is run (it is run automatically in the make build targets), there are a few special build directives in `codegen.go` files:
+
+`make install-tools` results in `cmd/mdatagen` being installed to `GOBIN`.
+
+[/receiver/hostmetricsreceiver/codegen.go](../receiver/hostmetricsreceiver/codegen.go) runs `mdatagen` for the `hostmetricsreceiver` metadata.yaml, which generates the [/receiver/hostmetricsreceiver/internal/metadata](../receiver/hostmetricsreceiver/internal/metadata) package containing Go files with metric and label metadata.
diff --git a/internal/otel_collector/docs/metric-metadata.yaml b/internal/otel_collector/docs/metric-metadata.yaml
new file mode 100644
index 00000000000..4167b5df086
--- /dev/null
+++ b/internal/otel_collector/docs/metric-metadata.yaml
@@ -0,0 +1,33 @@
+# Required: name of the receiver.
+name:
+
+# Optional: map of label definitions with the key being the label name and value
+# being described below.
+labels:
+  label.name:
+    # Optional: if the label name as described by the key is not the actual label
+    # value to be reported that value can be overridden here.
+    value:
+    # Required: description of the label.
+    description:
+    # Optional: array of label values if they are static values.
+    enum:
+
+# Required: map of metric names with the key being the metric name and value
+# being described below.
+metrics:
+  metric.name:
+    # Required: metric description.
+    description:
+    # Required: metric unit as defined by https://ucum.org/ucum.html
+    unit:
+    # Required
+    data:
+      # Required: one of int gauge, int sum, gauge, sum, or histogram.
+      type:
+      # Required for int sum and sum.
+      monotonic: # true | false
+      # Required for int sum, sum, and histogram.
+      aggregation: # delta | cumulative
+    # Optional: array of labels that were defined in the labels section that are emitted by this metric.
+    labels:
diff --git a/internal/otel_collector/docs/migrating-from-opencensus.md b/internal/otel_collector/docs/migrating-from-opencensus.md
new file mode 100644
index 00000000000..f8a3a952e44
--- /dev/null
+++ b/internal/otel_collector/docs/migrating-from-opencensus.md
@@ -0,0 +1,47 @@
+## Action Plan for Bootstrapping from OpenCensus
+
+### Goals
+We need to bootstrap the OpenTelemetry Collector using the existing OpenCensus Service codebase. We agreed to split the Service codebase into 2 parts: core and contrib. This bootstrapping is a good opportunity to do the splitting by only including in the OpenTelemetry Collector core the minimum number of receivers and exporters and moving the rest of the functionality to a contrib package (most vendor-specific code).
+
+The contrib package and vendor-specific receivers and exporters will continue to be available and there is no intent to retire it. The intent is to have a clear decoupling in the codebase that facilitates independent contribution of new components in the future, makes it easy to create customized versions of a Service, and makes it clear that core contributors will be responsible for maintenance of the core while vendor-specific components will be maintained by the corresponding vendors (note: this does not exclude dual participation at all - some developers will likely work for vendors and will also be core maintainers).
+
+## Migration Tasks
+
+This is the action plan that also shows the progress. Tick the boxes after the task is complete.
+
+[X] Copy all commits from https://github.com/census-instrumentation/opencensus-service to https://github.com/open-telemetry/opentelemetry-service
+Make sure commit history is preserved.
+
+[X] Remove receivers and exporters that are not part of core. We will keep the following in the core:
+
+- Prometheus
+- Jaeger (agent and collector ones)
+- Zipkin
+- OpenCensus, temporarily until OpenTelemetry one is available (we may want to keep OC for longer to facilitate migrations)
+
+[ ] Cleanly decouple `core` from `cmd` in the repository. `core` will contain all business logic. `cmd` will be just a main.go that executes the business logic and compiles to the `otsvc` executable.
+
+`otsvc` will only include receivers and exporters which we consider to be part of the core.
+
+The new codebase will contain improvements which are already in progress and which are aimed at making the codebase extensible and enabling the split into core and contrib. This includes 3 initiatives:
+
+- Decoupling of receiver and exporter implementations from the core logic.
+
+- Introduction of receiver and exporter factories that can be individually registered to activate them.
+
+- Implementation of the [new configuration format](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#) that makes use of factories and allows for greater flexibility in the configuration.
+
+The functionality of the new `otsvc` will lean heavily on the existing implementation and will be mostly a superset of the current agent/collector functionality when considering core receivers and exporters only (however we will allow deviations if it saves significant implementation effort and makes the service better).
+
+[ ] Provide guidelines and example implementations for vendors to follow when they add new receivers and exporters to the contrib package.
+
+[ ] Create a new repository for contrib and copy all commits from https://github.com/census-instrumentation/opencensus-service to https://github.com/open-telemetry/opentelemetry-service
+Make sure commit history is preserved.
+
+[ ] Clean up the `contrib` repo to only contain additional vendor-specific receivers and exporters.
+
+(Note: alternatively `contrib` can be a directory in the main repo - this is still open for discussion).
+
+[ ] Provide OpenCensus-to-OpenTelemetry Collector migration guidelines for end-users who want to migrate. This will include recommendations on configuration file migration. We will also consider the possibility of supporting the old configuration format in the new binary.
+
+This approach allows us to make significant progress towards 2 stated goals in our [vision document](./vision.md): unify the codebase for agent and collector and make the service more extensible.
diff --git a/internal/otel_collector/docs/monitoring.md b/internal/otel_collector/docs/monitoring.md
new file mode 100644
index 00000000000..8605835b7cd
--- /dev/null
+++ b/internal/otel_collector/docs/monitoring.md
@@ -0,0 +1,70 @@
+# Monitoring
+
+Many metrics are provided by the Collector for its own monitoring. Some
+key recommendations for alerting and monitoring are listed below. All metrics
+referenced below are using the `--new-metrics` option that is enabled by
+default.
+
+## Critical Monitoring
+
+### Data Loss
+
+Use the rate of `otelcol_processor_dropped_spans > 0` and
+`otelcol_processor_dropped_metric_points > 0` to detect data loss. Depending on
+the requirements, set up a minimal time window before alerting to avoid
+notifications for small losses that are not considered outages or are within the
+desired reliability level.
+
+### Low on CPU Resources
+
+This depends on the CPU metrics available on the deployment, e.g.
+`kube_pod_container_resource_limits_cpu_cores` for Kubernetes. Let's call it
+`available_cores` below. The idea here is to have an upper bound on the number
+of available cores, and the maximum expected ingestion rate considered safe,
+let's call it `safe_rate`, per core. This should trigger an increase of resources/
+instances (or raise an alert as appropriate) whenever
+`(actual_rate/available_cores) > safe_rate`. For example, with 4 available cores
+and a `safe_rate` of 10K spans/sec per core, a sustained ingestion rate above
+40K spans/sec should trigger scaling or an alert.
+
+The `safe_rate` depends on the specific configuration being used.
+// TODO: Provide reference `safe_rate` for a few selected configurations.
+
+## Secondary Monitoring
+
+### Queue Length
+
+Most exporters offer a [queue/retry mechanism](../exporter/exporterhelper/README.md)
+that is recommended as the retry mechanism for the Collector and as such should
+be used in any production deployment.
+
+**TODO:** Add metric to monitor queue length.
+
+Currently, the queue/retry mechanism only supports logging for monitoring. Check
+the logs for messages like `"Dropping data because sending_queue is full"`.
+
+### Receive Failures
+
+Sustained rates of `otelcol_receiver_refused_spans` and
+`otelcol_receiver_refused_metric_points` indicate too many errors returned to
+clients. Depending on the deployment and the clients’ resilience, this may
+indicate data loss at the clients.
+
+Sustained rates of `otelcol_exporter_send_failed_spans` and
+`otelcol_exporter_send_failed_metric_points` indicate that the Collector is not
+able to export data as expected.
+It doesn't imply data loss per se, since there could be retries, but a high rate
+of failures could indicate issues with the network or with the backend receiving
+the data.
+
+## Data Flow
+
+### Data Ingress
+
+The `otelcol_receiver_accepted_spans` and
+`otelcol_receiver_accepted_metric_points` metrics provide information about
+the data ingested by the Collector.
+
+### Data Egress
+
+The `otelcol_exporter_sent_spans` and
+`otelcol_exporter_sent_metric_points` metrics provide information about
+the data exported by the Collector.
diff --git a/internal/otel_collector/docs/observability.md b/internal/otel_collector/docs/observability.md
new file mode 100644
index 00000000000..bf33f082e5f
--- /dev/null
+++ b/internal/otel_collector/docs/observability.md
@@ -0,0 +1,91 @@
+# OpenTelemetry Collector Observability
+
+## Goal
+
+The goal of this document is to have a comprehensive description of the observability of the Collector and the changes needed to achieve the observability part of our [vision](vision.md).
+
+## What Needs Observation
+
+The following elements of the Collector need to be observable.
+
+### Current Values
+
+- Resource consumption: CPU, RAM (in the future also IO - if we implement persistent queues) and any other metrics that may be available to Go apps (e.g. garbage size, etc.).
+
+- Receiving data rate, broken down by receivers and by data type (traces/metrics).
+
+- Exporting data rate, broken down by exporters and by data type (traces/metrics).
+
+- Data drop rate due to throttling, broken down by data type.
+
+- Data drop rate due to invalid data received, broken down by data type.
+
+- Current throttling state: Not Throttled/Throttled by Downstream/Internally Saturated.
+
+- Incoming connection count, broken down by receiver.
+
+- Incoming connection rate (new connections per second), broken down by receiver.
+
+- In-memory queue size (in bytes and in units). Note: measurements in bytes may be difficult / expensive to obtain and should be used cautiously.
+
+- Persistent queue size (when supported).
+
+- End-to-end latency (from receiver input to exporter output). Note that with multiple receivers/exporters we potentially have NxM data paths, each with different latency (plus different pipelines in the future), so realistically we should likely expose the average of all data paths (perhaps broken down by pipeline).
+
+- Latency broken down by pipeline elements (including exporter network roundtrip latency for request/response protocols).
+
+“Rate” values must reflect the average rate of the last 10 seconds. Rates must be exposed in bytes/sec and units/sec (e.g. spans/sec).
+
+Note: some of the current values and rates may be calculated as derivatives of cumulative values in the backend, so it is an open question whether we want to expose them separately or not.
+
+### Cumulative Values
+
+- Total received data, broken down by receivers and by data type (traces/metrics).
+
+- Total exported data, broken down by exporters and by data type (traces/metrics).
+
+- Total dropped data due to throttling, broken down by data type.
+
+- Total dropped data due to invalid data received, broken down by data type.
+
+- Total incoming connection count, broken down by receiver.
+
+- Uptime since start.
+
+### Trace or Log on Events
+
+We want to generate the following events (log and/or send as a trace with additional data):
+
+- Collector started/stopped.
+
+- Collector reconfigured (if we support on-the-fly reconfiguration).
+
+- Begin dropping due to throttling (include throttling reason, e.g. local saturation, downstream saturation, downstream unavailable, etc.).
+
+- Stop dropping due to throttling.
+
+- Begin dropping due to invalid data (include sample/first invalid data).
+
+- Stop dropping due to invalid data.
+
+- Crash detected (differentiate clean stopping and crash, possibly include crash data if available).
+
+For begin/stop events we need to define an appropriate hysteresis to avoid generating too many events. Note that begin/stop events cannot be detected in the backend simply as derivatives of current rates; the events include additional data that is not present in the current value.
+
+### Host Metrics
+
+The service should collect host resource metrics in addition to the service's own process metrics. This may help to understand whether a problem observed in the service is induced by a different process on the same host.
+
+## How We Expose Metrics/Traces
+
+Collector configuration must allow specifying the target for its own metrics/traces (which can be different from the target of collected data). The metrics and traces must be clearly tagged to indicate that they are the service’s own metrics (to avoid conflating them with collected data in the backend).
+
+### Impact
+
+We need to be able to assess the impact of these observability improvements on the core performance of the Collector.
+
+### Configurable Level of Observability
+
+Some of the metrics/traces can be high volume and may not be desirable to always observe.
We should consider adding an observability verbosity “level” that allows configuring the Collector to send more or less observability data (or even finer granularity to allow turning on/off specific metrics).
+
+The default level of observability must be defined in a way that has insignificant performance impact on the service.
diff --git a/internal/otel_collector/docs/performance.md b/internal/otel_collector/docs/performance.md
new file mode 100644
index 00000000000..98901196cd0
--- /dev/null
+++ b/internal/otel_collector/docs/performance.md
@@ -0,0 +1,72 @@
+# OpenTelemetry Collector Performance
+
+The performance numbers that follow were generated using version 0.1.3 of the
+OpenTelemetry Collector, are applicable primarily to the OpenTelemetry Collector, and
+are measured only for traces. In the future, more configurations will be tested.
+
+Note that with the OpenTelemetry Agent you can expect as good, if not better,
+performance with lower resource utilization. This is because the OpenTelemetry
+Agent does not today support features such as batching or retries and will not
+support tail_sampling.
+
+It is important to note that the performance of the OpenTelemetry Collector depends
+on a variety of factors including:
+
+* The receiving format: OpenTelemetry (55678), Jaeger thrift (14268) or Zipkin v2 JSON (9411)
+* The size of the spans (tests are based on number of attributes): 20
+* Whether tail_sampling is enabled or not
+* CPU / Memory allocation
+* Operating System: Linux
+
+## Testing
+
+Testing was completed on Linux using the [Synthetic Load Generator
+utility](https://github.com/Omnition/synthetic-load-generator) running for a
+minimum of one hour (i.e. sustained rate). You can reproduce these results in
+your own environment using the parameters described in this document. It is
+important to note that this utility has a few configurable parameters which can
+impact the results of the tests. The parameters used are defined below.
+
+* FlushInterval(ms) [default: 1000]
+* MaxQueueSize [default: 100]
+* SubmissionRate(spans/sec): 100,000
+
+## Results without tail-based sampling
+
+| Span Format | CPU (2+ GHz) | RAM (GB) | Sustained Rate | Recommended Maximum |
+| :---: | :---: | :---: | :---: | :---: |
+| OpenTelemetry | 1 | 2 | ~12K | 10K |
+| OpenTelemetry | 2 | 4 | ~24K | 20K |
+| Jaeger Thrift | 1 | 2 | ~14K | 12K |
+| Jaeger Thrift | 2 | 4 | ~27.5K | 24K |
+| Zipkin v2 JSON | 1 | 2 | ~10.5K | 9K |
+| Zipkin v2 JSON | 2 | 4 | ~22K | 18K |
+
+If you are NOT using tail-based sampling and you need higher rates, you can
+either:
+
+* Divide traffic to different collectors (e.g. by region)
+* Scale up by adding more resources (CPU/RAM)
+* Scale out by putting one or more collectors behind a load balancer or k8s
+service
+
+## Results with tail-based sampling
+
+> Note: Additional memory is required for tail-based sampling
+
+| Span Format | CPU (2+ GHz) | RAM (GB) | Sustained Rate | Recommended Maximum |
+| :---: | :---: | :---: | :---: | :---: |
+| OpenTelemetry | 1 | 2 | ~9K | 8K |
+| OpenTelemetry | 2 | 4 | ~18K | 16K |
+| Jaeger Thrift | 1 | 6 | ~11.5K | 10K |
+| Jaeger Thrift | 2 | 8 | ~23K | 20K |
+| Zipkin v2 JSON | 1 | 6 | ~8.5K | 7K |
+| Zipkin v2 JSON | 2 | 8 | ~16K | 14K |
+
+If you are using tail-based sampling and you need higher rates, you can
+either:
+
+* Scale up by adding more resources (CPU/RAM)
+* Scale out by putting one or more collectors behind a load balancer or k8s
+service, but the load balancer must support traceID-based routing (i.e. all
+spans for a given traceID need to be received by the same collector instance)
diff --git a/internal/otel_collector/docs/release.md b/internal/otel_collector/docs/release.md
new file mode 100644
index 00000000000..6e1ce3e4995
--- /dev/null
+++ b/internal/otel_collector/docs/release.md
@@ -0,0 +1,39 @@
+# OpenTelemetry Collector Release Procedure
+
+Collector build and testing is currently fully automated. However, there are still certain operations that need to be performed manually in order to make a release.
+
+We release both core and contrib collectors with the same versions, where the contrib release uses the core release as a dependency. We’ve divided this process into two sections. A release engineer must first release the Core collector and then the Contrib collector.
+
+Important: Note that you’ll need to be able to sign git commits/tags in order to be able to release a collector version. Follow [this guide](https://docs.github.com/en/github/authenticating-to-github/signing-commits) to set it up.
+
+Note: You’ll need to be an approver for both repos in order to be able to make the release. This is required as you’ll need to push tags and commits directly to the upstream repo.
+
+## Releasing OpenTelemetry Core
+
+1. Update Contrib to use the latest in-development version of Core. Run `make update-otel` in the Contrib root directory and, if it results in any changes, submit a PR. Get the PR approved and merged. This is to ensure that the latest core does not break contrib in any way. We’ll update it once more to the final release number later. Make sure contrib builds and end-to-end tests pass successfully after being merged and -dev docker images are published.
+
+1. Determine the version number that will be assigned to the release. The Collector uses semver, with the exception that while we are still in the Beta stage breaking changes are allowed without incrementing the major version number. For breaking changes we increment the minor version number and set the patch number to 0.
+
+1. Prepare Core for release. Update the CHANGELOG.md file and rename the Unreleased section to the new release name. Add a new unreleased section at the top.
+
+   Use the commit history to get the list of commits since the last release to help understand what should be in the release notes, e.g.: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/${last_release}...main. Submit a PR with the changes and get the PR approved and merged.
+
+1. Make sure the current main branch build passes successfully (Core and Contrib). For Contrib also check that the spawn-stability-tests-job triggered by the main build-publish job also passes. Check that the corresponding "-dev" images exist in Dockerhub (Core and Contrib).
+
+1. Create a branch named `release/<release-series>` (e.g. release/v0.4.x) in Core from the changelog update commit and push it to origin (not your fork). Wait for the release branch builds to pass successfully.
+
+1. 
Tag all the modules with the new release version by running the `make add-tag` command (e.g. `make add-tag TAG=v0.4.0`). Push the tags to origin (not your fork) with `git push --tags origin` (assuming origin refers to the upstream open-telemetry project). Wait for the new tag build to pass successfully. This build will push new docker images to https://hub.docker.com/repository/docker/otel/opentelemetry-collector, create a GitHub release for the tag, and push all the build artifacts to the GitHub release.
+
+1. Edit the newly auto-created GitHub release and copy the release notes from the CHANGELOG.md file to the release. This step should be automated. CI can pull the release notes from the changelog and use them as the body when creating the new release.
+
+## Releasing OpenTelemetry Contrib
+
+1. Prepare Contrib for release. Update the CHANGELOG.md file and rename the Unreleased section to the new release name. Add a new unreleased section at the top. Refer to the Core release notes (assuming the previous release of Core and Contrib was also performed simultaneously), and in addition list the changes that happened in the Contrib repo.
+
+1. Update the Core dependency to the Core version we just released with the `make update-otel` command, e.g. `make update-otel OTEL_VERSION=v0.4.0`. Create a PR with both changes, get it approved and merged.
+
+1. Create a branch named `release/<release-series>` (e.g. release/v0.4.x) in Contrib from the changelog update commit and push it to origin (not your fork). Wait for the release branch builds to pass successfully.
+
+1. Tag all the modules with the new release version by running the `make add-tag` command (e.g. `make add-tag TAG=v0.4.0`). Push the tags to origin (not your fork) with `git push --tags origin` (assuming origin refers to the upstream open-telemetry project). Wait for the new tag build to pass successfully. This build will push new docker images to https://hub.docker.com/repository/docker/otel/opentelemetry-collector-contrib, create a GitHub release for the tag, and push all the build artifacts to the GitHub release.
+
+1. Edit the newly auto-created GitHub release and copy the release notes from the CHANGELOG.md file to the release. This step should be automated. CI can pull the release notes from the changelog and use them as the body when creating the new release.
diff --git a/internal/otel_collector/docs/roadmap.md b/internal/otel_collector/docs/roadmap.md
new file mode 100644
index 00000000000..7583ca36943
--- /dev/null
+++ b/internal/otel_collector/docs/roadmap.md
@@ -0,0 +1,27 @@
+# Long-term Roadmap
+
+This long-term roadmap (draft) is a vision document that reflects our
+current desires. It is not a commitment to implement everything listed in this roadmap.
+The primary purpose of this document is to ensure that all contributors work in alignment.
+As our vision changes over time, maintainers reserve the right to add, modify, and _remove_
+items from this roadmap.
+
+Description|Status|Links|
+-----------|------|-----|
+**Testing**|
+Metrics correctness tests|In progress|[#652](https://github.com/open-telemetry/opentelemetry-collector/issues/652)
+| |
+**New Formats**|
+Complete OTLP/HTTP support| |[#882](https://github.com/open-telemetry/opentelemetry-collector/issues/882)
+Add logs support for all primary core processors (attributes, batch, k8s_tagger, etc)|In progress|
+| |
+**5 Min to Value**|
+Distribution packages for most common targets (e.g.
Docker, RPM, Windows, etc)|
+Detection and collection of environment metrics and tags on AWS||
+Detection and collection of k8s telemetry|In progress|
+Host metric collection|In progress|
+Support more application-specific metric collection (e.g. Kafka, Hadoop, etc)| |
+| |
+**Other Features**|
+Graceful shutdown (pipeline draining)| |[#483](https://github.com/open-telemetry/opentelemetry-collector/issues/483)
+Deprecate queue retry processor and enable queuing per exporter by default||[#1721](https://github.com/open-telemetry/opentelemetry-collector/issues/1721)
diff --git a/internal/otel_collector/docs/security.md b/internal/otel_collector/docs/security.md
new file mode 100644
index 00000000000..f2014b84453
--- /dev/null
+++ b/internal/otel_collector/docs/security.md
@@ -0,0 +1,198 @@
+# Security
+
+The OpenTelemetry Collector defaults to operating in a secure manner, but is
+configuration-driven. This document captures important security aspects and
+considerations for the Collector. It is intended for both end-users
+and component developers, and assumes at least a basic understanding of the
+Collector architecture and functionality.
+
+> Note: Please review the [configuration
+> documentation](https://opentelemetry.io/docs/collector/configuration/)
+> prior to reading this security document.
+
+## TL;DR
+
+### End-users
+
+- Configuration
+  - SHOULD only enable the minimum required components
+  - SHOULD ensure sensitive configuration information is stored securely
+- Permissions
+  - SHOULD NOT run the Collector as a root/admin user
+  - MAY require privileged access for some components
+- Receivers/Exporters
+  - SHOULD use encryption and authentication
+  - MAY pose a security risk if configuration parameters are modified improperly
+- Processors
+  - SHOULD configure obfuscation/scrubbing of sensitive metadata
+  - SHOULD configure recommended processors
+- Extensions
+  - SHOULD NOT expose sensitive health or telemetry data
+
+> For more information about securing the OpenTelemetry Collector, see
+> [this](https://medium.com/opentelemetry/securing-your-opentelemetry-collector-1a4f9fa5bd6f)
+> blog post.
+
+### Component Developers
+
+- Configuration
+  - MUST come from the central configuration file
+  - SHOULD use configuration helpers
+- Permissions
+  - SHOULD minimize privileged access
+  - MUST document what requires privileged access and why
+- Receivers/Exporters
+  - MUST default to encrypted connections
+  - SHOULD leverage helper functions
+- Extensions
+  - SHOULD NOT expose sensitive health or telemetry data by default
+
+## Configuration
+
+The Collector binary does not contain an embedded or default configuration and
+MUST NOT start without a configuration file being specified. The configuration
+file passed to the Collector MUST be validated prior to being loaded. If an
+invalid configuration is detected, the Collector MUST fail to start as a
+protective mechanism.
+
+> Note: Issue
+> [#886](https://github.com/open-telemetry/opentelemetry-collector/issues/886)
+> proposes adding a default configuration to the binary.
+
+The configuration drives the Collector's behavior, and care should be taken to
+ensure the configuration only enables the minimum set of capabilities and as
+such exposes the minimum set of required ports. In addition, any incoming or
+outgoing communication SHOULD leverage TLS and authentication.
+
+The Collector keeps the configuration in memory, but where the configuration is
+loaded from at start time depends on the packaging used.
For example, in
+Kubernetes, secrets and configmaps CAN be leveraged. In comparison, the Docker
+image embeds the configuration in the container, where it is not stored in an
+encrypted manner by default.
+
+The configuration MAY contain sensitive information including:
+
+- Authentication information such as API tokens
+- TLS certificates including private keys
+
+Sensitive information SHOULD be stored securely, such as on an encrypted
+filesystem or in a secret store. Environment variables CAN be used to handle
+sensitive and non-sensitive data, as the Collector MUST support environment
+variable expansion.
+
+> For more information on environment variable expansion, see
+> [this](https://opentelemetry.io/docs/collector/configuration/#configuration-environment-variables)
+> documentation.
+
+Component developers MUST get configuration information from the Collector's
+configuration file. Component developers SHOULD leverage [configuration helper
+functions](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config).
+
+More information about configuration is provided in the following sections.
+
+## Permissions
+
+The Collector supports running as a custom user and SHOULD NOT be run as a
+root/admin user. For the majority of use-cases, the Collector SHOULD NOT require
+privileged access to function. Some components MAY require privileged access,
+and care should be taken before enabling these components. Collector components
+MAY require external permissions including network access or RBAC.
+
+Component developers SHOULD minimize privileged access requirements and MUST
+document what requires privileged access and why.
+
+More information about permissions is provided in the following sections.
+
+## Receivers and Exporters
+
+Receivers and Exporters can be either push or pull-based. In either case, the
+connection established SHOULD be over a secure and authenticated channel.
+Unused receivers and exporters SHOULD be disabled to minimize the attack vector
+of the Collector.
+
+Receivers and Exporters MAY expose buffer, queue, payload, and/or worker
+settings via configuration parameters. If these settings are available,
+end-users should proceed with caution before modifying the default values.
+Improperly setting these values may expose the Collector to additional attack
+vectors including resource exhaustion.
+
+> It is possible that a receiver MAY require the Collector run in a privileged
+> mode in order to operate, which could be a security concern, but today this
+> is not the case.
+
+Component developers MUST default to encrypted connections (via the `insecure:
+false` configuration setting) and SHOULD leverage
+[receiver](https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/receiverhelper)
+and
+[exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper)
+helper functions.
+
+## Processors
+
+Processors sit between receivers and exporters. They are responsible for
+processing the data in some way. From a security perspective, they are useful
+in a couple of ways.
+
+### Scrubbing sensitive data
+
+It is common for a Collector to be used to scrub sensitive data before
+exporting it to a backend. This is especially important when sending the data
+to a third-party backend. The Collector SHOULD be configured to obfuscate or
+scrub sensitive data before exporting.
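+
+As a minimal sketch of what such scrubbing can look like (the processor shown
+is the core `attributes` processor; the attribute keys below are illustrative
+assumptions, not a prescribed list), sensitive attributes can be deleted or
+hashed before export:
+
+```yaml
+processors:
+  attributes/scrub:
+    actions:
+      # Illustrative key: drop credentials entirely before data leaves the Collector.
+      - key: http.request.header.authorization
+        action: delete
+      # Illustrative key: obfuscate the value while keeping it usable for correlation.
+      - key: user.email
+        action: hash
+```
+
+Like any processor, this only takes effect once added to the `processors` list
+of each pipeline whose data should be scrubbed.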
+
+> Note: Issue
+> [#2466](https://github.com/open-telemetry/opentelemetry-collector/issues/2466)
+> proposes adding default obfuscation or scrubbing of known sensitive metadata.
+
+### Safeguards around resource utilization
+
+In addition, processors offer safeguards around resource utilization. The
+`batch` and especially `memory_limiter` processors help ensure that the
+Collector is resource efficient and does not run out of memory when overloaded.
+At least these two processors SHOULD be enabled on every defined pipeline.
+
+> For more information on recommended processors and order, see
+> [this](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor)
+> documentation.
+
+## Extensions
+
+While receivers, processors, and exporters handle telemetry data directly,
+extensions typically serve different needs.
+
+### Health and Telemetry
+
+The initial extensions provided health check information, Collector metrics and
+traces, and the ability to generate and collect profiling data. When enabled
+with their default settings, all of these extensions except the health check
+extension are only accessible locally to the Collector. Care should be taken
+when configuring these extensions for remote access, as sensitive information
+may be exposed as a result.
+
+Component developers SHOULD NOT expose health or telemetry data outside the
+Collector by default.
+
+### Forwarding
+
+A forwarding extension is typically used when some telemetry data not natively
+supported by the Collector needs to be collected. For example, the
+`http_forwarder` extension can receive and forward HTTP payloads. Forwarding
+extensions are similar to receivers and exporters, so the same security
+considerations apply.
+
+### Observers
+
+An observer is capable of performing service discovery of endpoints. Other
+components of the Collector such as receivers MAY subscribe to these extensions
+to be notified of endpoints coming or going. Observers MAY require certain
+permissions in order to perform service discovery. For example, the
+`k8s_observer` requires certain RBAC permissions in Kubernetes, while the
+`host_observer` requires the Collector to run in privileged mode.
+
+### Subprocesses
+
+Extensions may also be used to run subprocesses. This can be useful for
+collection mechanisms that cannot natively be run by the Collector (e.g.
+FluentBit). Subprocesses expose a completely separate attack vector that
+depends on the subprocess itself. In general, care should be taken before
+running any subprocesses alongside the Collector.
diff --git a/internal/otel_collector/docs/service-extensions.md b/internal/otel_collector/docs/service-extensions.md
new file mode 100644
index 00000000000..6d707a6d31e
--- /dev/null
+++ b/internal/otel_collector/docs/service-extensions.md
@@ -0,0 +1,146 @@
+# OpenTelemetry Collector: Extensions
+
+Besides the pipeline elements (receivers, processors, and exporters) the Collector
+uses various service extensions (e.g.: healthcheck, z-pages, etc).
+This document describes the “extensions” design and how they are implemented.
+
+## Configuration and Interface
+
+The configuration follows the same pattern used for pipelines: a base
+configuration type and the creation of factories to instantiate the extension
+objects.
+
+In order to support generic service extensions, an interface is defined
+so the service can interact uniformly with these. At a minimum, service extensions
+need to implement the interface that covers Start and Shutdown.
+
+In addition to this base interface there is support to notify extensions when
+pipelines are “ready” and when they are about to be stopped, i.e.: “not ready”
+to receive data. These are a necessary addition to allow implementing extensions
+that indicate to load balancers and external systems whether the service instance
+is ready or not to receive data
+(e.g.: a [k8s readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-readiness-probes)).
+These state changes are under the control of the service server hosting
+the extensions.
+
+There are more complex scenarios in which there can be notifications of state
+changes from the extensions to their host. These more complex cases are not
+supported at this moment, but this design doesn’t prevent such extensions in the
+future[^1].
+
+
+## Collector State and Extensions
+
+The diagram below shows the basic state transitions of the OpenTelemetry Collector
+and how it will interact with the service extensions.
+
+![ServiceLifeCycle](images/design-service-lifecycle.png)
+
+
+## Configuration
+
+The config package will be extended to load the service extensions when the
+configuration is loaded. The settings for service extensions will live in the
+same configuration file as the pipeline elements. Below is an example of how
+these sections would look in the configuration file:
+
+```yaml
+
+# Example of the extensions available with the core Collector. The list below
+# includes all configurable options and their respective default value.
+extensions:
+  health_check:
+    port: 13133
+  pprof:
+    endpoint: "localhost:1777"
+    block_profile_fraction: 0
+    mutex_profile_fraction: 0
+  zpages:
+    endpoint: "localhost:55679"
+
+# The service lists extensions not directly related to data pipelines, but used
+# by the service.
+service:
+  # extensions lists the extensions added to the service. They are started
+  # in the order presented below and stopped in the reverse order.
+  extensions: [health_check, pprof, zpages]
+```
+
+The configuration base type does not share any common fields.
+
+The configuration, analogous to pipelines, allows multiple extensions of
+the same type to be defined. Implementers of extensions need to take care to
+return an error if only a single instance can execute. (Note: the configuration
+uses composite key names in the form of `type[/name]`
+as defined in [this document](https://docs.google.com/document/d/1NeheFG7DmcUYo_h2vLtNRlia9x5wOJMlV4QKEK05FhQ/edit#)).
+
+The factory follows the same pattern established for pipeline configuration:
+
+```go
+// Factory is a factory interface for extensions to the service.
+type Factory interface {
+	// Type gets the type of the extension created by this factory.
+	Type() string
+
+	// CreateDefaultConfig creates the default configuration for the extension.
+	CreateDefaultConfig() config.Extension
+
+	// CreateExtension creates a service extension based on the given config.
+	CreateExtension(logger *zap.Logger, cfg config.Extension) (component.Extension, error)
+}
+```
+
+
+## Extension Interface
+
+The interface defined below is the minimum required for
+extensions in use on the service:
+
+```go
+// ServiceExtension is the interface for objects hosted by the OpenTelemetry Collector that
+// don't participate directly in data pipelines but provide some functionality
+// to the service, examples: health check endpoint, z-pages, etc.
+type ServiceExtension interface {
+	// Start the ServiceExtension object hosted by the given host.
At this point in the
+	// process life-cycle the receivers are not started and the host has not
+	// received any data yet.
+	Start(host Host) error
+
+	// Shutdown the ServiceExtension instance. This happens after the pipelines were
+	// shut down.
+	Shutdown() error
+}
+
+// PipelineWatcher is an extra interface for ServiceExtension hosted by the OpenTelemetry
+// Collector that is to be implemented by extensions interested in changes to pipeline
+// states. Typically this will be used by extensions that change their behavior if data is
+// being ingested or not, e.g.: a k8s readiness probe.
+type PipelineWatcher interface {
+	// Ready notifies the ServiceExtension that all pipelines were built and the
+	// receivers were started, i.e.: the service is ready to receive data
+	// (notice that it may already have received data when this method is called).
+	Ready() error
+
+	// NotReady notifies the ServiceExtension that all receivers are about to be stopped,
+	// i.e.: pipeline receivers will not accept new data.
+	// This is sent before receivers are stopped, so the ServiceExtension can take any
+	// appropriate action before that happens.
+	NotReady() error
+}
+
+// Host represents the entity where the extension is being hosted.
+// It is used to allow communication between the extension and its host.
+type Host interface {
+	// ReportFatalError is used to report to the host that the extension
+	// encountered a fatal error (i.e.: an error that the instance can't recover
+	// from) after its start function had already returned.
+	ReportFatalError(err error)
+}
+```
+
+## Notes
+
+[^1]:
+    This can be done by adding specific interfaces to extension types that support
+    those and having the service check which of the extension instances support
+    each interface.
diff --git a/internal/otel_collector/docs/troubleshooting.md b/internal/otel_collector/docs/troubleshooting.md
new file mode 100644
index 00000000000..b612b30d377
--- /dev/null
+++ b/internal/otel_collector/docs/troubleshooting.md
@@ -0,0 +1,280 @@
+# Troubleshooting
+
+## Observability
+
+The Collector offers multiple ways to measure its health
+as well as investigate issues.
+
+### Logs
+
+Logs can be helpful in identifying issues. Always start by checking the log
+output and looking for potential issues.
+
+The verbosity level, which defaults to `INFO`, can also be adjusted by passing
+the `--log-level` flag to the `otelcol` process. See `--help` for more details.
+
+```bash
+$ otelcol --log-level DEBUG
+```
+
+### Metrics
+
+Prometheus metrics are exposed locally on port `8888` and path `/metrics`.
+
+For containerized environments it may be desirable to expose this port on a
+public interface instead of just locally. The metrics address can be configured
+by passing the `--metrics-addr` flag to the `otelcol` process. See `--help` for
+more details.
+
+```bash
+$ otelcol --metrics-addr 0.0.0.0:8888
+```
+
+A Grafana dashboard for these metrics can be found
+[here](https://grafana.com/grafana/dashboards/11575).
+
+Also note that a Collector can be configured to scrape its own metrics and send
+them through configured pipelines.
For example:
+
+```yaml
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'otelcol'
+          scrape_interval: 10s
+          static_configs:
+            - targets: ['0.0.0.0:8888']
+          metric_relabel_configs:
+            - source_labels: [ __name__ ]
+              regex: '.*grpc_io.*'
+              action: drop
+exporters:
+  logging:
+service:
+  pipelines:
+    metrics:
+      receivers: [prometheus]
+      processors: []
+      exporters: [logging]
+```
+
+### zPages
+
+The
+[zpages](https://github.com/open-telemetry/opentelemetry-collector/tree/main/extension/zpagesextension/README.md)
+extension, which if enabled is exposed locally on port `55679`, can be used to
+check the trace operations of receivers and exporters via `/debug/tracez`. `zpages`
+may contain error logs that the Collector does not emit.
+
+For containerized environments it may be desirable to expose this port on a
+public interface instead of just locally. This can be configured via the
+extensions configuration section. For example:
+
+```yaml
+extensions:
+  zpages:
+    endpoint: 0.0.0.0:55679
+```
+
+### Local exporters
+
+[Local
+exporters](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#general-information)
+can be configured to inspect the data being processed by the Collector.
+
+For live troubleshooting purposes, consider leveraging the `logging` exporter,
+which can be used to confirm that data is being received, processed and
+exported by the Collector.
+
+```yaml
+receivers:
+  zipkin:
+exporters:
+  logging:
+service:
+  pipelines:
+    traces:
+      receivers: [zipkin]
+      processors: []
+      exporters: [logging]
+```
+
+Get a Zipkin payload to test. For example, create a file called `trace.json`
+that contains:
+
+```json
+[
+  {
+    "traceId": "5982fe77008310cc80f1da5e10147519",
+    "parentId": "90394f6bcffb5d13",
+    "id": "67fae42571535f60",
+    "kind": "SERVER",
+    "name": "/m/n/2.6.1",
+    "timestamp": 1516781775726000,
+    "duration": 26000,
+    "localEndpoint": {
+      "serviceName": "api"
+    },
+    "remoteEndpoint": {
+      "serviceName": "apip"
+    },
+    "tags": {
+      "data.http_response_code": "201"
+    }
+  }
+]
```
+
+With the Collector running, send this payload to the Collector. For example:
+
+```bash
+$ curl -X POST localhost:9411/api/v2/spans -H'Content-Type: application/json' -d @trace.json
+```
+
+You should see a log entry like the following from the Collector:
+
+```
+2020-11-11T04:12:33.089Z  INFO  loggingexporter/logging_exporter.go:296  TraceExporter  {"#spans": 1}
+```
+
+You can also configure the `logging` exporter so the entire payload is printed:
+
+```yaml
+exporters:
+  logging:
+    loglevel: debug
+```
+
+With the modified configuration, if you re-run the test above, the log output
+should look like:
+
+```
+2020-11-11T04:08:17.344Z  DEBUG  loggingexporter/logging_exporter.go:353  ResourceSpans #0
+Resource labels:
+     -> service.name: STRING(api)
+InstrumentationLibrarySpans #0
+Span #0
+    Trace ID       : 5982fe77008310cc80f1da5e10147519
+    Parent ID      : 90394f6bcffb5d13
+    ID             : 67fae42571535f60
+    Name           : /m/n/2.6.1
+    Kind           : SPAN_KIND_SERVER
+    Start time     : 2018-01-24 08:16:15.726 +0000 UTC
+    End time       : 2018-01-24 08:16:15.752 +0000 UTC
+Attributes:
+     -> data.http_response_code: STRING(201)
+```
+
+### Health Check
+
+The
+[health_check](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/healthcheckextension/README.md)
+extension, which by default is available on all interfaces on port `13133`, can
+be used to ensure the Collector is functioning properly.
+
+```yaml
+extensions:
+  health_check:
+service:
+  extensions: [health_check]
+```
+
+It returns a response like the following:
+
+```json
+{
+    "status": "Server available",
+    "upSince": "2020-11-11T04:12:31.6847174Z",
+    "uptime": "49.0132518s"
+}
+```
+
+### pprof
+
+The
+[pprof](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/pprofextension/README.md)
+extension, which by default is available locally on port `1777`, allows you to profile the
+Collector as it runs. This is an advanced use-case that should not be needed in most circumstances.
+
+## Common Issues
+
+To see logs for the Collector:
+
+On a Linux systemd system, logs can be found using `journalctl`:
+`journalctl | grep otelcol`
+
+or to find only errors:
+`journalctl | grep otelcol | grep Error`
+
+### Collector exit/restart
+
+The Collector may exit/restart because of:
+
+- Memory pressure due to a missing or misconfigured
+  [memory_limiter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md)
+  processor.
+- [Improper sizing](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/performance.md)
+  for the load.
+- Improper configuration (for example, a queue size configured higher
+  than available memory).
+- Infrastructure resource limits (for example Kubernetes).
+
+### Data being dropped
+
+Data may be dropped for a variety of reasons, but most commonly because of:
+
+- An [improperly sized Collector](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/performance.md) resulting in the Collector being unable to process and export the data as fast as it is received.
+- The exporter destination being unavailable or accepting the data too slowly.
+
+To mitigate drops, it is highly recommended to configure the
+[batch](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md)
+processor. In addition, it may be necessary to configure the [queued retry
+options](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration)
+on enabled exporters.
+
+### Receiving data not working
+
+If you are unable to receive data, this is likely because of one of the following:
+
+- There is a network configuration issue
+- The receiver configuration is incorrect
+- The receiver is defined in the `receivers` section, but not enabled in any `pipelines`
+- The client configuration is incorrect
+
+Check the Collector logs as well as `zpages` for potential issues.
+
+### Processing data not working
+
+Most processing issues are a result of either a misunderstanding of how the
+processor works or a misconfiguration of the processor.
+
+Examples of misunderstanding include:
+
+- The attributes processors only work for "tags" on spans. The span name is
+  handled by the span processor.
+- Processors for trace data (except tail sampling) work on individual spans.
+
+### Exporting data not working
+
+If you are unable to export to a destination, this is likely because of one of the following:
+
+- There is a network configuration issue
+- The exporter configuration is incorrect
+- The destination is unavailable
+
+Check the Collector logs as well as `zpages` for potential issues.
+
+More often than not, exporting data does not work because of a network
+configuration issue. This could be due to a firewall, DNS, or proxy
+issue.
Note that the Collector does have
+[proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support).
+
+### Startup failing in Windows Docker containers
+
+The process may fail to start in a Windows Docker container with the following
+error: `The service process could not connect to the service controller`. In
+this case, the `NO_WINDOWS_SERVICE=1` environment variable should be set to force
+the Collector to be started as if it were running in an interactive terminal,
+without attempting to run as a Windows service.
diff --git a/internal/otel_collector/docs/vision.md b/internal/otel_collector/docs/vision.md
new file mode 100644
index 00000000000..dfe9ab01934
--- /dev/null
+++ b/internal/otel_collector/docs/vision.md
@@ -0,0 +1,25 @@
+# OpenTelemetry Collector Long-term Vision
+
+The following are high-level items that define our long-term vision for the OpenTelemetry Collector: what we aspire to achieve. This vision is our daily guidance when we design new features and make changes to the Collector.
+
+This is a living document that is expected to evolve over time.
+
+## Performant
+Highly stable and performant under varying loads. Well-behaved under extreme load, with predictable, low resource consumption.
+
+## Observable
+Expose its own operational metrics in a clear way. Be an exemplar of an observable service. Allow configuring the level of observability (more or fewer metrics, traces, logs, etc. reported). See [more details](observability.md).
+
+## Multi-Data
+Support traces, metrics, logs and other relevant data types.
+
+## Usable Out of the Box
+Reasonable default configuration, supports popular protocols, runs and collects out of the box.
+
+## Extensible
+Extensible and customizable without touching the core code. Custom agents can be created based on the core and extended with custom components. Welcoming 3rd-party contribution policy.
+
+## Unified Codebase
+One codebase for the daemon (Agent) and the standalone service (Collector).
+
+For more details on how we plan to achieve this vision, please see the [Roadmap](roadmap.md).
\ No newline at end of file
diff --git a/internal/otel_collector/examples/README.md b/internal/otel_collector/examples/README.md
new file mode 100644
index 00000000000..1c5da97badb
--- /dev/null
+++ b/internal/otel_collector/examples/README.md
@@ -0,0 +1,5 @@
+# Examples
+
+Information on how the examples can be used can be found in the [Getting
+Started
+documentation](https://opentelemetry.io/docs/collector/getting-started/).
diff --git a/internal/otel_collector/examples/k8s/otel-config.yaml b/internal/otel_collector/examples/k8s/otel-config.yaml
new file mode 100644
index 00000000000..9f70e3e3448
--- /dev/null
+++ b/internal/otel_collector/examples/k8s/otel-config.yaml
@@ -0,0 +1,230 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: otel-agent-conf
+  labels:
+    app: opentelemetry
+    component: otel-agent-conf
+data:
+  otel-agent-config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+    exporters:
+      otlp:
+        endpoint: "otel-collector.default:4317"
+        insecure: true
+        sending_queue:
+          num_consumers: 4
+          queue_size: 100
+        retry_on_failure:
+          enabled: true
+    processors:
+      batch:
+      memory_limiter:
+        # Same as --mem-ballast-size-mib CLI argument
+        ballast_size_mib: 165
+        # 80% of maximum memory up to 2G
+        limit_mib: 400
+        # 25% of limit up to 2G
+        spike_limit_mib: 100
+        check_interval: 5s
+    extensions:
+      # health_check is required by the liveness/readiness probes below.
+      health_check: {}
+      zpages: {}
+    service:
+      extensions: [health_check, zpages]
+      pipelines:
+        traces:
+          receivers: [otlp]
+          processors: [memory_limiter, batch]
+          exporters: [otlp]
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: otel-agent
+  labels:
+    app: opentelemetry
+    component: otel-agent
+spec:
+  selector:
+    matchLabels:
+      app: opentelemetry
+      component: otel-agent
+  template:
+    metadata:
+      labels:
+        app: opentelemetry
+        component: otel-agent
+    spec:
+      containers:
+      - command:
+          - "/otelcol"
+          - "--config=/conf/otel-agent-config.yaml"
+          # Memory Ballast size should be max 1/3 to 1/2 of memory.
+          - "--mem-ballast-size-mib=165"
+        image: otel/opentelemetry-collector-dev:latest
+        name: otel-agent
+        resources:
+          limits:
+            cpu: 500m
+            memory: 500Mi
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 55679 # ZPages endpoint.
+        - containerPort: 4317 # Default OpenTelemetry receiver port.
+        - containerPort: 8888 # Metrics.
+        volumeMounts:
+        - name: otel-agent-config-vol
+          mountPath: /conf
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 13133 # Health Check extension default port.
+        readinessProbe:
+          httpGet:
+            path: /
+            port: 13133 # Health Check extension default port.
+      volumes:
+        - configMap:
+            name: otel-agent-conf
+            items:
+              - key: otel-agent-config
+                path: otel-agent-config.yaml
+          name: otel-agent-config-vol
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: otel-collector-conf
+  labels:
+    app: opentelemetry
+    component: otel-collector-conf
+data:
+  otel-collector-config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+    processors:
+      batch:
+      memory_limiter:
+        # Same as --mem-ballast-size-mib CLI argument
+        ballast_size_mib: 683
+        # 80% of maximum memory up to 2G
+        limit_mib: 1500
+        # 25% of limit up to 2G
+        spike_limit_mib: 512
+        check_interval: 5s
+    extensions:
+      # health_check is required by the liveness/readiness probes below.
+      health_check: {}
+      zpages: {}
+    exporters:
+      otlp:
+        endpoint: "http://someotlp.target.com:4317" # Replace with a real endpoint.
+        insecure: true
+    service:
+      extensions: [health_check, zpages]
+      pipelines:
+        traces/1:
+          receivers: [otlp]
+          processors: [memory_limiter, batch]
+          exporters: [otlp]
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: otel-collector
+  labels:
+    app: opentelemetry
+    component: otel-collector
+spec:
+  ports:
+  - name: otlp # Default endpoint for OpenTelemetry gRPC receiver.
+    port: 4317
+    protocol: TCP
+    targetPort: 4317
+  - name: otlp-http # Default endpoint for OpenTelemetry HTTP receiver.
+    port: 4318
+    protocol: TCP
+    targetPort: 4318
+  - name: metrics # Default endpoint for querying metrics.
+    port: 8888
+  selector:
+    component: otel-collector
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: otel-collector
+  labels:
+    app: opentelemetry
+    component: otel-collector
+spec:
+  selector:
+    matchLabels:
+      app: opentelemetry
+      component: otel-collector
+  minReadySeconds: 5
+  progressDeadlineSeconds: 120
+  replicas: 1 #TODO - adjust this to your own requirements
+  template:
+    metadata:
+      labels:
+        app: opentelemetry
+        component: otel-collector
+    spec:
+      containers:
+      - command:
+          - "/otelcol"
+          - "--config=/conf/otel-collector-config.yaml"
+          # Memory Ballast size should be max 1/3 to 1/2 of memory.
+          - "--mem-ballast-size-mib=683"
+        image: otel/opentelemetry-collector-dev:latest
+        name: otel-collector
+        resources:
+          limits:
+            cpu: 1
+            memory: 2Gi
+          requests:
+            cpu: 200m
+            memory: 400Mi
+        ports:
+        - containerPort: 55679 # Default endpoint for ZPages.
+        - containerPort: 4317 # Default endpoint for OpenTelemetry receiver.
+        - containerPort: 14250 # Default endpoint for Jaeger gRPC receiver.
+        - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
+        - containerPort: 9411 # Default endpoint for Zipkin receiver.
+        - containerPort: 8888 # Default endpoint for querying metrics.
+        volumeMounts:
+        - name: otel-collector-config-vol
+          mountPath: /conf
+#        - name: otel-collector-secrets
+#          mountPath: /secrets
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 13133 # Health Check extension default port.
+        readinessProbe:
+          httpGet:
+            path: /
+            port: 13133 # Health Check extension default port.
+      volumes:
+        - configMap:
+            name: otel-collector-conf
+            items:
+              - key: otel-collector-config
+                path: otel-collector-config.yaml
+          name: otel-collector-config-vol
+#        - secret:
+#            name: otel-collector-secrets
+#            items:
+#              - key: cert.pem
+#                path: cert.pem
+#              - key: key.pem
+#                path: key.pem
diff --git a/internal/otel_collector/examples/local/otel-config.yaml b/internal/otel_collector/examples/local/otel-config.yaml
new file mode 100644
index 00000000000..35bba1ba5c6
--- /dev/null
+++ b/internal/otel_collector/examples/local/otel-config.yaml
@@ -0,0 +1,37 @@
+extensions:
+  memory_ballast:
+    size_mib: 512
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+
+processors:
+  batch:
+  memory_limiter:
+    # 75% of maximum memory up to 4G
+    limit_mib: 1536
+    # 25% of limit up to 2G
+    spike_limit_mib: 512
+    check_interval: 5s
+
+exporters:
+  logging:
+    loglevel: debug
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [memory_limiter, batch]
+      exporters: [logging]
+    metrics:
+      receivers: [otlp]
+      processors: [memory_limiter, batch]
+      exporters: [logging]
+
+  extensions: [memory_ballast, zpages]
diff --git a/internal/otel_collector/exporter/README.md b/internal/otel_collector/exporter/README.md
new file mode 100644
index 00000000000..ed6c1efec8b
--- /dev/null
+++ b/internal/otel_collector/exporter/README.md
@@ -0,0 +1,101 @@
+# General Information
+
+An exporter is how data gets sent to different systems/back-ends. Generally, an
+exporter translates the internal format into another defined format.
+
+Available trace exporters (sorted alphabetically):
+
+- [OTLP gRPC](otlpexporter/README.md)
+- [OTLP HTTP](otlphttpexporter/README.md)
+
+Available metric exporters (sorted alphabetically):
+
+- [OTLP gRPC](otlpexporter/README.md)
+- [OTLP HTTP](otlphttpexporter/README.md)
+
+Available log exporters (sorted alphabetically):
+
+- [OTLP gRPC](otlpexporter/README.md)
+- [OTLP HTTP](otlphttpexporter/README.md)
+
+Available local exporters (sorted alphabetically):
+
+- [File](fileexporter/README.md)
+- [Logging](loggingexporter/README.md)
+
+The [contrib
+repository](https://github.com/open-telemetry/opentelemetry-collector-contrib)
+has more exporters available in its builds.
+
+## Configuring Exporters
+
+Exporters are configured via YAML under the top-level `exporters` tag.
+
+The following is a sample configuration for the `exampleexporter`.
+
+```yaml
+exporters:
+  # Exporter 1.
+  # <exporter type>:
+  exampleexporter:
+    # <setting one>: <value one>
+    endpoint: 1.2.3.4:8080
+    # ...
+  # Exporter 2.
+  # <exporter type>/<name>:
+  exampleexporter/settings:
+    # <setting two>: <value two>
+    endpoint: 0.0.0.0:9211
+```
+
+An exporter instance is referenced by its full name in other parts of the config,
+such as in pipelines. A full name consists of the exporter type, '/' and the
+name appended to the exporter type in the configuration. All exporter full names
+must be unique.
+
+For the example above:
+
+- Exporter 1 has full name `exampleexporter`.
+- Exporter 2 has full name `exampleexporter/settings`.
+
+Exporters are enabled upon being added to a pipeline. For example:
+
+```yaml
+service:
+  pipelines:
+    # Valid pipelines are: traces, metrics or logs
+    # Trace pipeline 1.
+    traces:
+      receivers: [examplereceiver]
+      processors: []
+      exporters: [exampleexporter, exampleexporter/settings]
+    # Trace pipeline 2.
+    traces/another:
+      receivers: [examplereceiver]
+      processors: []
+      exporters: [exampleexporter, exampleexporter/settings]
```
+
+## Data Ownership
+
+When multiple exporters are configured to send the same data (e.g. by configuring multiple
+exporters for the same pipeline) the exporters will have shared access to the data.
+Exporters get access to this shared data when the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+function is called. Exporters MUST NOT modify the `pdata.Traces`/`pdata.Metrics`/`pdata.Logs` argument of
+these functions. If the exporter needs to modify the data while performing the exporting,
+the exporter can clone the data and perform the modification on the clone, or use a
+copy-on-write approach for individual sub-parts of `pdata.Traces`/`pdata.Metrics`/`pdata.Logs`.
+Any approach that does not mutate the original `pdata.Traces`/`pdata.Metrics`/`pdata.Logs` is allowed.
+
+## Proxy Support
+
+Beyond the standard YAML configuration outlined in the individual READMEs above,
+exporters that leverage the net/http package (all do today) also respect the
+following proxy environment variables:
+
+- HTTP_PROXY
+- HTTPS_PROXY
+- NO_PROXY
+
+If set at Collector start time, exporters, regardless of protocol,
+will or will not proxy traffic as defined by these environment variables.
diff --git a/internal/otel_collector/exporter/doc.go b/internal/otel_collector/exporter/doc.go
new file mode 100644
index 00000000000..15c7e94732d
--- /dev/null
+++ b/internal/otel_collector/exporter/doc.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exporter contains implementations of Exporter components.
+//
+// To implement a custom exporter you will need to implement the component.ExporterFactory
+// interface and the component.Exporter interface.
+//
+// To make the custom exporter part of the Collector build, the factory must be added
+// to the defaultcomponents.Components() function.
+package exporter
diff --git a/internal/otel_collector/exporter/exporterhelper/README.md b/internal/otel_collector/exporter/exporterhelper/README.md
new file mode 100644
index 00000000000..1b1bdc42639
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/README.md
@@ -0,0 +1,28 @@
+# Exporter Helper
+
+This is a helper exporter that other exporters can depend on. Today, it
+primarily offers queued retries and conversion of resource attributes to metric labels.
+
+> :warning: This exporter should not be added to a service pipeline.
+
+## Configuration
+
+The following configuration options can be modified:
+
+- `retry_on_failure`
+  - `enabled` (default = true)
+  - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`
+  - `max_interval` (default = 30s): The upper bound on backoff; ignored if `enabled` is `false`
+  - `max_elapsed_time` (default = 120s): The maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`
+- `sending_queue`
+  - `enabled` (default = true)
+  - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`
+  - `queue_size` (default = 5000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`.
+    Users should calculate this as `num_seconds * requests_per_second` where:
+    - `num_seconds` is the number of seconds to buffer in case of a backend outage
+    - `requests_per_second` is the average number of requests per second.
+- `resource_to_telemetry_conversion`
+  - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.
+- `timeout` (default = 5s): Time to wait per individual attempt to send data to a backend.
+
+The full list of settings exposed for this helper exporter is documented [here](factory.go).
diff --git a/internal/otel_collector/exporter/exporterhelper/common.go b/internal/otel_collector/exporter/exporterhelper/common.go
new file mode 100644
index 00000000000..6ef4f8f6d34
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/common.go
@@ -0,0 +1,219 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenthelper"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumerhelper"
+	"go.opentelemetry.io/collector/obsreport"
+)
+
+// TimeoutSettings for timeout. The timeout applies to individual attempts to send data to the backend.
+type TimeoutSettings struct {
+	// Timeout is the timeout for every attempt to send data to the backend.
+	Timeout time.Duration `mapstructure:"timeout"`
+}
+
+// DefaultTimeoutSettings returns the default settings for TimeoutSettings.
+func DefaultTimeoutSettings() TimeoutSettings {
+	return TimeoutSettings{
+		Timeout: 5 * time.Second,
+	}
+}
+
+// request is an abstraction of an individual request (batch of data) independent of the type of the data (traces, metrics, logs).
+type request interface {
+	// context returns the Context of the request.
+	context() context.Context
+	// setContext updates the Context of the request.
+	setContext(context.Context)
+	// export sends the data in this request to its destination.
+	export(ctx context.Context) error
+	// onError returns a new request that may contain the items left to be sent if some items failed to process and can be retried.
+	// Otherwise, it should return the original request.
+	onError(error) request
+	// count returns the count of spans/metric points or log records.
+	count() int
+}
+
+// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs).
+type requestSender interface {
+	send(req request) error
+}
+
+// baseRequest is a base implementation for the request.
+type baseRequest struct {
+	ctx context.Context
+}
+
+func (req *baseRequest) context() context.Context {
+	return req.ctx
+}
+
+func (req *baseRequest) setContext(ctx context.Context) {
+	req.ctx = ctx
+}
+
+// baseSettings represents all the options that users can configure.
+type baseSettings struct {
+	componentOptions []componenthelper.Option
+	consumerOptions  []consumerhelper.Option
+	TimeoutSettings
+	QueueSettings
+	RetrySettings
+}
+
+// fromOptions returns the internal options starting from the default and applying all configured options.
+func fromOptions(options ...Option) *baseSettings {
+	// Start from the default options:
+	opts := &baseSettings{
+		TimeoutSettings: DefaultTimeoutSettings(),
+		// TODO: Enable queuing by default (call DefaultQueueSettings)
+		QueueSettings: QueueSettings{Enabled: false},
+		// TODO: Enable retry by default (call DefaultRetrySettings)
+		RetrySettings: RetrySettings{Enabled: false},
+	}
+
+	for _, op := range options {
+		op(opts)
+	}
+
+	return opts
+}
+
+// Option applies changes to baseSettings.
+type Option func(*baseSettings)
+
+// WithStart overrides the default Start function for an exporter.
+// The default start function does nothing and always returns nil.
+func WithStart(start componenthelper.StartFunc) Option {
+	return func(o *baseSettings) {
+		o.componentOptions = append(o.componentOptions, componenthelper.WithStart(start))
+	}
+}
+
+// WithShutdown overrides the default Shutdown function for an exporter.
+// The default shutdown function does nothing and always returns nil.
+func WithShutdown(shutdown componenthelper.ShutdownFunc) Option {
+	return func(o *baseSettings) {
+		o.componentOptions = append(o.componentOptions, componenthelper.WithShutdown(shutdown))
+	}
+}
+
+// WithTimeout overrides the default TimeoutSettings for an exporter.
+// The default TimeoutSettings is 5 seconds.
+func WithTimeout(timeoutSettings TimeoutSettings) Option {
+	return func(o *baseSettings) {
+		o.TimeoutSettings = timeoutSettings
+	}
+}
+
+// WithRetry overrides the default RetrySettings for an exporter.
+// The default RetrySettings is to disable retries.
+func WithRetry(retrySettings RetrySettings) Option {
+	return func(o *baseSettings) {
+		o.RetrySettings = retrySettings
+	}
+}
+
+// WithQueue overrides the default QueueSettings for an exporter.
+// The default QueueSettings is to disable queueing.
+func WithQueue(queueSettings QueueSettings) Option {
+	return func(o *baseSettings) {
+		o.QueueSettings = queueSettings
+	}
+}
+
+// WithCapabilities overrides the default Capabilities() function for a Consumer.
+// The default is non-mutable data.
+// TODO: Verify if we can change the default to be mutable as we do for processors.
+func WithCapabilities(capabilities consumer.Capabilities) Option {
+	return func(o *baseSettings) {
+		o.consumerOptions = append(o.consumerOptions, consumerhelper.WithCapabilities(capabilities))
+	}
+}
+
+// baseExporter contains common fields between different exporter types.
+type baseExporter struct {
+	component.Component
+	obsrep   *obsExporter
+	sender   requestSender
+	qrSender *queuedRetrySender
+}
+
+func newBaseExporter(cfg config.Exporter, set component.ExporterCreateSettings, bs *baseSettings) *baseExporter {
+	be := &baseExporter{
+		Component: componenthelper.New(bs.componentOptions...),
+	}
+
+	be.obsrep = newObsExporter(obsreport.ExporterSettings{
+		Level:                  configtelemetry.GetMetricsLevelFlagValue(),
+		ExporterID:             cfg.ID(),
+		ExporterCreateSettings: set,
+	})
+	be.qrSender = newQueuedRetrySender(cfg.ID().String(), bs.QueueSettings, bs.RetrySettings, &timeoutSender{cfg: bs.TimeoutSettings}, set.Logger)
+	be.sender = be.qrSender
+
+	return be
+}
+
+// wrapConsumerSender wraps the consumer sender (the sender that uses retries and timeout) with the given wrapper.
+// This can be used to wrap the consumer sender with observability (create spans, record metrics).
+func (be *baseExporter) wrapConsumerSender(f func(consumer requestSender) requestSender) {
+	be.qrSender.consumerSender = f(be.qrSender.consumerSender)
+}
+
+// Start starts all senders and the exporter; it is invoked during service start.
+func (be *baseExporter) Start(ctx context.Context, host component.Host) error {
+	// First start the wrapped exporter.
+	if err := be.Component.Start(ctx, host); err != nil {
+		return err
+	}
+
+	// If no error then start the queuedRetrySender.
+	return be.qrSender.start()
+}
+
+// Shutdown shuts down all senders and the exporter; it is invoked during service shutdown.
+func (be *baseExporter) Shutdown(ctx context.Context) error {
+	// First shutdown the queued retry sender.
+	be.qrSender.shutdown()
+	// Last shutdown the wrapped exporter itself.
+	return be.Component.Shutdown(ctx)
+}
+
+// timeoutSender is a request sender that adds a `timeout` to every request that passes this sender.
+type timeoutSender struct {
+	cfg TimeoutSettings
+}
+
+// send implements the requestSender interface.
+func (ts *timeoutSender) send(req request) error {
+	// Intentionally don't overwrite the context inside the request, because in case of retries
+	// the deadline would not be updated, as this deadline is most likely earlier than the next one.
+	ctx := req.context()
+	if ts.cfg.Timeout > 0 {
+		var cancelFunc func()
+		ctx, cancelFunc = context.WithTimeout(req.context(), ts.cfg.Timeout)
+		defer cancelFunc()
+	}
+	return req.export(ctx)
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/constants.go b/internal/otel_collector/exporter/exporterhelper/constants.go
new file mode 100644
index 00000000000..2fb7511a438
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/constants.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"errors"
+)
+
+var (
+	// errNilConfig is returned when a nil config is given.
+	errNilConfig = errors.New("nil config")
+	// errNilLogger is returned when a nil logger is given.
+	errNilLogger = errors.New("nil logger")
+	// errNilPushTraceData is returned when a nil PushTraces is given.
+	errNilPushTraceData = errors.New("nil PushTraces")
+	// errNilPushMetricsData is returned when a nil PushMetrics is given.
+	errNilPushMetricsData = errors.New("nil PushMetrics")
+	// errNilPushLogsData is returned when a nil PushLogs is given.
+	errNilPushLogsData = errors.New("nil PushLogs")
+)
diff --git a/internal/otel_collector/exporter/exporterhelper/doc.go b/internal/otel_collector/exporter/exporterhelper/doc.go
new file mode 100644
index 00000000000..6cd2d2c4cfe
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exporterhelper provides helper functions for exporters.
+package exporterhelper
diff --git a/internal/otel_collector/exporter/exporterhelper/factory.go b/internal/otel_collector/exporter/exporterhelper/factory.go
new file mode 100644
index 00000000000..f267b40338b
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/factory.go
@@ -0,0 +1,126 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config"
+)
+
+// FactoryOption applies changes to ExporterOptions.
+type FactoryOption func(o *factory)
+
+// CreateDefaultConfig is the equivalent of component.ExporterFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() config.Exporter
+
+// CreateTracesExporter is the equivalent of component.ExporterFactory.CreateTracesExporter()
+type CreateTracesExporter func(context.Context, component.ExporterCreateSettings, config.Exporter) (component.TracesExporter, error)
+
+// CreateMetricsExporter is the equivalent of component.ExporterFactory.CreateMetricsExporter()
+type CreateMetricsExporter func(context.Context, component.ExporterCreateSettings, config.Exporter) (component.MetricsExporter, error)
+
+// CreateLogsExporter is the equivalent of component.ExporterFactory.CreateLogsExporter()
+type CreateLogsExporter func(context.Context, component.ExporterCreateSettings, config.Exporter) (component.LogsExporter, error)
+
+type factory struct {
+	cfgType               config.Type
+	createDefaultConfig   CreateDefaultConfig
+	createTracesExporter  CreateTracesExporter
+	createMetricsExporter CreateMetricsExporter
+	createLogsExporter    CreateLogsExporter
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTracesExporter.
+func WithTraces(createTracesExporter CreateTracesExporter) FactoryOption {
+	return func(o *factory) {
+		o.createTracesExporter = createTracesExporter
+	}
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetricsExporter.
+func WithMetrics(createMetricsExporter CreateMetricsExporter) FactoryOption {
+	return func(o *factory) {
+		o.createMetricsExporter = createMetricsExporter
+	}
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogsExporter.
+func WithLogs(createLogsExporter CreateLogsExporter) FactoryOption {
+	return func(o *factory) {
+		o.createLogsExporter = createLogsExporter
+	}
+}
+
+// NewFactory returns a component.ExporterFactory.
+func NewFactory(
+	cfgType config.Type,
+	createDefaultConfig CreateDefaultConfig,
+	options ...FactoryOption) component.ExporterFactory {
+	f := &factory{
+		cfgType:             cfgType,
+		createDefaultConfig: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	return f
+}
+
+// Type gets the type of the Exporter config created by this factory.
+func (f *factory) Type() config.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the exporter.
+func (f *factory) CreateDefaultConfig() config.Exporter {
+	return f.createDefaultConfig()
+}
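A hedged sketch of how a component would plug into this factory helper; the pattern mirrors the loggingexporter and otlpexporter factories later in this diff, but every name here (sampleexporter, typeStr, the create functions) is illustrative.

```go
package sampleexporter

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
)

const typeStr = "sample" // hypothetical component type

// Config is a minimal exporter config; real exporters add their own fields.
type Config struct {
	config.ExporterSettings `mapstructure:",squash"`
}

func createDefaultConfig() config.Exporter {
	return &Config{ExporterSettings: config.NewExporterSettings(config.NewID(typeStr))}
}

func createTracesExporter(ctx context.Context, set component.ExporterCreateSettings, cfg config.Exporter) (component.TracesExporter, error) {
	// A real implementation would call exporterhelper.NewTracesExporter here.
	return nil, nil
}

// NewFactory wires the pieces together; pipelines not registered via
// WithTraces/WithMetrics/WithLogs return ErrDataTypeIsNotSupported.
func NewFactory() component.ExporterFactory {
	return exporterhelper.NewFactory(
		typeStr,
		createDefaultConfig,
		exporterhelper.WithTraces(createTracesExporter),
	)
}
```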
+func (f *factory) CreateTracesExporter(
+	ctx context.Context,
+	set component.ExporterCreateSettings,
+	cfg config.Exporter) (component.TracesExporter, error) {
+	if f.createTracesExporter != nil {
+		return f.createTracesExporter(ctx, set, cfg)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsExporter creates a component.MetricsExporter based on this config.
+func (f *factory) CreateMetricsExporter(
+	ctx context.Context,
+	set component.ExporterCreateSettings,
+	cfg config.Exporter) (component.MetricsExporter, error) {
+	if f.createMetricsExporter != nil {
+		return f.createMetricsExporter(ctx, set, cfg)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsExporter creates a component.LogsExporter based on this config.
+func (f *factory) CreateLogsExporter(
+	ctx context.Context,
+	set component.ExporterCreateSettings,
+	cfg config.Exporter,
+) (component.LogsExporter, error) {
+	if f.createLogsExporter != nil {
+		return f.createLogsExporter(ctx, set, cfg)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/internal/bounded_queue.go b/internal/otel_collector/exporter/exporterhelper/internal/bounded_queue.go
new file mode 100644
index 00000000000..ff1598f29ba
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/internal/bounded_queue.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// Copyright (c) 2019 The Jaeger Authors.
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+
+	uatomic "go.uber.org/atomic"
+)
+
+// Consumer consumes data from a bounded queue
+type Consumer interface {
+	Consume(item interface{})
+}
+
+// BoundedQueue implements a producer-consumer exchange similar to a ring buffer queue,
+// where the queue is bounded and if it fills up due to slow consumers, the new items written by
+// the producer force the earliest items to be dropped. The implementation is actually based on
+// channels, with a special Reaper goroutine that wakes up when the queue is full and consumes
+// items from the top of the queue until its size drops back to maxSize
+type BoundedQueue struct {
+	workers       int
+	stopWG        sync.WaitGroup
+	size          *uatomic.Uint32
+	capacity      *uatomic.Uint32
+	stopped       *uatomic.Uint32
+	items         *chan interface{}
+	onDroppedItem func(item interface{})
+	factory       func() Consumer
+	stopCh        chan struct{}
+}
+
+// NewBoundedQueue constructs a new queue of the specified capacity, with an optional
+// callback for dropped items (e.g. useful to emit metrics).
+func NewBoundedQueue(capacity int, onDroppedItem func(item interface{})) *BoundedQueue {
+	queue := make(chan interface{}, capacity)
+	return &BoundedQueue{
+		onDroppedItem: onDroppedItem,
+		items:         &queue,
+		stopCh:        make(chan struct{}),
+		capacity:      uatomic.NewUint32(uint32(capacity)),
+		stopped:       uatomic.NewUint32(0),
+		size:          uatomic.NewUint32(0),
+	}
+}
+
+// StartConsumersWithFactory creates a given number of consumers consuming items
+// from the queue in separate goroutines.
+func (q *BoundedQueue) StartConsumersWithFactory(num int, factory func() Consumer) {
+	q.workers = num
+	q.factory = factory
+	var startWG sync.WaitGroup
+	for i := 0; i < q.workers; i++ {
+		q.stopWG.Add(1)
+		startWG.Add(1)
+		go func() {
+			startWG.Done()
+			defer q.stopWG.Done()
+			consumer := q.factory()
+			queue := *q.items
+			for {
+				select {
+				case item, ok := <-queue:
+					if ok {
+						q.size.Sub(1)
+						consumer.Consume(item)
+					} else {
+						// channel closed, finish worker
+						return
+					}
+				case <-q.stopCh:
+					// the whole queue is closing, finish worker
+					return
+				}
+			}
+		}()
+	}
+	startWG.Wait()
+}
+
+// ConsumerFunc is an adapter to allow the use of
+// a consume function callback as a Consumer.
+type ConsumerFunc func(item interface{})
+
+// Consume calls c(item)
+func (c ConsumerFunc) Consume(item interface{}) {
+	c(item)
+}
+
+// StartConsumers starts a given number of goroutines consuming items from the queue
+// and passing them into the consumer callback.
+func (q *BoundedQueue) StartConsumers(num int, callback func(item interface{})) {
+	q.StartConsumersWithFactory(num, func() Consumer {
+		return ConsumerFunc(callback)
+	})
+}
+
+// Produce is used by the producer to submit a new item to the queue. Returns false in case of queue overflow.
+func (q *BoundedQueue) Produce(item interface{}) bool {
+	if q.stopped.Load() != 0 {
+		q.onDroppedItem(item)
+		return false
+	}
+
+	// we might have two concurrent backing queues at the moment;
+	// their combined size is stored in q.size, and their combined capacity
+	// should match the capacity of the new queue
+	if q.Size() >= q.Capacity() {
+		// note that all items will be dropped if the capacity is 0
+		q.onDroppedItem(item)
+		return false
+	}
+
+	q.size.Add(1)
+	select {
+	case *q.items <- item:
+		return true
+	default:
+		// should not happen, as overflows should have been captured earlier
+		q.size.Sub(1)
+		if q.onDroppedItem != nil {
+			q.onDroppedItem(item)
+		}
+		return false
+	}
+}
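A small, self-contained sketch of the queue lifecycle implemented above (construct, start consumers, produce, stop). One assumption to note: `internal` packages cannot be imported from outside the collector module, so this compiles only in that context.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
)

func main() {
	// Capacity 2; the callback fires for every item dropped on overflow.
	q := internal.NewBoundedQueue(2, func(item interface{}) {
		fmt.Println("dropped:", item)
	})
	q.StartConsumers(1, func(item interface{}) {
		fmt.Println("consumed:", item)
	})
	for i := 0; i < 5; i++ {
		// Produce returns false once Size() >= Capacity().
		q.Produce(i)
	}
	q.Stop() // blocks until the consumer goroutine exits
}
```

+// Stop stops all consumers, as well as the length reporter if started,
+// and releases the items channel. It blocks until all consumers have stopped.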
+func (q *BoundedQueue) Stop() { + q.stopped.Store(1) // disable producer + close(q.stopCh) + q.stopWG.Wait() + close(*q.items) +} + +// Size returns the current size of the queue +func (q *BoundedQueue) Size() int { + return int(q.size.Load()) +} + +// Capacity returns capacity of the queue +func (q *BoundedQueue) Capacity() int { + return int(q.capacity.Load()) +} + +// Resize changes the capacity of the queue, returning whether the action was successful +func (q *BoundedQueue) Resize(capacity int) bool { + if capacity == q.Capacity() { + // noop + return false + } + + previous := *q.items + queue := make(chan interface{}, capacity) + + // swap queues + // #nosec + swapped := atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.items)), unsafe.Pointer(q.items), unsafe.Pointer(&queue)) + if swapped { + // start a new set of consumers, based on the information given previously + q.StartConsumersWithFactory(q.workers, q.factory) + + // gracefully drain the existing queue + close(previous) + + // update the capacity + q.capacity.Store(uint32(capacity)) + } + + return swapped +} diff --git a/internal/otel_collector/exporter/exporterhelper/logs.go b/internal/otel_collector/exporter/exporterhelper/logs.go new file mode 100644 index 00000000000..3873e3f7d0d --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/logs.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +type logsRequest struct { + baseRequest + ld pdata.Logs + pusher consumerhelper.ConsumeLogsFunc +} + +func newLogsRequest(ctx context.Context, ld pdata.Logs, pusher consumerhelper.ConsumeLogsFunc) request { + return &logsRequest{ + baseRequest: baseRequest{ctx: ctx}, + ld: ld, + pusher: pusher, + } +} + +func (req *logsRequest) onError(err error) request { + var logError consumererror.Logs + if consumererror.AsLogs(err, &logError) { + return newLogsRequest(req.ctx, logError.GetLogs(), req.pusher) + } + return req +} + +func (req *logsRequest) export(ctx context.Context) error { + return req.pusher(ctx, req.ld) +} + +func (req *logsRequest) count() int { + return req.ld.LogRecordCount() +} + +type logsExporter struct { + *baseExporter + consumer.Logs +} + +// NewLogsExporter creates an LogsExporter that records observability metrics and wraps every request with a Span. 
+func NewLogsExporter( + cfg config.Exporter, + set component.ExporterCreateSettings, + pusher consumerhelper.ConsumeLogsFunc, + options ...Option, +) (component.LogsExporter, error) { + if cfg == nil { + return nil, errNilConfig + } + + if set.Logger == nil { + return nil, errNilLogger + } + + if pusher == nil { + return nil, errNilPushLogsData + } + + bs := fromOptions(options...) + be := newBaseExporter(cfg, set, bs) + be.wrapConsumerSender(func(nextSender requestSender) requestSender { + return &logsExporterWithObservability{ + obsrep: be.obsrep, + nextSender: nextSender, + } + }) + + lc, err := consumerhelper.NewLogs(func(ctx context.Context, ld pdata.Logs) error { + req := newLogsRequest(ctx, ld, pusher) + err := be.sender.send(req) + if errors.Is(err, errSendingQueueIsFull) { + be.obsrep.recordLogsEnqueueFailure(req.context(), req.count()) + } + return err + }, bs.consumerOptions...) + + return &logsExporter{ + baseExporter: be, + Logs: lc, + }, err +} + +type logsExporterWithObservability struct { + obsrep *obsExporter + nextSender requestSender +} + +func (lewo *logsExporterWithObservability) send(req request) error { + req.setContext(lewo.obsrep.StartLogsOp(req.context())) + err := lewo.nextSender.send(req) + lewo.obsrep.EndLogsOp(req.context(), req.count(), err) + return err +} diff --git a/internal/otel_collector/exporter/exporterhelper/metrics.go b/internal/otel_collector/exporter/exporterhelper/metrics.go new file mode 100644 index 00000000000..a48af7ba398 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/metrics.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +type metricsRequest struct { + baseRequest + md pdata.Metrics + pusher consumerhelper.ConsumeMetricsFunc +} + +func newMetricsRequest(ctx context.Context, md pdata.Metrics, pusher consumerhelper.ConsumeMetricsFunc) request { + return &metricsRequest{ + baseRequest: baseRequest{ctx: ctx}, + md: md, + pusher: pusher, + } +} + +func (req *metricsRequest) onError(err error) request { + var metricsError consumererror.Metrics + if consumererror.AsMetrics(err, &metricsError) { + return newMetricsRequest(req.ctx, metricsError.GetMetrics(), req.pusher) + } + return req +} + +func (req *metricsRequest) export(ctx context.Context) error { + return req.pusher(ctx, req.md) +} + +func (req *metricsRequest) count() int { + return req.md.DataPointCount() +} + +type metricsExporter struct { + *baseExporter + consumer.Metrics +} + +// NewMetricsExporter creates an MetricsExporter that records observability metrics and wraps every request with a Span. 
+func NewMetricsExporter( + cfg config.Exporter, + set component.ExporterCreateSettings, + pusher consumerhelper.ConsumeMetricsFunc, + options ...Option, +) (component.MetricsExporter, error) { + if cfg == nil { + return nil, errNilConfig + } + + if set.Logger == nil { + return nil, errNilLogger + } + + if pusher == nil { + return nil, errNilPushMetricsData + } + + bs := fromOptions(options...) + be := newBaseExporter(cfg, set, bs) + be.wrapConsumerSender(func(nextSender requestSender) requestSender { + return &metricsSenderWithObservability{ + obsrep: be.obsrep, + nextSender: nextSender, + } + }) + + mc, err := consumerhelper.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + req := newMetricsRequest(ctx, md, pusher) + err := be.sender.send(req) + if errors.Is(err, errSendingQueueIsFull) { + be.obsrep.recordMetricsEnqueueFailure(req.context(), req.count()) + } + return err + }, bs.consumerOptions...) + + return &metricsExporter{ + baseExporter: be, + Metrics: mc, + }, err +} + +type metricsSenderWithObservability struct { + obsrep *obsExporter + nextSender requestSender +} + +func (mewo *metricsSenderWithObservability) send(req request) error { + req.setContext(mewo.obsrep.StartMetricsOp(req.context())) + err := mewo.nextSender.send(req) + mewo.obsrep.EndMetricsOp(req.context(), req.count(), err) + return err +} diff --git a/internal/otel_collector/exporter/exporterhelper/obsreport.go b/internal/otel_collector/exporter/exporterhelper/obsreport.go new file mode 100644 index 00000000000..dc799156a17 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/obsreport.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporterhelper + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/obsreport" +) + +// TODO: Incorporate this functionality along with tests from obsreport_test.go +// into existing `obsreport` package once its functionally is not exposed +// as public API. For now this part is kept private. + +// obsExporter is a helper to add observability to a component.Exporter. +type obsExporter struct { + *obsreport.Exporter + mutators []tag.Mutator +} + +// newObsExporter creates a new observability exporter. +func newObsExporter(cfg obsreport.ExporterSettings) *obsExporter { + return &obsExporter{ + obsreport.NewExporter(cfg), + []tag.Mutator{tag.Upsert(obsmetrics.TagKeyExporter, cfg.ExporterID.String(), tag.WithTTL(tag.TTLNoPropagation))}, + } +} + +// recordTracesEnqueueFailure records number of spans that failed to be added to the sending queue. +func (eor *obsExporter) recordTracesEnqueueFailure(ctx context.Context, numSpans int) { + _ = stats.RecordWithTags(ctx, eor.mutators, obsmetrics.ExporterFailedToEnqueueSpans.M(int64(numSpans))) +} + +// recordMetricsEnqueueFailure records number of metric points that failed to be added to the sending queue. 
+func (eor *obsExporter) recordMetricsEnqueueFailure(ctx context.Context, numMetricPoints int) {
+	_ = stats.RecordWithTags(ctx, eor.mutators, obsmetrics.ExporterFailedToEnqueueMetricPoints.M(int64(numMetricPoints)))
+}
+
+// recordLogsEnqueueFailure records the number of log records that failed to be added to the sending queue.
+func (eor *obsExporter) recordLogsEnqueueFailure(ctx context.Context, numLogRecords int) {
+	_ = stats.RecordWithTags(ctx, eor.mutators, obsmetrics.ExporterFailedToEnqueueLogRecords.M(int64(numLogRecords)))
+}
diff --git a/internal/otel_collector/exporter/exporterhelper/queued_retry.go b/internal/otel_collector/exporter/exporterhelper/queued_retry.go
new file mode 100644
index 00000000000..aed8ddd2406
--- /dev/null
+++ b/internal/otel_collector/exporter/exporterhelper/queued_retry.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporterhelper
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+	"go.opencensus.io/metric"
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/metric/metricproducer"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+	"go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+)
+
+var (
+	r = metric.NewRegistry()
+
+	queueSizeGauge, _ = r.AddInt64DerivedGauge(
+		obsmetrics.ExporterKey+"/queue_size",
+		metric.WithDescription("Current size of the retry queue (in batches)"),
+		metric.WithLabelKeys(obsmetrics.ExporterKey),
+		metric.WithUnit(metricdata.UnitDimensionless))
+
+	errSendingQueueIsFull = errors.New("sending_queue is full")
+)
+
+func init() {
+	metricproducer.GlobalManager().AddProducer(r)
+}
+
+// QueueSettings defines configuration for queueing batches before sending to the consumerSender.
+type QueueSettings struct {
+	// Enabled indicates whether to enqueue batches before sending them to the consumerSender.
+	Enabled bool `mapstructure:"enabled"`
+	// NumConsumers is the number of consumers from the queue.
+	NumConsumers int `mapstructure:"num_consumers"`
+	// QueueSize is the maximum number of batches allowed in queue at a given time.
+	QueueSize int `mapstructure:"queue_size"`
+}
+
+// DefaultQueueSettings returns the default settings for QueueSettings.
+func DefaultQueueSettings() QueueSettings {
+	return QueueSettings{
+		Enabled:      true,
+		NumConsumers: 10,
+		// A queue of 5000 batches at 100 requests/sec survives roughly 50 sec of destination outage.
+		// This is a pretty decent value for production.
+		// Users should calculate this by deciding how many seconds of data to buffer in case of a
+		// backend outage and multiplying that by the number of requests per second.
+		QueueSize: 5000,
+	}
+}
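For reference, these settings surface to end users as the `sending_queue` block of an exporter configuration; the sketch below uses the otlp exporter's field names (per the cfg-schema.yaml later in this diff), and the `queue_size` comment restates the sizing arithmetic above. Values are illustrative.

```yaml
exporters:
  otlp:
    sending_queue:
      enabled: true
      num_consumers: 10
      # seconds of backend outage to survive x requests per second,
      # e.g. 50 s x 100 req/s:
      queue_size: 5000
```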
+// RetrySettings defines configuration for retrying batches in case of export failure.
+// The currently supported strategy is exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+	Enabled bool `mapstructure:"enabled"`
+	// InitialInterval is the time to wait after the first failure before retrying.
+	InitialInterval time.Duration `mapstructure:"initial_interval"`
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration `mapstructure:"max_interval"`
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration `mapstructure:"max_elapsed_time"`
+}
+
+// DefaultRetrySettings returns the default settings for RetrySettings.
+func DefaultRetrySettings() RetrySettings {
+	return RetrySettings{
+		Enabled:         true,
+		InitialInterval: 5 * time.Second,
+		MaxInterval:     30 * time.Second,
+		MaxElapsedTime:  5 * time.Minute,
+	}
+}
+
+type queuedRetrySender struct {
+	fullName        string
+	cfg             QueueSettings
+	consumerSender  requestSender
+	queue           *internal.BoundedQueue
+	retryStopCh     chan struct{}
+	traceAttributes []attribute.KeyValue
+	logger          *zap.Logger
+}
+
+func createSampledLogger(logger *zap.Logger) *zap.Logger {
+	if logger.Core().Enabled(zapcore.DebugLevel) {
+		// Debugging is enabled. Don't do any sampling.
+		return logger
+	}
+
+	// Create a logger that samples all messages to 1 per 10 seconds initially,
+	// and 1/100 of messages after that.
+	opts := zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+		return zapcore.NewSamplerWithOptions(
+			core,
+			10*time.Second,
+			1,
+			100,
+		)
+	})
+	return logger.WithOptions(opts)
+}
+
+func newQueuedRetrySender(fullName string, qCfg QueueSettings, rCfg RetrySettings, nextSender requestSender, logger *zap.Logger) *queuedRetrySender {
+	retryStopCh := make(chan struct{})
+	sampledLogger := createSampledLogger(logger)
+	traceAttr := attribute.String(obsmetrics.ExporterKey, fullName)
+	return &queuedRetrySender{
+		fullName: fullName,
+		cfg:      qCfg,
+		consumerSender: &retrySender{
+			traceAttribute: traceAttr,
+			cfg:            rCfg,
+			nextSender:     nextSender,
+			stopCh:         retryStopCh,
+			logger:         sampledLogger,
+		},
+		queue:           internal.NewBoundedQueue(qCfg.QueueSize, func(item interface{}) {}),
+		retryStopCh:     retryStopCh,
+		traceAttributes: []attribute.KeyValue{traceAttr},
+		logger:          sampledLogger,
+	}
+}
+
+// start is invoked during service startup.
+func (qrs *queuedRetrySender) start() error {
+	qrs.queue.StartConsumers(qrs.cfg.NumConsumers, func(item interface{}) {
+		req := item.(request)
+		_ = qrs.consumerSender.send(req)
+	})
+
+	// Start reporting queue length metric
+	if qrs.cfg.Enabled {
+		err := queueSizeGauge.UpsertEntry(func() int64 {
+			return int64(qrs.queue.Size())
+		}, metricdata.NewLabelValue(qrs.fullName))
+		if err != nil {
+			return fmt.Errorf("failed to create retry queue size metric: %v", err)
+		}
+	}
+
+	return nil
+}
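A deterministic sketch of the retry schedule DefaultRetrySettings yields, constructed the same way retrySender.send (below) builds its backoff, but with the randomization factor zeroed so the intervals are exact; the canonical backoff/v4 import path is assumed here.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	eb := backoff.ExponentialBackOff{
		InitialInterval:     5 * time.Second,
		RandomizationFactor: 0,                         // retrySender uses the default (0.5)
		Multiplier:          backoff.DefaultMultiplier, // 1.5
		MaxInterval:         30 * time.Second,
		MaxElapsedTime:      5 * time.Minute,
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	eb.Reset()
	// Prints 5s 7.5s 11.25s 16.875s 25.3125s 30s: exponential growth capped at
	// MaxInterval. In real use, NextBackOff returns backoff.Stop once
	// MaxElapsedTime has passed since Reset, and the batch is dropped.
	for i := 0; i < 6; i++ {
		fmt.Println(eb.NextBackOff())
	}
}
```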
+// send implements the requestSender interface
+func (qrs *queuedRetrySender) send(req request) error {
+	if !qrs.cfg.Enabled {
+		err := qrs.consumerSender.send(req)
+		if err != nil {
+			qrs.logger.Error(
+				"Exporting failed. Dropping data. Try enabling sending_queue to survive temporary failures.",
+				zap.Int("dropped_items", req.count()),
+			)
+		}
+		return err
+	}
+
+	// Prevent cancellation and deadline from propagating to the context stored in the queue.
+	// The grpc/http based receivers will cancel the request context after this function returns.
+	req.setContext(noCancellationContext{Context: req.context()})
+
+	span := trace.SpanFromContext(req.context())
+	if !qrs.queue.Produce(req) {
+		qrs.logger.Error(
+			"Dropping data because sending_queue is full. Try increasing queue_size.",
+			zap.Int("dropped_items", req.count()),
+		)
+		span.AddEvent("Dropped item, sending_queue is full.", trace.WithAttributes(qrs.traceAttributes...))
+		return errSendingQueueIsFull
+	}
+
+	span.AddEvent("Enqueued item.", trace.WithAttributes(qrs.traceAttributes...))
+	return nil
+}
+
+// shutdown is invoked during service shutdown.
+func (qrs *queuedRetrySender) shutdown() {
+	// Cleanup queue metrics reporting
+	if qrs.cfg.Enabled {
+		_ = queueSizeGauge.UpsertEntry(func() int64 {
+			return int64(0)
+		}, metricdata.NewLabelValue(qrs.fullName))
+	}
+
+	// First stop the retry goroutines, which unblocks the queue workers.
+	close(qrs.retryStopCh)
+
+	// Then stop the queued sender; this drains the queue and hands each remaining request to the
+	// retry sender, which (being stopped) will try it only once.
+	qrs.queue.Stop()
+}
+
+// TODO: Clean this by forcing all exporters to return an internal error type that always includes the information about retries.
+type throttleRetry struct {
+	err   error
+	delay time.Duration
+}
+
+func (t throttleRetry) Error() string {
+	return "Throttle (" + t.delay.String() + "), error: " + t.err.Error()
+}
+
+func (t throttleRetry) Unwrap() error {
+	return t.err
+}
+
+// NewThrottleRetry creates a new throttle retry error.
+func NewThrottleRetry(err error, delay time.Duration) error {
+	return throttleRetry{
+		err:   err,
+		delay: delay,
+	}
+}
+
+type retrySender struct {
+	traceAttribute attribute.KeyValue
+	cfg            RetrySettings
+	nextSender     requestSender
+	stopCh         chan struct{}
+	logger         *zap.Logger
+}
+
+// send implements the requestSender interface
+func (rs *retrySender) send(req request) error {
+	if !rs.cfg.Enabled {
+		err := rs.nextSender.send(req)
+		if err != nil {
+			rs.logger.Error(
+				"Exporting failed. Try enabling retry_on_failure config option.",
+				zap.Error(err),
+			)
+		}
+		return err
+	}
+
+	// Do not use NewExponentialBackOff since it calls Reset and the code here must
+	// call Reset after changing the InitialInterval (this saves an unnecessary call to Now).
+	expBackoff := backoff.ExponentialBackOff{
+		InitialInterval:     rs.cfg.InitialInterval,
+		RandomizationFactor: backoff.DefaultRandomizationFactor,
+		Multiplier:          backoff.DefaultMultiplier,
+		MaxInterval:         rs.cfg.MaxInterval,
+		MaxElapsedTime:      rs.cfg.MaxElapsedTime,
+		Stop:                backoff.Stop,
+		Clock:               backoff.SystemClock,
+	}
+	expBackoff.Reset()
+	span := trace.SpanFromContext(req.context())
+	retryNum := int64(0)
+	for {
+		span.AddEvent(
+			"Sending request.",
+			trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum)))
+
+		err := rs.nextSender.send(req)
+		if err == nil {
+			return nil
+		}
+
+		// Immediately drop data on permanent errors.
+		if consumererror.IsPermanent(err) {
+			rs.logger.Error(
+				"Exporting failed. The error is not retryable. Dropping data.",
+				zap.Error(err),
+				zap.Int("dropped_items", req.count()),
+			)
+			return err
+		}
+
+		// Give the request a chance to extract signal data to retry if only some data
+		// failed to process.
+		req = req.onError(err)
+
+		backoffDelay := expBackoff.NextBackOff()
+		if backoffDelay == backoff.Stop {
+			// throw away the batch
+			err = fmt.Errorf("max elapsed time expired %w", err)
+			rs.logger.Error(
+				"Exporting failed. 
No more retries left. Dropping data.", + zap.Error(err), + zap.Int("dropped_items", req.count()), + ) + return err + } + + throttleErr := throttleRetry{} + isThrottle := errors.As(err, &throttleErr) + if isThrottle { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + backoffDelayStr := backoffDelay.String() + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( + rs.traceAttribute, + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. + select { + case <-req.context().Done(): + return fmt.Errorf("request is cancelled or timed out %w", err) + case <-rs.stopCh: + return fmt.Errorf("interrupted due to shutdown %w", err) + case <-time.After(backoffDelay): + } + } +} + +// max returns the larger of x or y. +func max(x, y time.Duration) time.Duration { + if x < y { + return y + } + return x +} + +type noCancellationContext struct { + context.Context +} + +func (noCancellationContext) Deadline() (deadline time.Time, ok bool) { + return +} + +func (noCancellationContext) Done() <-chan struct{} { + return nil +} + +func (noCancellationContext) Err() error { + return nil +} diff --git a/internal/otel_collector/exporter/exporterhelper/traces.go b/internal/otel_collector/exporter/exporterhelper/traces.go new file mode 100644 index 00000000000..3f63e3744b4 --- /dev/null +++ b/internal/otel_collector/exporter/exporterhelper/traces.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package exporterhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +type tracesRequest struct { + baseRequest + td pdata.Traces + pusher consumerhelper.ConsumeTracesFunc +} + +func newTracesRequest(ctx context.Context, td pdata.Traces, pusher consumerhelper.ConsumeTracesFunc) request { + return &tracesRequest{ + baseRequest: baseRequest{ctx: ctx}, + td: td, + pusher: pusher, + } +} + +func (req *tracesRequest) onError(err error) request { + var traceError consumererror.Traces + if consumererror.AsTraces(err, &traceError) { + return newTracesRequest(req.ctx, traceError.GetTraces(), req.pusher) + } + return req +} + +func (req *tracesRequest) export(ctx context.Context) error { + return req.pusher(ctx, req.td) +} + +func (req *tracesRequest) count() int { + return req.td.SpanCount() +} + +type traceExporter struct { + *baseExporter + consumer.Traces +} + +// NewTracesExporter creates a TracesExporter that records observability metrics and wraps every request with a Span. +func NewTracesExporter( + cfg config.Exporter, + set component.ExporterCreateSettings, + pusher consumerhelper.ConsumeTracesFunc, + options ...Option, +) (component.TracesExporter, error) { + + if cfg == nil { + return nil, errNilConfig + } + + if set.Logger == nil { + return nil, errNilLogger + } + + if pusher == nil { + return nil, errNilPushTraceData + } + + bs := fromOptions(options...) + be := newBaseExporter(cfg, set, bs) + be.wrapConsumerSender(func(nextSender requestSender) requestSender { + return &tracesExporterWithObservability{ + obsrep: be.obsrep, + nextSender: nextSender, + } + }) + + tc, err := consumerhelper.NewTraces(func(ctx context.Context, td pdata.Traces) error { + req := newTracesRequest(ctx, td, pusher) + err := be.sender.send(req) + if errors.Is(err, errSendingQueueIsFull) { + be.obsrep.recordTracesEnqueueFailure(req.context(), req.count()) + } + return err + }, bs.consumerOptions...) + + return &traceExporter{ + baseExporter: be, + Traces: tc, + }, err +} + +type tracesExporterWithObservability struct { + obsrep *obsExporter + nextSender requestSender +} + +func (tewo *tracesExporterWithObservability) send(req request) error { + req.setContext(tewo.obsrep.StartTracesOp(req.context())) + // Forward the data to the next consumer (this pusher is the next). + err := tewo.nextSender.send(req) + tewo.obsrep.EndTracesOp(req.context(), req.count(), err) + return err +} diff --git a/internal/otel_collector/exporter/loggingexporter/README.md b/internal/otel_collector/exporter/loggingexporter/README.md new file mode 100644 index 00000000000..1ad42f63daa --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/README.md @@ -0,0 +1,29 @@ +# Logging Exporter + +Exports data to the console via zap.Logger. + +Supported pipeline types: traces, metrics, logs + +## Getting Started + +The following settings are optional: + +- `loglevel` (default = `info`): the log level of the logging export + (debug|info|warn|error). When set to `debug`, pipeline data is verbosely + logged. +- `sampling_initial` (default = `2`): number of messages initially logged each + second. +- `sampling_thereafter` (default = `500`): sampling rate after the initial + messages are logged (every Mth message is logged). 
Refer to [Zap
+  docs](https://godoc.org/go.uber.org/zap/zapcore#NewSampler) for more details
+  on how sampling parameters impact the number of messages.
+
+Example:
+
+```yaml
+exporters:
+  logging:
+    loglevel: debug
+    sampling_initial: 5
+    sampling_thereafter: 200
+```
diff --git a/internal/otel_collector/exporter/loggingexporter/config.go b/internal/otel_collector/exporter/loggingexporter/config.go
new file mode 100644
index 00000000000..b89e54a319c
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/config.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loggingexporter
+
+import (
+	"go.opentelemetry.io/collector/config"
+)
+
+// Config defines configuration for logging exporter.
+type Config struct {
+	config.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// LogLevel defines log level of the logging exporter; options are debug, info, warn, error.
+	LogLevel string `mapstructure:"loglevel"`
+
+	// SamplingInitial defines how many samples are initially logged during each second.
+	SamplingInitial int `mapstructure:"sampling_initial"`
+
+	// SamplingThereafter defines the sampling rate after the initial samples are logged.
+	SamplingThereafter int `mapstructure:"sampling_thereafter"`
+}
+
+var _ config.Exporter = (*Config)(nil)
+
+// Validate checks if the exporter configuration is valid
+func (cfg *Config) Validate() error {
+	return nil
+}
diff --git a/internal/otel_collector/exporter/loggingexporter/doc.go b/internal/otel_collector/exporter/loggingexporter/doc.go
new file mode 100644
index 00000000000..d6652178b27
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package loggingexporter exports data to console as logs.
+package loggingexporter
diff --git a/internal/otel_collector/exporter/loggingexporter/factory.go b/internal/otel_collector/exporter/loggingexporter/factory.go
new file mode 100644
index 00000000000..d7a3bcf502f
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/factory.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loggingexporter
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+	// The value of "type" key in configuration.
+	typeStr                   = "logging"
+	defaultSamplingInitial    = 2
+	defaultSamplingThereafter = 500
+)
+
+// NewFactory creates a factory for the logging exporter.
+func NewFactory() component.ExporterFactory {
+	return exporterhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		exporterhelper.WithTraces(createTracesExporter),
+		exporterhelper.WithMetrics(createMetricsExporter),
+		exporterhelper.WithLogs(createLogsExporter))
+}
+
+func createDefaultConfig() config.Exporter {
+	return &Config{
+		ExporterSettings:   config.NewExporterSettings(config.NewID(typeStr)),
+		LogLevel:           "info",
+		SamplingInitial:    defaultSamplingInitial,
+		SamplingThereafter: defaultSamplingThereafter,
+	}
+}
+
+func createTracesExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.TracesExporter, error) {
+	cfg := config.(*Config)
+
+	exporterLogger, err := createLogger(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return newTracesExporter(config, cfg.LogLevel, exporterLogger, set)
+}
+
+func createMetricsExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.MetricsExporter, error) {
+	cfg := config.(*Config)
+
+	exporterLogger, err := createLogger(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return newMetricsExporter(config, cfg.LogLevel, exporterLogger, set)
+}
+
+func createLogsExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.LogsExporter, error) {
+	cfg := config.(*Config)
+
+	exporterLogger, err := createLogger(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return newLogsExporter(config, cfg.LogLevel, exporterLogger, set)
+}
+
+func createLogger(cfg *Config) (*zap.Logger, error) {
+	var level zapcore.Level
+	err := (&level).UnmarshalText([]byte(cfg.LogLevel))
+	if err != nil {
+		return nil, err
+	}
+
+	// We take the development config as the base, since it matches the purpose of the
+	// logging exporter: debugging (hence, e.g., the console encoder).
+	conf := zap.NewDevelopmentConfig()
+	conf.Level = zap.NewAtomicLevelAt(level)
+	conf.Sampling = &zap.SamplingConfig{
+		Initial:    cfg.SamplingInitial,
+		Thereafter: cfg.SamplingThereafter,
+	}
+
+	logginglogger, err := conf.Build()
+	if err != nil {
+		return nil, err
+	}
+	return logginglogger, nil
+}
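A standalone sketch of the sampling behaviour createLogger configures above: zap applies the thresholds per one-second tick and per distinct message. The values are this factory's defaults; the loop bound is arbitrary.

```go
package main

import "go.uber.org/zap"

func main() {
	conf := zap.NewDevelopmentConfig()
	// As in createLogger: within each one-second window, log the first 2
	// occurrences of an identical message, then every 500th thereafter.
	conf.Sampling = &zap.SamplingConfig{Initial: 2, Thereafter: 500}
	logger, err := conf.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	for i := 0; i < 10000; i++ {
		logger.Info("repeated message") // only a handful are actually emitted
	}
}
```

diff --git a/internal/otel_collector/exporter/loggingexporter/known_sync_error.go b/internal/otel_collector/exporter/loggingexporter/known_sync_error.go
new file mode 100644
index 00000000000..cad34d12b3c
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/known_sync_error.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.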
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package loggingexporter + +import ( + "syscall" +) + +// knownSyncError returns true if the given error is one of the known +// non-actionable errors returned by Sync on Linux and macOS: +// +// Linux: +// - sync /dev/stdout: invalid argument +// +// macOS: +// - sync /dev/stdout: inappropriate ioctl for device +// +func knownSyncError(err error) bool { + switch err { + case syscall.EINVAL, syscall.ENOTSUP, syscall.ENOTTY: + return true + } + return false +} diff --git a/internal/otel_collector/exporter/loggingexporter/known_sync_error_windows.go b/internal/otel_collector/exporter/loggingexporter/known_sync_error_windows.go new file mode 100644 index 00000000000..e2d38a098b8 --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/known_sync_error_windows.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package loggingexporter + +import "golang.org/x/sys/windows" + +// knownSyncError returns true if the given error is one of the known +// non-actionable errors returned by Sync on Windows: +// +// - sync /dev/stderr: The handle is invalid. +// +func knownSyncError(err error) bool { + return err == windows.ERROR_INVALID_HANDLE +} diff --git a/internal/otel_collector/exporter/loggingexporter/logging_exporter.go b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go new file mode 100644 index 00000000000..daf0e49a95a --- /dev/null +++ b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package loggingexporter + +import ( + "context" + "os" + "strings" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/internal/otlptext" + "go.opentelemetry.io/collector/model/pdata" +) + +type loggingExporter struct { + logger *zap.Logger + debug bool + logsMarshaler pdata.LogsMarshaler + metricsMarshaler pdata.MetricsMarshaler + tracesMarshaler pdata.TracesMarshaler +} + +func (s *loggingExporter) pushTraces(_ context.Context, td pdata.Traces) error { + s.logger.Info("TracesExporter", zap.Int("#spans", td.SpanCount())) + + if !s.debug { + return nil + } + + buf, err := s.tracesMarshaler.MarshalTraces(td) + if err != nil { + return err + } + s.logger.Debug(string(buf)) + return nil +} + +func (s *loggingExporter) pushMetrics(_ context.Context, md pdata.Metrics) error { + s.logger.Info("MetricsExporter", zap.Int("#metrics", md.MetricCount())) + + if !s.debug { + return nil + } + + buf, err := s.metricsMarshaler.MarshalMetrics(md) + if err != nil { + return err + } + s.logger.Debug(string(buf)) + return nil +} + +func (s *loggingExporter) pushLogs(_ context.Context, ld pdata.Logs) error { + s.logger.Info("LogsExporter", zap.Int("#logs", ld.LogRecordCount())) + + if !s.debug { + return nil + } + + buf, err := s.logsMarshaler.MarshalLogs(ld) + if err != nil { + return err + } + s.logger.Debug(string(buf)) + return nil +} + +func newLoggingExporter(level string, logger *zap.Logger) *loggingExporter { + return &loggingExporter{ + debug: strings.ToLower(level) == "debug", + logger: logger, + logsMarshaler: otlptext.NewTextLogsMarshaler(), + metricsMarshaler: otlptext.NewTextMetricsMarshaler(), + tracesMarshaler: otlptext.NewTextTracesMarshaler(), + } +} + +// newTracesExporter creates an exporter.TracesExporter that just drops the +// received data and logs debugging messages. +func newTracesExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.TracesExporter, error) { + s := newLoggingExporter(level, logger) + return exporterhelper.NewTracesExporter( + config, + set, + s.pushTraces, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // Disable Timeout/RetryOnFailure and SendingQueue + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}), + exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}), + exporterhelper.WithShutdown(loggerSync(logger)), + ) +} + +// newMetricsExporter creates an exporter.MetricsExporter that just drops the +// received data and logs debugging messages. 
+func newMetricsExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.MetricsExporter, error) {
+	s := newLoggingExporter(level, logger)
+	return exporterhelper.NewMetricsExporter(
+		config,
+		set,
+		s.pushMetrics,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
+		// Disable Timeout/RetryOnFailure and SendingQueue
+		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
+		exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}),
+		exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}),
+		exporterhelper.WithShutdown(loggerSync(logger)),
+	)
+}
+
+// newLogsExporter creates an exporter.LogsExporter that just drops the
+// received data and logs debugging messages.
+func newLogsExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.LogsExporter, error) {
+	s := newLoggingExporter(level, logger)
+	return exporterhelper.NewLogsExporter(
+		config,
+		set,
+		s.pushLogs,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
+		// Disable Timeout/RetryOnFailure and SendingQueue
+		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
+		exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}),
+		exporterhelper.WithQueue(exporterhelper.QueueSettings{Enabled: false}),
+		exporterhelper.WithShutdown(loggerSync(logger)),
+	)
+}
+
+func loggerSync(logger *zap.Logger) func(context.Context) error {
+	return func(context.Context) error {
+		// Currently Sync() returns a different error depending on the OS.
+		// Since these are not actionable, ignore them.
+		err := logger.Sync()
+		if osErr, ok := err.(*os.PathError); ok {
+			wrappedErr := osErr.Unwrap()
+			if knownSyncError(wrappedErr) {
+				err = nil
+			}
+		}
+		return err
+	}
+}
diff --git a/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml b/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml
new file mode 100644
index 00000000000..5d825f3e526
--- /dev/null
+++ b/internal/otel_collector/exporter/loggingexporter/testdata/config.yaml
@@ -0,0 +1,22 @@
+receivers:
+  nop:
+
+processors:
+  nop:
+
+exporters:
+  logging:
+  logging/2:
+    loglevel: debug
+    sampling_initial: 10
+    sampling_thereafter: 50
+
+service:
+  pipelines:
+    traces:
+      receivers: [nop]
+      processors: [nop]
+      exporters: [logging]
+    metrics:
+      receivers: [nop]
+      exporters: [logging,logging/2]
diff --git a/internal/otel_collector/exporter/otlpexporter/README.md b/internal/otel_collector/exporter/otlpexporter/README.md
new file mode 100644
index 00000000000..08d93e3b390
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/README.md
@@ -0,0 +1,52 @@
+# OTLP gRPC Exporter
+
+Exports data via gRPC using [OTLP](
+https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md)
+format. By default, this exporter requires TLS and offers queued retry capabilities.
+
+:warning: OTLP metrics and logs formats are currently marked as "Alpha" and may change in
+incompatible ways at any time.
+
+Supported pipeline types: traces, metrics
+
+## Getting Started
+
+The following settings are required:
+
+- `endpoint` (no default): host:port to which the exporter is going to send OTLP trace data,
+using the gRPC protocol. The valid syntax is described
+[here](https://github.com/grpc/grpc/blob/master/doc/naming.md).
+If a scheme of `https` is used, then client transport security is enabled and overrides the `insecure` setting.
+
+By default, TLS is enabled:
+
+- `insecure` (default = `false`): set to `true` to disable client transport security for
+  the exporter's connection.
+
+As a result, the following parameters are also required:
+
+- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` (no default): path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+Example:
+
+```yaml
+exporters:
+  otlp:
+    endpoint: otelcol2:4317
+    cert_file: file.cert
+    key_file: file.key
+  otlp/2:
+    endpoint: otelcol2:4317
+    insecure: true
+```
+
+## Advanced Configuration
+
+Several helper files are leveraged to provide additional capabilities automatically:
+
+- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md)
+- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)
+- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)
diff --git a/internal/otel_collector/exporter/otlpexporter/cfg-schema.yaml b/internal/otel_collector/exporter/otlpexporter/cfg-schema.yaml
new file mode 100644
index 00000000000..abecc7d95cb
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/cfg-schema.yaml
@@ -0,0 +1,151 @@
+type: '*otlpexporter.Config'
+fields:
+- name: timeout
+  type: time.Duration
+  kind: int64
+  default: 5s
+  doc: |
+    Timeout is the timeout for every attempt to send data to the backend.
+- name: sending_queue
+  type: exporterhelper.QueueSettings
+  kind: struct
+  fields:
+  - name: enabled
+    kind: bool
+    default: true
+    doc: |
+      Enabled indicates whether to enqueue batches before sending to the consumerSender.
+  - name: num_consumers
+    kind: int
+    default: 10
+    doc: |
+      NumConsumers is the number of consumers from the queue.
+  - name: queue_size
+    kind: int
+    default: 5000
+    doc: |
+      QueueSize is the maximum number of batches allowed in queue at a given time.
+- name: retry_on_failure
+  type: exporterhelper.RetrySettings
+  kind: struct
+  fields:
+  - name: enabled
+    kind: bool
+    default: true
+    doc: |
+      Enabled indicates whether to retry sending batches in case of export failure.
+  - name: initial_interval
+    type: time.Duration
+    kind: int64
+    default: 5s
+    doc: |
+      InitialInterval is the time to wait after the first failure before retrying.
+  - name: max_interval
+    type: time.Duration
+    kind: int64
+    default: 30s
+    doc: |
+      MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+      consecutive retries will always be `MaxInterval`.
+  - name: max_elapsed_time
+    type: time.Duration
+    kind: int64
+    default: 5m0s
+    doc: |
+      MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+      Once this value is reached, the data is discarded.
+- name: endpoint
+  kind: string
+  doc: |
+    The target to which the exporter is going to send traces or metrics,
+    using the gRPC protocol. The valid syntax is described at
+    https://github.com/grpc/grpc/blob/master/doc/naming.md.
+- name: compression
+  kind: string
+  doc: |
+    The compression key for supported compression types within
+    collector. Currently the only supported mode is `gzip`.
+- name: ca_file
+  kind: string
+  doc: |
+    Path to the CA cert. For a client this verifies the server certificate.
+    For a server this verifies client certificates. If empty uses system root CA.
+    (optional)
+- name: cert_file
+  kind: string
+  doc: |
+    Path to the TLS cert to use for TLS required connections. (optional)
+- name: key_file
+  kind: string
+  doc: |
+    Path to the TLS key to use for TLS required connections. (optional)
+- name: insecure
+  kind: bool
+  doc: |
+    In gRPC when set to true, this is used to disable the client transport security.
+    See https://godoc.org/google.golang.org/grpc#WithInsecure.
+    In HTTP, this disables verifying the server's certificate chain and host name
+    (InsecureSkipVerify in the tls Config). Please refer to
+    https://godoc.org/crypto/tls#Config for more information.
+    (optional, default false)
+- name: server_name_override
+  kind: string
+  doc: |
+    ServerName requested by client for virtual hosting.
+    This sets the ServerName in the TLSConfig. Please refer to
+    https://godoc.org/crypto/tls#Config for more information. (optional)
+- name: keepalive
+  type: '*configgrpc.KeepaliveClientConfig'
+  kind: ptr
+  doc: |
+    The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams
+    (https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).
+  fields:
+  - name: time
+    type: time.Duration
+    kind: int64
+  - name: timeout
+    type: time.Duration
+    kind: int64
+  - name: permit_without_stream
+    kind: bool
+- name: read_buffer_size
+  kind: int
+  doc: |
+    ReadBufferSize for gRPC client. See grpc.WithReadBufferSize
+    (https://godoc.org/google.golang.org/grpc#WithReadBufferSize).
+- name: write_buffer_size
+  kind: int
+  default: 524288
+  doc: |
+    WriteBufferSize for gRPC client. See grpc.WithWriteBufferSize
+    (https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).
+- name: wait_for_ready
+  kind: bool
+  doc: |
+    WaitForReady parameter configures client to wait for ready state before sending data.
+    (https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)
+- name: headers
+  type: map[string]string
+  kind: map
+  doc: |
+    The headers associated with gRPC requests.
+- name: per_rpc_auth
+  type: '*configgrpc.PerRPCAuthConfig'
+  kind: ptr
+  doc: |
+    PerRPCAuth parameter configures the client to send authentication data on a per-RPC basis.
+  fields:
+  - name: type
+    kind: string
+    doc: |
+      AuthType represents the authentication type to use. Currently, only 'bearer' is supported.
+  - name: bearer_token
+    kind: string
+    doc: |
+      BearerToken specifies the bearer token to use for every RPC.
+- name: balancer_name
+  kind: string
+  doc: |
+    Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.
+    https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md
diff --git a/internal/otel_collector/exporter/otlpexporter/config.go b/internal/otel_collector/exporter/otlpexporter/config.go
new file mode 100644
index 00000000000..90982935eff
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/config.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpexporter
+
+import (
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Config defines configuration for the OTLP exporter.
+type Config struct {
+	config.ExporterSettings        `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+	exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueSettings   `mapstructure:"sending_queue"`
+	exporterhelper.RetrySettings   `mapstructure:"retry_on_failure"`
+
+	configgrpc.GRPCClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+}
+
+var _ config.Exporter = (*Config)(nil)
+
+// Validate checks if the exporter configuration is valid
+func (cfg *Config) Validate() error {
+	return nil
+}
diff --git a/internal/otel_collector/exporter/otlpexporter/doc.go b/internal/otel_collector/exporter/otlpexporter/doc.go
new file mode 100644
index 00000000000..41d4b7662fa
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otlpexporter exports data by using the OTLP format to a gRPC endpoint.
+package otlpexporter
diff --git a/internal/otel_collector/exporter/otlpexporter/factory.go b/internal/otel_collector/exporter/otlpexporter/factory.go
new file mode 100644
index 00000000000..bb8f89bbf48
--- /dev/null
+++ b/internal/otel_collector/exporter/otlpexporter/factory.go
@@ -0,0 +1,122 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package otlpexporter + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "otlp" +) + +// NewFactory creates a factory for OTLP exporter. +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTracesExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) +} + +func createDefaultConfig() config.Exporter { + return &Config{ + ExporterSettings: config.NewExporterSettings(config.NewID(typeStr)), + TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + GRPCClientSettings: configgrpc.GRPCClientSettings{ + Headers: map[string]string{}, + // We almost read 0 bytes, so no need to tune ReadBufferSize. + WriteBufferSize: 512 * 1024, + }, + } +} + +func createTracesExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.TracesExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + return exporterhelper.NewTracesExporter( + cfg, + set, + oce.pushTraces, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown)) +} + +func createMetricsExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.MetricsExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + return exporterhelper.NewMetricsExporter( + cfg, + set, + oce.pushMetrics, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), + ) +} + +func createLogsExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.LogsExporter, error) { + oce, err := newExporter(cfg) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + return exporterhelper.NewLogsExporter( + cfg, + set, + oce.pushLogs, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), + ) +} diff --git a/internal/otel_collector/exporter/otlpexporter/otlp.go b/internal/otel_collector/exporter/otlpexporter/otlp.go new file mode 100644 index 00000000000..53ea09eb40d --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/otlp.go @@ -0,0 +1,229 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpexporter
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+	"go.opentelemetry.io/collector/model/otlpgrpc"
+	"go.opentelemetry.io/collector/model/pdata"
+)
+
+type exporter struct {
+	// Input configuration.
+	config *Config
+	w      *grpcSender
+}
+
+// Create a new exporter and start it. The exporter will begin connecting, but
+// this function may return before the connection is established.
+func newExporter(cfg config.Exporter) (*exporter, error) {
+	oCfg := cfg.(*Config)
+
+	if oCfg.Endpoint == "" {
+		return nil, errors.New("OTLP exporter config requires an Endpoint")
+	}
+
+	return &exporter{config: oCfg}, nil
+}
+
+// start actually creates the gRPC connection. The client construction is deferred
+// until this point, as this is the only place we get hold of the Extensions that
+// are required to construct the auth round tripper.
+func (e *exporter) start(_ context.Context, host component.Host) (err error) {
+	e.w, err = newGrpcSender(e.config, host.GetExtensions())
+	return
+}
+
+func (e *exporter) shutdown(context.Context) error {
+	return e.w.stop()
+}
+
+func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error {
+	if err := e.w.exportTrace(ctx, td); err != nil {
+		return fmt.Errorf("failed to push trace data via OTLP exporter: %w", err)
+	}
+	return nil
+}
+
+func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error {
+	if err := e.w.exportMetrics(ctx, md); err != nil {
+		return fmt.Errorf("failed to push metrics data via OTLP exporter: %w", err)
+	}
+	return nil
+}
+
+func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error {
+	if err := e.w.exportLogs(ctx, ld); err != nil {
+		return fmt.Errorf("failed to push log data via OTLP exporter: %w", err)
+	}
+	return nil
+}
+
+type grpcSender struct {
+	// gRPC clients and connection.
+	traceExporter  otlpgrpc.TracesClient
+	metricExporter otlpgrpc.MetricsClient
+	logExporter    otlpgrpc.LogsClient
+	clientConn     *grpc.ClientConn
+	metadata       metadata.MD
+	callOptions    []grpc.CallOption
+}
+
+func newGrpcSender(config *Config, ext map[config.ComponentID]component.Extension) (*grpcSender, error) {
+	dialOpts, err := config.GRPCClientSettings.ToDialOptions(ext)
+	if err != nil {
+		return nil, err
+	}
+
+	var clientConn *grpc.ClientConn
+	if clientConn, err = grpc.Dial(config.GRPCClientSettings.SanitizedEndpoint(), dialOpts...); err != nil {
+		return nil, err
+	}
+
+	gs := &grpcSender{
+		traceExporter:  otlpgrpc.NewTracesClient(clientConn),
+		metricExporter: otlpgrpc.NewMetricsClient(clientConn),
+		logExporter:    otlpgrpc.NewLogsClient(clientConn),
+		clientConn:     clientConn,
+		metadata:       metadata.New(config.GRPCClientSettings.Headers),
+		callOptions: []grpc.CallOption{
+			grpc.WaitForReady(config.GRPCClientSettings.WaitForReady),
+		},
+	}
+	return gs, nil
+}
+
+func (gs *grpcSender) stop() error {
+	return gs.clientConn.Close()
+}
+
+func (gs *grpcSender) exportTrace(ctx context.Context, td pdata.Traces) error {
+	_, err := gs.traceExporter.Export(gs.enhanceContext(ctx), td, gs.callOptions...)
+	return processError(err)
+}
+
+func (gs *grpcSender) exportMetrics(ctx context.Context, md pdata.Metrics) error {
+	_, err := gs.metricExporter.Export(gs.enhanceContext(ctx), md, gs.callOptions...)
+	return processError(err)
+}
+
+func (gs *grpcSender) exportLogs(ctx context.Context, ld pdata.Logs) error {
+	_, err := gs.logExporter.Export(gs.enhanceContext(ctx), ld, gs.callOptions...)
+	return processError(err)
+}
+
+func (gs *grpcSender) enhanceContext(ctx context.Context) context.Context {
+	if gs.metadata.Len() > 0 {
+		return metadata.NewOutgoingContext(ctx, gs.metadata)
+	}
+	return ctx
+}
+
+// processError inspects the gRPC status of a completed export call and implements
+// the common OTLP logic around request handling, such as retries and throttling:
+// nil and OK statuses count as success, non-retryable codes are wrapped as
+// permanent errors, and throttled requests carry the server-requested delay.
+func processError(err error) error {
+	if err == nil {
+		// Request is successful, we are done.
+		return nil
+	}
+
+	// We have an error, check gRPC status code.
+
+	st := status.Convert(err)
+	if st.Code() == codes.OK {
+		// Not really an error, still success.
+		return nil
+	}
+
+	// Now, this is a real error.
+
+	if !shouldRetry(st.Code()) {
+		// It is not a retryable error, we should not retry.
+		return consumererror.Permanent(err)
+	}
+
+	// Need to retry.
+
+	// Check if the server returned throttling information.
+	throttleDuration := getThrottleDuration(st)
+	if throttleDuration != 0 {
+		return exporterhelper.NewThrottleRetry(err, throttleDuration)
+	}
+
+	return err
+}
+
+func shouldRetry(code codes.Code) bool {
+	switch code {
+	case codes.OK:
+		// Success. This function should not be called for this code, the best we
+		// can do is tell the caller not to retry.
+		return false
+
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.PermissionDenied,
+		codes.Unauthenticated,
+		codes.ResourceExhausted,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// These are retryable errors.
+		return true
+
+	case codes.Unknown,
+		codes.InvalidArgument,
+		codes.NotFound,
+		codes.AlreadyExists,
+		codes.FailedPrecondition,
+		codes.Unimplemented,
+		codes.Internal:
+		// These are fatal errors, don't retry.
+		return false
+
+	default:
+		// Don't retry on unknown codes.
+ return false + } +} + +func getThrottleDuration(status *status.Status) time.Duration { + // See if throttling information is available. + for _, detail := range status.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + if t.RetryDelay.Seconds > 0 || t.RetryDelay.Nanos > 0 { + // We are throttled. Wait before retrying as requested by the server. + return time.Duration(t.RetryDelay.Seconds)*time.Second + time.Duration(t.RetryDelay.Nanos)*time.Nanosecond + } + return 0 + } + } + return 0 +} diff --git a/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml b/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml new file mode 100644 index 00000000000..1a9679f87af --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/testdata/config.yaml @@ -0,0 +1,44 @@ +extensions: + nop: + +receivers: + nop: + +processors: + nop: + +exporters: + otlp: + otlp/2: + endpoint: "1.2.3.4:1234" + compression: "on" + ca_file: /var/lib/mycert.pem + timeout: 10s + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + auth: + authenticator: nop + headers: + "can you have a . here?": "F0000000-0000-0000-0000-000000000000" + header1: 234 + another: "somevalue" + keepalive: + time: 20s + timeout: 30s + permit_without_stream: true + balancer_name: "round_robin" + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [otlp] diff --git a/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem new file mode 100644 index 00000000000..b3842e597a2 --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/testdata/test_cert.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICpDCCAYwCCQC5oaFsqLW3GTANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAls +b2NhbGhvc3QwHhcNMjEwNzE0MDAxMzU2WhcNMzEwNzEyMDAxMzU2WjAUMRIwEAYD +VQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDO +mKaE1qg5VLMwaUnSzufT23rRJFbuy/HDXwsH63yZVSsISQkGjkBYBgrqAMtVnsI/ +l4gXtBWkZtJFs68Sbo9ps3W0PdB5+d12R5NUNA1rkZtx3jtEN33dpGhifug/TIZe +7Zr0G1z6gNoaEezk0Jpg4KsH7QpIeHPRhIZMyWeqddgD/qL4/ukaU4NOORuF3WoT +oo2LpI3jUq66mz2N2Inq0V/OX7BYB4Ur6EtjWh2baiUuw9fq+oLUlgZd6ypnugC/ ++YfgYqvWtRntmEr0Z+O4Kz81P2IpH/0h1RFhWyK6thVGa9cx6aseCp3V2cMXfGfc +z4n3Uvz87v+bZvGbcse/AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAAlvNBNoqXUQ +ohR0eozIHGeJ94U7WK5zXf2NSvmRlwHzHXvUq6GKd+8Bv1foMjI6OpSOZmjtRGsc +rWET1WjSyQddRfqYazhWp1IyYu5LfATwPS+RXJAkWixKVfG+Ta2x6u+aT/bSZwEg +NwRerc6pyqv5UG8Z7Pe1kAxbgOwZv5KXAewIgTSbEkmIp1Dg8GhGeWD5pjYNCkJV +Na2KMAUWP3PeQzdSBKmBNpsRUALuSTxb5u7pl+PA7FLInTtDeyZn8xpO1GPBhbJE +trDbmTbj5YexOXEaQtGtZ6fwRw2jnUm8nqtXozxIomnVTBO8vLmZAUgyJ71trRw0 +gE9tH5Ndlug= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/exporter/otlpexporter/testdata/test_key.pem b/internal/otel_collector/exporter/otlpexporter/testdata/test_key.pem new file mode 100644 index 00000000000..dedfad3df6e --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/testdata/test_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOmKaE1qg5VLMw +aUnSzufT23rRJFbuy/HDXwsH63yZVSsISQkGjkBYBgrqAMtVnsI/l4gXtBWkZtJF +s68Sbo9ps3W0PdB5+d12R5NUNA1rkZtx3jtEN33dpGhifug/TIZe7Zr0G1z6gNoa +Eezk0Jpg4KsH7QpIeHPRhIZMyWeqddgD/qL4/ukaU4NOORuF3WoToo2LpI3jUq66 +mz2N2Inq0V/OX7BYB4Ur6EtjWh2baiUuw9fq+oLUlgZd6ypnugC/+YfgYqvWtRnt 
+mEr0Z+O4Kz81P2IpH/0h1RFhWyK6thVGa9cx6aseCp3V2cMXfGfcz4n3Uvz87v+b
+ZvGbcse/AgMBAAECggEADeR39iDVKR3H+u5pl3JwZm+w35V4/w/ZzxB6FmtAcrMm
+dKUspTM1onWtkDTDd5t4ZnxTG3zxo5+Cbkt571xd6na16Ivrk/g4aza+8n+Zk200
+LcEK7ThqD1h56H2uMmt78bA6pkWcx/+YKv6flndsmi0hcyP+eAcZirJFsa4teWna
+P6rhI9zThc9OcecqGZIlmzJQ4cLbIO86QqkWW6yjKYg6riOb2g+i3e97ZngMCTcV
+lni+sksLlXBNKPqh1AkiUFe4pInRBh4LGQ5rNSYswEqlQY0iW0u4Hs3HNou0On+8
+1T8m5wzKQ+23AN+vVRJ/MHssQiB/TPK92jXVgEz6eQKBgQD2GEb7NzDIxsAQZBQo
+tt3jYitNcAEqMWeT7wxCMMue4wIrT6Fp6NuG5NMVqLglzx72m6TXg7YzZxPrAnlH
+jblWI4sxwVC8BjjYyGud7qMuhUIZmI8aS9HuYW0ODSxkcpVVXd4HDUYKg7PafAkl
+cj745E5KGD+qW44KASTTQ1SwRQKBgQDW6WLp/nPVPO5YEK4nzS7b1RRC8ypHiKd6
+LzhA2izgcsmO3F3Y5ZZ5rzeFbjgZiGFTUB/r1mgomI8kZyIGP1AN6o8oY9I89gHY
+/DEEagIsFK5jAEoMeN0qbgqasOXpi+uUHCNidWa7OWOL9Rsh7dyVT54xcqMC2Qak
+Vpoy5miiMwKBgQDuOHH9nF9M+5fQRhB9mQcRpWXlgBagkVKCkVR8fl+dXoIrCtpl
+e1OGMNtki/42G1kNv3zCYm1tNMrDI5HjAf32tFF5yHguipdcwiXqq6aq0bQ6ssNT
+4TFGYGkAwR/H3GNST5stmFvEsdjYFlmENiNfKyHd97spXZcReCn9l5/TQQKBgDRG
+PpYWG4zBrmPjYskxonU8ZhpG1YDi34Hb3H4B06qgoSBLv9QTPD/K++FLxv+G6c1/
+DtSpqVo+iYrcPy1v1wQbisjTRv8nA5oI9c9SDcc1HJneJyTTfVBlxdSMtM/TBfFX
+ys+XKO7fbbRMYVYmamIzJJJ4hOgba/8rRYSeANN7AoGBAMDdrT+ig3aDMratbAvY
+lqsfN3AtxoZ+ZVQYyUbzTSZPZ/to9eNuBzhRKcQ3QfG95nrHb7OnWHa7+1kc4p/Q
+jMgzJgRpajlES+F3CCMPgJIJg7Ev+yiSCJLP9ZOsC+E96bK265hUcDyCXwb3Wzmg
+4L9sc1QsQW80QO/RnaEzGO51
+-----END PRIVATE KEY-----
diff --git a/internal/otel_collector/exporter/otlphttpexporter/README.md b/internal/otel_collector/exporter/otlphttpexporter/README.md
new file mode 100644
index 00000000000..32d515a088a
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/README.md
@@ -0,0 +1,52 @@
+# OTLP/HTTP Exporter
+
+Exports traces and/or metrics via HTTP using [OTLP](
+https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md)
+format.
+
+*Important: the OTLP metrics format is currently marked as "Alpha" and may change in
+an incompatible way at any time.*
+
+The following settings are required:
+
+- `endpoint` (no default): The target base URL to send data to (e.g.: https://example.com:4318).
+  To send each signal a corresponding path will be added to this base URL, i.e. for traces
+  "/v1/traces" will be appended, for metrics "/v1/metrics" will be appended, for logs
+  "/v1/logs" will be appended.
+
+The following settings can be optionally configured:
+
+- `traces_endpoint` (no default): The target URL to send trace data to (e.g.: https://example.com:4318/v1/traces).
+  If this setting is present the `endpoint` setting is ignored for traces.
+- `metrics_endpoint` (no default): The target URL to send metric data to (e.g.: https://example.com:4318/v1/metrics).
+  If this setting is present the `endpoint` setting is ignored for metrics.
+- `logs_endpoint` (no default): The target URL to send log data to (e.g.: https://example.com:4318/v1/logs).
+  If this setting is present the `endpoint` setting is ignored for logs.
+
+- `insecure` (default = false): when set to true disables verifying the server's
+  certificate chain and host name. The connection is still encrypted but server identity
+  is not verified.
+- `ca_file` path to the CA cert. For a client this verifies the server certificate. Should
+  only be used if `insecure` is set to false.
+- `cert_file` path to the TLS cert to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+- `key_file` path to the TLS key to use for TLS required connections. Should
+  only be used if `insecure` is set to false.
+
+- `compression` (default = none): Compression type to use (only gzip is supported today)
+
+- `timeout` (default = 30s): HTTP request time limit. For details see https://golang.org/pkg/net/http/#Client
+- `read_buffer_size` (default = 0): ReadBufferSize for HTTP client.
+- `write_buffer_size` (default = 512 * 1024): WriteBufferSize for HTTP client.
+
+Example:
+
+```yaml
+exporters:
+  otlphttp:
+    endpoint: https://example.com:4318/v1/traces
+```
+
+The full list of settings exposed for this exporter is documented [here](./config.go)
+with detailed sample configurations [here](./testdata/config.yaml).
diff --git a/internal/otel_collector/exporter/otlphttpexporter/config.go b/internal/otel_collector/exporter/otlphttpexporter/config.go
new file mode 100644
index 00000000000..ccf7f52a162
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/config.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlphttpexporter
+
+import (
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// Config defines configuration for the OTLP/HTTP exporter.
+type Config struct {
+	config.ExporterSettings       `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+	confighttp.HTTPClientSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueSettings  `mapstructure:"sending_queue"`
+	exporterhelper.RetrySettings  `mapstructure:"retry_on_failure"`
+
+	// The URL to send traces to. If omitted the Endpoint + "/v1/traces" will be used.
+	TracesEndpoint string `mapstructure:"traces_endpoint"`
+
+	// The URL to send metrics to. If omitted the Endpoint + "/v1/metrics" will be used.
+	MetricsEndpoint string `mapstructure:"metrics_endpoint"`
+
+	// The URL to send logs to. If omitted the Endpoint + "/v1/logs" will be used.
+	LogsEndpoint string `mapstructure:"logs_endpoint"`
+
+	// The compression key for supported compression types within
+	// collector. Currently the only supported mode is `gzip`.
+	Compression string `mapstructure:"compression"`
+}
+
+var _ config.Exporter = (*Config)(nil)
+
+// Validate checks if the exporter configuration is valid
+func (cfg *Config) Validate() error {
+	return nil
+}
diff --git a/internal/otel_collector/exporter/otlphttpexporter/doc.go b/internal/otel_collector/exporter/otlphttpexporter/doc.go
new file mode 100644
index 00000000000..816df31ac35
--- /dev/null
+++ b/internal/otel_collector/exporter/otlphttpexporter/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlphttpexporter exports data by using the OTLP format to an HTTP endpoint. +package otlphttpexporter diff --git a/internal/otel_collector/exporter/otlphttpexporter/factory.go b/internal/otel_collector/exporter/otlphttpexporter/factory.go new file mode 100644 index 00000000000..733714ea9db --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/factory.go @@ -0,0 +1,157 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlphttpexporter + +import ( + "context" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter/exporterhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "otlphttp" +) + +// NewFactory creates a factory for OTLP exporter. +func NewFactory() component.ExporterFactory { + return exporterhelper.NewFactory( + typeStr, + createDefaultConfig, + exporterhelper.WithTraces(createTracesExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) +} + +func createDefaultConfig() config.Exporter { + return &Config{ + ExporterSettings: config.NewExporterSettings(config.NewID(typeStr)), + RetrySettings: exporterhelper.DefaultRetrySettings(), + QueueSettings: exporterhelper.DefaultQueueSettings(), + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "", + Timeout: 30 * time.Second, + Headers: map[string]string{}, + // We almost read 0 bytes, so no need to tune ReadBufferSize. 
+ WriteBufferSize: 512 * 1024, + }, + } +} + +func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) (string, error) { + switch { + case signalOverrideURL != "": + _, err := url.Parse(signalOverrideURL) + if err != nil { + return "", fmt.Errorf("%s_endpoint must be a valid URL", signalName) + } + return signalOverrideURL, nil + case oCfg.Endpoint == "": + return "", fmt.Errorf("either endpoint or %s_endpoint must be specified", signalName) + default: + return oCfg.Endpoint + "/v1/" + signalName, nil + } +} + +func createTracesExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.TracesExporter, error) { + oce, err := newExporter(cfg, set.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces") + if err != nil { + return nil, err + } + + return exporterhelper.NewTracesExporter( + cfg, + set, + oce.pushTraces, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func createMetricsExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.MetricsExporter, error) { + oce, err := newExporter(cfg, set.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics") + if err != nil { + return nil, err + } + + return exporterhelper.NewMetricsExporter( + cfg, + set, + oce.pushMetrics, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} + +func createLogsExporter( + _ context.Context, + set component.ExporterCreateSettings, + cfg config.Exporter, +) (component.LogsExporter, error) { + oce, err := newExporter(cfg, set.Logger) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs") + if err != nil { + return nil, err + } + + return exporterhelper.NewLogsExporter( + cfg, + set, + oce.pushLogs, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetrySettings), + exporterhelper.WithQueue(oCfg.QueueSettings)) +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/otlp.go b/internal/otel_collector/exporter/otlphttpexporter/otlp.go new file mode 100644 index 00000000000..53da6082acb --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/otlp.go @@ -0,0 +1,215 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlphttpexporter
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/protobuf/proto"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+	"go.opentelemetry.io/collector/internal/middleware"
+	"go.opentelemetry.io/collector/model/otlp"
+	"go.opentelemetry.io/collector/model/pdata"
+)
+
+type exporter struct {
+	// Input configuration.
+	config     *Config
+	client     *http.Client
+	tracesURL  string
+	metricsURL string
+	logsURL    string
+	logger     *zap.Logger
+}
+
+var (
+	tracesMarshaler  = otlp.NewProtobufTracesMarshaler()
+	metricsMarshaler = otlp.NewProtobufMetricsMarshaler()
+	logsMarshaler    = otlp.NewProtobufLogsMarshaler()
+)
+
+const (
+	headerRetryAfter         = "Retry-After"
+	maxHTTPResponseReadBytes = 64 * 1024
+)
+
+// Create a new exporter.
+func newExporter(cfg config.Exporter, logger *zap.Logger) (*exporter, error) {
+	oCfg := cfg.(*Config)
+
+	if oCfg.Endpoint != "" {
+		_, err := url.Parse(oCfg.Endpoint)
+		if err != nil {
+			return nil, errors.New("endpoint must be a valid URL")
+		}
+	}
+
+	// client construction is deferred to start
+	return &exporter{
+		config: oCfg,
+		logger: logger,
+	}, nil
+}
+
+// start actually creates the HTTP client. The client construction is deferred
+// until this point, as this is the only place we get hold of the Extensions that
+// are required to construct the auth round tripper.
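+// start also validates the configured compression type: for `gzip` it wraps the
+// client transport in a compressing round tripper, and any other non-empty value
+// is rejected with an error.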
+func (e *exporter) start(_ context.Context, host component.Host) error { + client, err := e.config.HTTPClientSettings.ToClient(host.GetExtensions()) + if err != nil { + return err + } + + if e.config.Compression != "" { + if strings.ToLower(e.config.Compression) == configgrpc.CompressionGzip { + client.Transport = middleware.NewCompressRoundTripper(client.Transport) + } else { + return fmt.Errorf("unsupported compression type %q", e.config.Compression) + } + } + e.client = client + return nil +} + +func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error { + request, err := tracesMarshaler.MarshalTraces(td) + if err != nil { + return consumererror.Permanent(err) + } + + return e.export(ctx, e.tracesURL, request) +} + +func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { + request, err := metricsMarshaler.MarshalMetrics(md) + if err != nil { + return consumererror.Permanent(err) + } + return e.export(ctx, e.metricsURL, request) +} + +func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error { + request, err := logsMarshaler.MarshalLogs(ld) + if err != nil { + return consumererror.Permanent(err) + } + + return e.export(ctx, e.logsURL, request) +} + +func (e *exporter) export(ctx context.Context, url string, request []byte) error { + e.logger.Debug("Preparing to make HTTP request", zap.String("url", url)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) + if err != nil { + return consumererror.Permanent(err) + } + req.Header.Set("Content-Type", "application/x-protobuf") + + resp, err := e.client.Do(req) + if err != nil { + return fmt.Errorf("failed to make an HTTP request: %w", err) + } + + defer func() { + // Discard any remaining response body when we are done reading. + io.CopyN(ioutil.Discard, resp.Body, maxHTTPResponseReadBytes) // nolint:errcheck + resp.Body.Close() + }() + + if resp.StatusCode >= 200 && resp.StatusCode <= 299 { + // Request is successful. + return nil + } + + respStatus := readResponse(resp) + + // Format the error message. Use the status if it is present in the response. + var formattedErr error + if respStatus != nil { + formattedErr = fmt.Errorf( + "error exporting items, request to %s responded with HTTP Status Code %d, Message=%s, Details=%v", + url, resp.StatusCode, respStatus.Message, respStatus.Details) + } else { + formattedErr = fmt.Errorf( + "error exporting items, request to %s responded with HTTP Status Code %d", + url, resp.StatusCode) + } + + // Check if the server is overwhelmed. + // See spec https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#throttling-1 + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + // Fallback to 0 if the Retry-After header is not present. This will trigger the + // default backoff policy by our caller (retry handler). + retryAfter := 0 + if val := resp.Header.Get(headerRetryAfter); val != "" { + if seconds, err2 := strconv.Atoi(val); err2 == nil { + retryAfter = seconds + } + } + // Indicate to our caller to pause for the specified number of seconds. + return exporterhelper.NewThrottleRetry(formattedErr, time.Duration(retryAfter)*time.Second) + } + + if resp.StatusCode == http.StatusBadRequest { + // Report the failure as permanent if the server thinks the request is malformed. + return consumererror.Permanent(formattedErr) + } + + // All other errors are retryable, so don't wrap them in consumererror.Permanent(). 
+ return formattedErr +} + +// Read the response and decode the status.Status from the body. +// Returns nil if the response is empty or cannot be decoded. +func readResponse(resp *http.Response) *status.Status { + var respStatus *status.Status + if resp.StatusCode >= 400 && resp.StatusCode <= 599 { + // Request failed. Read the body. OTLP spec says: + // "Response body for all HTTP 4xx and HTTP 5xx responses MUST be a + // Protobuf-encoded Status message that describes the problem." + maxRead := resp.ContentLength + if maxRead == -1 || maxRead > maxHTTPResponseReadBytes { + maxRead = maxHTTPResponseReadBytes + } + respBytes := make([]byte, maxRead) + n, err := io.ReadFull(resp.Body, respBytes) + if err == nil && n > 0 { + // Decode it as Status struct. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#failures + respStatus = &status.Status{} + err = proto.Unmarshal(respBytes, respStatus) + if err != nil { + respStatus = nil + } + } + } + + return respStatus +} diff --git a/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml b/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml new file mode 100644 index 00000000000..ca25dfe6e31 --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/testdata/config.yaml @@ -0,0 +1,38 @@ +receivers: + nop: + +processors: + nop: + +exporters: + otlphttp: + otlphttp/2: + endpoint: "https://1.2.3.4:1234" + insecure: true + ca_file: /var/lib/mycert.pem + cert_file: certfile + key_file: keyfile + timeout: 10s + read_buffer_size: 123 + write_buffer_size: 345 + sending_queue: + enabled: true + num_consumers: 2 + queue_size: 10 + retry_on_failure: + enabled: true + initial_interval: 10s + max_interval: 60s + max_elapsed_time: 10m + headers: + "can you have a . 
here?": "F0000000-0000-0000-0000-000000000000" + header1: 234 + another: "somevalue" + compression: gzip + +service: + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [otlphttp] diff --git a/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem b/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem new file mode 100644 index 00000000000..b2e77b89d49 --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/testdata/test_cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE6jCCAtICCQDVU4PtqpqADTANBgkqhkiG9w0BAQsFADA3MQswCQYDVQQGEwJV +UzETMBEGA1UECAwKY2FsaWZvcm5pYTETMBEGA1UECgwKb3BlbmNlbnN1czAeFw0x +OTAzMDQxODA3MjZaFw0yMDAzMDMxODA3MjZaMDcxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIDApjYWxpZm9ybmlhMRMwEQYDVQQKDApvcGVuY2Vuc3VzMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAy9JQiAOMzArcdiS4szbTuzg5yYijSSY6SvGj +XMs4/LEFLxgGmFfyHXxoVQzV26lTu/AiUFlZi4JY2qlkZyPwmmmSg4fmzikpVPiC +Vv9pvSIojs8gs0sHaOt40Q8ym43bNt3Mh8rYrs+XMERi6Ol9//j4LnfePkNU5uEo +qC8KQamckaMR6UEHFNunyOwvNBsipgTPldQUPGVnCsNKk8olYGAXS7DR25bgbPli +4T9VCSElsSPAODmyo+2MEDagVXa1vVYxKyO2k6oeBS0lsvdRqRTmGggcg0B/dk+a +H1CL9ful0cu9P3dQif+hfGay8udPkwDLPEq1+WnjJFut3Pmbk3SqUCas5iWt76kK +eKFh4k8fCy4yiaZxzvSbm9+bEBHAl0ZXd8pjvAsBfCKe6G9SBzE1DK4FjWiiEGCb +5dGsyTKr33q3DekLvT3LF8ZeON/13d9toucX9PqG2HDwMP/Fb4WjQIzOc/H9wIak +pf7u6QBDGUiCMmoDrp1d8RsI1RPbEhoywH0YlLmwgf+cr1dU7vlISf576EsGxFz4 ++/sZjIBvZBHn/x0MH+bs4J8V3vMujfDoRdhL07bK7q/AkEALUxljKEfoWeqiuVzK +F9BVv3xNhiua2kgPVbMNWPrQ5uotkNp8IykJ3QOuQ3p5pzxdGfpLd6f8gmJDmcbi +AI9dWTcCAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAVVi4t/Sumre+AGTaU7np9dl2 +tpllbES5ixe6m2uezt5wAzYNNyuQ2mMG2XrSkMy5gvBZRT9nRNSmLV8VEcxZihG0 +YHS5soXnLL3Jdlwxp98WTDPvM1ntxcHyEyqrrg9YDfKn4sOrr5vo2yZzoKwtxtc7 +lue9JormVx7GxMi7NwaUtCbnwAIcqJJpFjt1EhmJOxGqTJPgUvTBdeGvRj30c6fk +pqpUdPbZ7RKPEtbLoMoCBujKnErv+H0G6Vp9WyCHN+Mi9uTMsGwH14cmJjmfwGDC +8/WF4LdlawFnf/arIp9YcVwcP91d4ywyvbuuo2M7qdosQ7k4uRZ3tyggLYShS3RW +BMEhMRDz9dM0oKGF+HnaS824BIh6O6Hn82Vt8uCKS7IbEX99/kkN1KcqqQe6Lwjq +tG/lm4K5yf+FJVDivpZ9mYTvqTBjhTaOp6m3HYSNJfS0hLQVvEuBNXd8bHiXkcLp +rmFOYUWsjxV1Qku3U5Rner0UpB2Fuw9nJcXuDgWG0gjwzAZ83y3du1VIZp0Ad8Vv +IYpaucbImGJszMtNXn3l72K1wvQVIhm9eRwYc3QteJzweHaDsbytZEoS/GhTrZIT +wRe5ZGrjJBJngRANRSm1BH8j6PjLem9mzPb2eytwJJA0lLhUk4vYproVvXcx0vow +5F+5VB1YB8/tbWePmpo= +-----END CERTIFICATE----- diff --git a/internal/otel_collector/extension/README.md b/internal/otel_collector/extension/README.md new file mode 100644 index 00000000000..4f9661e1414 --- /dev/null +++ b/internal/otel_collector/extension/README.md @@ -0,0 +1,32 @@ +# General Information + +Extensions provide capabilities on top of the primary functionality of the +collector. Generally, extensions are used for implementing components that can +be added to the Collector, but which do not require direct access to telemetry +data and are not part of the pipelines (like receivers, processors or +exporters). Example extensions are: Health Check extension that responds to +health check requests or PProf extension that allows fetching Collector's +performance profile. + +Supported service extensions (sorted alphabetically): + +- [Memory Ballast](ballastextension/README.md) +- [zPages](zpagesextension/README.md) + +The [contributors +repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) +may have more extensions that can be added to custom builds of the Collector. 
+
+## Ordering Extensions
+
+The order in which extensions are specified for the service is important, as this is the
+order in which each extension will be started and the reverse order in which they
+will be shut down. The ordering is determined in the `extensions` tag under the
+`service` tag in the configuration file, for example:
+
+```yaml
+service:
+  # Extensions specified below are going to be loaded by the service in the
+  # order given below, and shut down in reverse order.
+  extensions: [memory_ballast, zpages]
+```
diff --git a/internal/otel_collector/extension/ballastextension/README.md b/internal/otel_collector/extension/ballastextension/README.md
new file mode 100644
index 00000000000..78018259681
--- /dev/null
+++ b/internal/otel_collector/extension/ballastextension/README.md
@@ -0,0 +1,43 @@
+# Memory Ballast
+
+The Memory Ballast extension enables applications to configure a memory ballast for the process. For more details see:
+- [Go memory ballast blogpost](https://blog.twitch.tv/go-memory-ballast-how-i-learnt-to-stop-worrying-and-love-the-heap-26c2462549a2)
+- [Golang issue related to this](https://github.com/golang/go/issues/23044)
+
+The following settings can be configured:
+
+- `size_mib` (default = 0, disabled): the memory ballast size, in MiB.
+  Takes higher priority than `size_in_percentage` if both are specified at the same time.
+- `size_in_percentage` (default = 0, disabled): sets the memory ballast as a
+  percentage of the total memory; the valid value range is `1-100`.
+  It is supported in both containerized (e.g. Docker, Kubernetes) and physical host environments.
+
+**How ballast size is calculated with percentage configuration**
+When `size_in_percentage` is set to a value in the range `1-100`, the absolute `ballast_size` is calculated as
+`size_in_percentage * totalMemory / 100`. The `totalMemory` is determined for hosts and containers (Docker, Kubernetes, etc.) by the following steps (a sketch of this logic follows the list):
+1. Look up the memory cgroup subsystem on the target host or container and find out whether a total
+   memory limit has been set for the running collector process.
+   Check the value in the `memory.limit_in_bytes` file under the cgroup memory files (e.g. `/sys/fs/cgroup/memory/memory.limit_in_bytes`).
+
+2. If `memory.limit_in_bytes` is a positive value other than `9223372036854771712` (`0x7FFFFFFFFFFFF000`), the `ballast_size`
+   is calculated as `memory.limit_in_bytes * size_in_percentage / 100`.
+   If the `memory.limit_in_bytes` value is `9223372036854771712` (`0x7FFFFFFFFFFFF000`), it indicates that no memory limit has
+   been set for the collector process or the running container in the cgroup, and `totalMemory` is determined in the next step.
+
+3. If there is no memory limit set in the cgroup for the collector process or the container where the collector is running, the total memory is
+   taken from `github.com/shirou/gopsutil/mem`[[link]](https://github.com/shirou/gopsutil/) via `mem.VirtualMemory().total`, which is supported on multiple operating systems.
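+
+A rough, hypothetical Go sketch of the rules above (not the extension's actual
+implementation; the cgroup path and the `cgroupMemoryLimit`/`ballastSizeBytes`
+helpers are illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// noCgroupLimit is the sentinel value cgroups v1 reports when no memory limit is set.
+const noCgroupLimit = 0x7FFFFFFFFFFFF000
+
+// cgroupMemoryLimit reads a cgroup v1 memory limit file such as
+// /sys/fs/cgroup/memory/memory.limit_in_bytes.
+func cgroupMemoryLimit(path string) (uint64, error) {
+	raw, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
+}
+
+// ballastSizeBytes mirrors the documented rules: a usable cgroup limit wins;
+// otherwise fall back to the host's total memory. hostTotalMemory stands in
+// for something like gopsutil's mem.VirtualMemory().Total.
+func ballastSizeBytes(sizeInPercentage uint64, hostTotalMemory func() (uint64, error)) (uint64, error) {
+	total, err := cgroupMemoryLimit("/sys/fs/cgroup/memory/memory.limit_in_bytes")
+	if err != nil || total == 0 || total == noCgroupLimit {
+		// No (usable) cgroup limit: use the host's total memory instead.
+		if total, err = hostTotalMemory(); err != nil {
+			return 0, err
+		}
+	}
+	return sizeInPercentage * total / 100, nil
+}
+
+func main() {
+	// Pretend the host has 8 GiB of memory and ask for a 20% ballast.
+	fakeTotal := func() (uint64, error) { return 8 << 30, nil }
+	size, err := ballastSizeBytes(20, fakeTotal)
+	fmt.Println(size, err)
+}
+```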
+
+Examples:
+
+Config that uses 64 MiB of memory for the ballast:
+```yaml
+extensions:
+  memory_ballast:
+    size_mib: 64
+```
+
+Config that uses 20% of the total memory for the ballast:
+```yaml
+extensions:
+  memory_ballast:
+    size_in_percentage: 20
+```
diff --git a/internal/otel_collector/extension/ballastextension/config.go b/internal/otel_collector/extension/ballastextension/config.go
new file mode 100644
index 00000000000..fac10933375
--- /dev/null
+++ b/internal/otel_collector/extension/ballastextension/config.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ballastextension
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/config"
+)
+
+// Config has the configuration for the ballast extension.
+type Config struct {
+	config.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// SizeMiB is the size, in MiB, of the memory ballast
+	// to be created for this process.
+	SizeMiB uint64 `mapstructure:"size_mib"`
+
+	// SizeInPercentage is the maximum amount of memory ballast, in %, targeted to be
+	// allocated. The fixed memory setting SizeMiB has higher precedence.
+	SizeInPercentage uint64 `mapstructure:"size_in_percentage"`
+}
+
+// Validate checks if the extension configuration is valid
+func (cfg *Config) Validate() error {
+	// no need to validate the less-than-0 case for uint64
+	if cfg.SizeInPercentage > 100 {
+		return fmt.Errorf("size_in_percentage is not in range 0 to 100")
+	}
+	return nil
+}
diff --git a/internal/otel_collector/extension/ballastextension/factory.go b/internal/otel_collector/extension/ballastextension/factory.go
new file mode 100644
index 00000000000..ac69035d05e
--- /dev/null
+++ b/internal/otel_collector/extension/ballastextension/factory.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ballastextension
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/extension/extensionhelper"
+	"go.opentelemetry.io/collector/internal/iruntime"
+)
+
+const (
+	// The value of extension "type" in configuration.
+	typeStr = "memory_ballast"
+)
+
+// memHandler returns the total memory of the target host/vm.
+var memHandler = iruntime.TotalMemory
+
+// NewFactory creates a factory for the memory ballast extension.
+func NewFactory() component.ExtensionFactory { + return extensionhelper.NewFactory( + typeStr, + createDefaultConfig, + createExtension) +} + +func createDefaultConfig() config.Extension { + return &Config{ + ExtensionSettings: config.NewExtensionSettings(config.NewID(typeStr)), + } +} + +func createExtension(_ context.Context, set component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) { + return newMemoryBallast(cfg.(*Config), set.Logger, memHandler), nil +} diff --git a/internal/otel_collector/extension/ballastextension/memory_ballast.go b/internal/otel_collector/extension/ballastextension/memory_ballast.go new file mode 100644 index 00000000000..c7a37365ede --- /dev/null +++ b/internal/otel_collector/extension/ballastextension/memory_ballast.go @@ -0,0 +1,73 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ballastextension + +import ( + "context" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +const megaBytes = 1024 * 1024 + +type MemoryBallast struct { + cfg *Config + logger *zap.Logger + ballast []byte + ballastSizeBytes uint64 + getTotalMem func() (uint64, error) +} + +func (m *MemoryBallast) Start(_ context.Context, _ component.Host) error { + // absolute value supersedes percentage setting + if m.cfg.SizeMiB > 0 { + m.ballastSizeBytes = m.cfg.SizeMiB * megaBytes + } else { + totalMemory, err := m.getTotalMem() + if err != nil { + return err + } + ballastPercentage := m.cfg.SizeInPercentage + m.ballastSizeBytes = ballastPercentage * totalMemory / 100 + } + + if m.ballastSizeBytes > 0 { + m.ballast = make([]byte, m.ballastSizeBytes) + } + + m.logger.Info("Setting memory ballast", zap.Uint32("MiBs", uint32(m.ballastSizeBytes/megaBytes))) + + return nil +} + +func (m *MemoryBallast) Shutdown(_ context.Context) error { + m.ballast = nil + return nil +} + +func newMemoryBallast(cfg *Config, logger *zap.Logger, getTotalMem func() (uint64, error)) *MemoryBallast { + return &MemoryBallast{ + cfg: cfg, + logger: logger, + getTotalMem: getTotalMem, + } +} + +// GetBallastSize returns the current ballast memory setting in bytes +func (m *MemoryBallast) GetBallastSize() uint64 { + return m.ballastSizeBytes +} diff --git a/internal/otel_collector/extension/ballastextension/testdata/config.yaml b/internal/otel_collector/extension/ballastextension/testdata/config.yaml new file mode 100644 index 00000000000..d7cf835602d --- /dev/null +++ b/internal/otel_collector/extension/ballastextension/testdata/config.yaml @@ -0,0 +1,21 @@ +extensions: + memory_ballast: + memory_ballast/1: + size_mib: 123 + size_in_percentage: 20 + +# Data pipeline is required to load the config. 
+receivers:
+  nop:
+processors:
+  nop:
+exporters:
+  nop:
+
+service:
+  extensions: [memory_ballast/1]
+  pipelines:
+    traces:
+      receivers: [nop]
+      processors: [nop]
+      exporters: [nop]
diff --git a/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml b/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml
new file mode 100644
index 00000000000..c73a7a9975c
--- /dev/null
+++ b/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml
@@ -0,0 +1,19 @@
+extensions:
+  memory_ballast:
+    size_in_percentage: 200
+
+# Data pipeline is required to load the config.
+receivers:
+  nop:
+processors:
+  nop:
+exporters:
+  nop:
+
+service:
+  extensions: [memory_ballast]
+  pipelines:
+    traces:
+      receivers: [nop]
+      processors: [nop]
+      exporters: [nop]
diff --git a/internal/otel_collector/extension/extensionhelper/factory.go b/internal/otel_collector/extension/extensionhelper/factory.go
new file mode 100644
index 00000000000..271cba740a4
--- /dev/null
+++ b/internal/otel_collector/extension/extensionhelper/factory.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensionhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+)
+
+// FactoryOption applies changes to the extension factory.
+type FactoryOption func(o *factory)
+
+// CreateDefaultConfig is the equivalent of component.ExtensionFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() config.Extension
+
+// CreateServiceExtension is the equivalent of component.ExtensionFactory.CreateExtension()
+type CreateServiceExtension func(context.Context, component.ExtensionCreateSettings, config.Extension) (component.Extension, error)
+
+type factory struct {
+	cfgType                config.Type
+	createDefaultConfig    CreateDefaultConfig
+	createServiceExtension CreateServiceExtension
+}
+
+// NewFactory returns a component.ExtensionFactory.
+func NewFactory(
+	cfgType config.Type,
+	createDefaultConfig CreateDefaultConfig,
+	createServiceExtension CreateServiceExtension,
+	options ...FactoryOption) component.ExtensionFactory {
+	f := &factory{
+		cfgType:                cfgType,
+		createDefaultConfig:    createDefaultConfig,
+		createServiceExtension: createServiceExtension,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	return f
+}
+
+// Type gets the type of the Extension config created by this factory.
+func (f *factory) Type() config.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the extension.
+func (f *factory) CreateDefaultConfig() config.Extension {
+	return f.createDefaultConfig()
+}
+
+// CreateExtension creates a component.Extension based on this config.
+func (f *factory) CreateExtension(
+	ctx context.Context,
+	set component.ExtensionCreateSettings,
+	cfg config.Extension) (component.Extension, error) {
+	return f.createServiceExtension(ctx, set, cfg)
+}
diff --git a/internal/otel_collector/extension/storage/Makefile b/internal/otel_collector/extension/storage/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/internal/otel_collector/extension/storage/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/internal/otel_collector/extension/storage/README.md b/internal/otel_collector/extension/storage/README.md
new file mode 100644
index 00000000000..2214994ad14
--- /dev/null
+++ b/internal/otel_collector/extension/storage/README.md
@@ -0,0 +1,38 @@
+# Storage
+
+**Status: under development; this is currently just the interface**
+
+A storage extension persists state beyond the collector process. Other components can request a storage client from the storage extension and use it to manage state.
+
+The `storage.Extension` interface extends `component.Extension` by adding the following method:
+```
+GetClient(context.Context, component.Kind, config.ComponentID, string) (Client, error)
+```
+
+The `storage.Client` interface contains the following methods:
+```
+Get(context.Context, string) ([]byte, error)
+Set(context.Context, string, []byte) error
+Delete(context.Context, string) error
+Close(context.Context) error
+```
+
+It is possible to execute several operations in a single transaction via `Batch`. The method takes a collection of
+`Operation` arguments (each of which contains `Key`, `Value` and `Type` properties):
+```
+Batch(context.Context, ...Operation) error
+```
+
+The operations themselves can be created using:
+
+```
+SetOperation(string, []byte) Operation
+GetOperation(string) Operation
+DeleteOperation(string) Operation
+```
+
+Get operation results are stored in-place into the given Operation and can be retrieved using its `Value` property.
+
+Note: All methods should return an error only if a problem occurred. (For example, if a file is no longer accessible, or if a remote service is unavailable.)
+
+Note: It is the responsibility of each component to `Close` a storage client that it has requested.
diff --git a/internal/otel_collector/extension/storage/doc.go b/internal/otel_collector/extension/storage/doc.go
new file mode 100644
index 00000000000..3590d5fcc3b
--- /dev/null
+++ b/internal/otel_collector/extension/storage/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
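+
+// A hypothetical usage sketch, as seen from a consuming package (this is not
+// code in this package; it assumes a storage Extension implementation `ext`
+// and shows the Client and Batch APIs defined in storage.go):
+//
+//	client, err := ext.GetClient(ctx, component.KindReceiver, id, "")
+//	if err == nil {
+//		_ = client.Set(ctx, "key", []byte("value"))
+//		op := storage.GetOperation("key")
+//		_ = client.Batch(ctx, op) // op.Value now holds the stored bytes
+//		_ = client.Close(ctx)
+//	}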
+
+// Package storage defines an extension interface that collector components can
+// use to persist state beyond the collector process via key/value storage clients.
+package storage
diff --git a/internal/otel_collector/extension/storage/nop_client.go b/internal/otel_collector/extension/storage/nop_client.go
new file mode 100644
index 00000000000..10ee9df821c
--- /dev/null
+++ b/internal/otel_collector/extension/storage/nop_client.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import "context"
+
+type nopClient struct{}
+
+var nopClientInstance Client = &nopClient{}
+
+// NewNopClient returns a nop client
+func NewNopClient() Client {
+	return nopClientInstance
+}
+
+// Get does nothing, and returns nil, nil
+func (c nopClient) Get(context.Context, string) ([]byte, error) {
+	return nil, nil // no result, but no problem
+}
+
+// Set does nothing and returns nil
+func (c nopClient) Set(context.Context, string, []byte) error {
+	return nil // no problem
+}
+
+// Delete does nothing and returns nil
+func (c nopClient) Delete(context.Context, string) error {
+	return nil // no problem
+}
+
+// Close does nothing and returns nil
+func (c nopClient) Close(context.Context) error {
+	return nil
+}
+
+// Batch does nothing and returns nil
+func (c nopClient) Batch(context.Context, ...Operation) error {
+	return nil // no result, but no problem
+}
diff --git a/internal/otel_collector/extension/storage/storage.go b/internal/otel_collector/extension/storage/storage.go
new file mode 100644
index 00000000000..e17768023e0
--- /dev/null
+++ b/internal/otel_collector/extension/storage/storage.go
@@ -0,0 +1,104 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+)
+
+// Extension is the interface that storage extensions must implement.
+type Extension interface {
+	component.Extension
+
+	// GetClient will create a client for use by the specified component.
+	// Each component can have multiple storages (e.g. one for each signal),
+	// which can be identified using the storageName parameter.
+	// The component can use the client to manage state
+	GetClient(ctx context.Context, kind component.Kind, id config.ComponentID, storageName string) (Client, error)
+}
+
+// Client is the interface that storage clients must implement.
+// All methods should return an error only if a problem occurred.
+// This mirrors the behavior of a Go map:
+// - Set doesn't error if a key already exists - it just overwrites the value.
+// - Get doesn't error if a key is not found - it just returns nil.
+// - Delete doesn't error if the key doesn't exist - it just no-ops.
+// Similarly:
+// - Batch doesn't error if any of the above happens for either retrieved or updated keys.
+// This also provides a way to differentiate data operations
+// [overwrite | not-found | no-op] from "real" problems
+type Client interface {
+
+	// Get will retrieve data from storage that corresponds to the
+	// specified key. It should return (nil, nil) if not found
+	Get(ctx context.Context, key string) ([]byte, error)
+
+	// Set will store data. The data can be retrieved by the same
+	// component after a process restart, using the same key
+	Set(ctx context.Context, key string, value []byte) error
+
+	// Delete will delete data associated with the specified key
+	Delete(ctx context.Context, key string) error
+
+	// Batch handles the specified operations in a single batch. Get operation results are put in place
+	Batch(ctx context.Context, ops ...Operation) error
+
+	// Close will release any resources held by the client
+	Close(ctx context.Context) error
+}
+
+// opType describes the kind of a batched storage operation
+type opType int
+
+const (
+	Get opType = iota
+	Set
+	Delete
+)
+
+type operation struct {
+	// Key specifies the key to be retrieved, set or deleted
+	Key string
+	// Value holds the value to be set, or the result of a Get operation
+	Value []byte
+	// Type describes the operation type
+	Type opType
+}
+
+// Operation is a single Get, Set or Delete operation that can be passed to Batch
+type Operation *operation
+
+// SetOperation creates a Set operation for the given key and value
+func SetOperation(key string, value []byte) Operation {
+	return &operation{
+		Key:   key,
+		Value: value,
+		Type:  Set,
+	}
+}
+
+// GetOperation creates a Get operation for the given key; the result is
+// stored in the operation's Value field
+func GetOperation(key string) Operation {
+	return &operation{
+		Key:  key,
+		Type: Get,
+	}
+}
+
+// DeleteOperation creates a Delete operation for the given key
+func DeleteOperation(key string) Operation {
+	return &operation{
+		Key:  key,
+		Type: Delete,
+	}
+}
diff --git a/internal/otel_collector/extension/zpagesextension/README.md b/internal/otel_collector/extension/zpagesextension/README.md
new file mode 100644
index 00000000000..7c60c5344ab
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/README.md
@@ -0,0 +1,67 @@
+# zPages
+
+Enables an extension that serves zPages, an HTTP endpoint that provides live
+data for debugging the components that have been instrumented for it.
+All core exporters and receivers provide some zPage instrumentation.
+
+zPages are useful for in-process diagnostics without having to depend on any
+backend to examine traces or metrics.
+
+The following settings are required:
+
+- `endpoint` (default = localhost:55679): Specifies the HTTP endpoint that serves
+zPages. Use `localhost:<port>` to make it available only locally, or `":<port>"` to
+make it available on all network interfaces.
+
+Example:
+```yaml
+extensions:
+  zpages:
+```
+
+The full list of settings exposed for this extension is documented [here](./config.go)
+with detailed sample configurations [here](./testdata/config.yaml).
+
+## Exposed zPages routes
+
+The collector exposes the following zPage routes:
+
+### ServiceZ
+
+ServiceZ gives an overview of the collector services by giving quick access to the
+`pipelinez` and `extensionz` zPages. The page also provides build and runtime
+information.
+
+Example URL: http://localhost:55679/debug/servicez
+
+### PipelineZ
+
+PipelineZ provides insight into the pipelines running in the collector. You can
+find information on the pipeline type, whether data is mutated, and the receivers,
+processors and exporters that are used for each pipeline.
+
+Example URL: http://localhost:55679/debug/pipelinez
+
+### ExtensionZ
+
+ExtensionZ shows the extensions that are active in the collector.
+
+Example URL: http://localhost:55679/debug/extensionz
+
+### TraceZ
+The TraceZ route allows you to examine and bucketize spans by latency, for example:
+
+(0us, 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 1m]
+
+It also allows you to quickly examine error samples.
+
+Example URL: http://localhost:55679/debug/tracez
+
+### RpcZ
+The RpcZ route helps examine statistics of remote procedure calls (RPCs)
+that are properly instrumented, for example when using gRPC.
+
+Example URL: http://localhost:55679/debug/rpcz
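As a quick sanity check of the routes listed above, a sketch like the following can probe a locally running collector that has the extension enabled. The default endpoint is assumed, and the program itself is illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Probe each documented zPages route on the default endpoint.
	for _, route := range []string{"servicez", "pipelinez", "extensionz", "tracez", "rpcz"} {
		resp, err := http.Get("http://localhost:55679/debug/" + route)
		if err != nil {
			fmt.Printf("%s: %v\n", route, err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s: HTTP %d, %d bytes\n", route, resp.StatusCode, len(body))
	}
}
```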
diff --git a/internal/otel_collector/extension/zpagesextension/config.go b/internal/otel_collector/extension/zpagesextension/config.go
new file mode 100644
index 00000000000..79335bbe870
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/config.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+	"errors"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/confignet"
+)
+
+// Config defines the configuration for the zPages extension.
+type Config struct {
+	config.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+
+	// TCPAddr is the address and port on which the zPages will be served.
+	// Use `localhost:<port>` to make it available only locally, or ":<port>" to
+	// make it available on all network interfaces.
+	TCPAddr confignet.TCPAddr `mapstructure:",squash"`
+}
+
+var _ config.Extension = (*Config)(nil)
+
+// Validate checks if the extension configuration is valid
+func (cfg *Config) Validate() error {
+	if cfg.TCPAddr.Endpoint == "" {
+		return errors.New("\"endpoint\" is required when using the \"zpages\" extension")
+	}
+	return nil
+}
diff --git a/internal/otel_collector/extension/zpagesextension/doc.go b/internal/otel_collector/extension/zpagesextension/doc.go
new file mode 100644
index 00000000000..7b312a5e9d2
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package zpagesextension implements an extension that exposes zPages of
+// properly instrumented components.
+package zpagesextension
diff --git a/internal/otel_collector/extension/zpagesextension/factory.go b/internal/otel_collector/extension/zpagesextension/factory.go
new file mode 100644
index 00000000000..8fc2e48be3f
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/factory.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zpagesextension
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/confignet"
+	"go.opentelemetry.io/collector/extension/extensionhelper"
+)
+
+const (
+	// The value of extension "type" in configuration.
+	typeStr = "zpages"
+
+	defaultEndpoint = "localhost:55679"
+)
+
+// NewFactory creates a factory for the zPages extension.
+func NewFactory() component.ExtensionFactory {
+	return extensionhelper.NewFactory(
+		typeStr,
+		createDefaultConfig,
+		createExtension)
+}
+
+func createDefaultConfig() config.Extension {
+	return &Config{
+		ExtensionSettings: config.NewExtensionSettings(config.NewID(typeStr)),
+		TCPAddr: confignet.TCPAddr{
+			Endpoint: defaultEndpoint,
+		},
+	}
+}
+
+// createExtension creates the extension based on this config.
+func createExtension(_ context.Context, set component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) {
+	return newServer(cfg.(*Config), set.Logger), nil
+}
diff --git a/internal/otel_collector/extension/zpagesextension/testdata/config.yaml b/internal/otel_collector/extension/zpagesextension/testdata/config.yaml
new file mode 100644
index 00000000000..8213a5b9730
--- /dev/null
+++ b/internal/otel_collector/extension/zpagesextension/testdata/config.yaml
@@ -0,0 +1,20 @@
+extensions:
+  zpages:
+  zpages/1:
+    endpoint: "localhost:56888"
+
+service:
+  extensions: [zpages/1]
+  pipelines:
+    traces:
+      receivers: [nop]
+      processors: [nop]
+      exporters: [nop]
+
+# Data pipeline is required to load the config.
+receivers: + nop: +processors: + nop: +exporters: + nop: diff --git a/internal/otel_collector/extension/zpagesextension/zpagesextension.go b/internal/otel_collector/extension/zpagesextension/zpagesextension.go new file mode 100644 index 00000000000..d4b741dde00 --- /dev/null +++ b/internal/otel_collector/extension/zpagesextension/zpagesextension.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zpagesextension + +import ( + "context" + "net/http" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +type zpagesExtension struct { + config *Config + logger *zap.Logger + server http.Server + stopCh chan struct{} +} + +func (zpe *zpagesExtension) Start(_ context.Context, host component.Host) error { + zPagesMux := http.NewServeMux() + + hostZPages, ok := host.(interface { + RegisterZPages(mux *http.ServeMux, pathPrefix string) + }) + if ok { + zpe.logger.Info("Register Host's zPages") + hostZPages.RegisterZPages(zPagesMux, "/debug") + } else { + zpe.logger.Info("Host's zPages not available") + } + + // Start the listener here so we can have earlier failure if port is + // already in use. + ln, err := zpe.config.TCPAddr.Listen() + if err != nil { + return err + } + + zpe.logger.Info("Starting zPages extension", zap.Any("config", zpe.config)) + zpe.server = http.Server{Handler: zPagesMux} + zpe.stopCh = make(chan struct{}) + go func() { + defer close(zpe.stopCh) + + if err := zpe.server.Serve(ln); err != nil && err != http.ErrServerClosed { + host.ReportFatalError(err) + } + }() + + return nil +} + +func (zpe *zpagesExtension) Shutdown(context.Context) error { + err := zpe.server.Close() + if zpe.stopCh != nil { + <-zpe.stopCh + } + return err +} + +func newServer(config *Config, logger *zap.Logger) *zpagesExtension { + return &zpagesExtension{ + config: config, + logger: logger, + } +} diff --git a/internal/otel_collector/go.mod b/internal/otel_collector/go.mod new file mode 100644 index 00000000000..23dc70efd4b --- /dev/null +++ b/internal/otel_collector/go.mod @@ -0,0 +1,74 @@ +module go.opentelemetry.io/collector + +go 1.17 + +require ( + contrib.go.opencensus.io/exporter/prometheus v0.4.0 + github.com/cenkalti/backoff/v4 v4.1.1 + github.com/gogo/protobuf v1.3.2 + github.com/google/uuid v1.3.0 + github.com/gorilla/mux v1.8.0 + github.com/knadh/koanf v1.2.1 + github.com/magiconair/properties v1.8.5 + github.com/mitchellh/mapstructure v1.4.1 + github.com/prometheus/common v0.30.0 + github.com/rs/cors v1.8.0 + github.com/shirou/gopsutil v3.21.7+incompatible + github.com/spf13/cast v1.4.1 + github.com/spf13/cobra v1.2.1 + github.com/stretchr/testify v1.7.0 + go.opencensus.io v0.23.0 + go.opentelemetry.io/collector/model v0.34.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.22.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0 + 
go.opentelemetry.io/contrib/zpages v0.22.0 + go.opentelemetry.io/otel v1.0.0-RC2 + go.opentelemetry.io/otel/oteltest v1.0.0-RC2 + go.opentelemetry.io/otel/sdk v1.0.0-RC2 + go.opentelemetry.io/otel/trace v1.0.0-RC2 + go.uber.org/atomic v1.9.0 + go.uber.org/zap v1.19.0 + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 + google.golang.org/grpc v1.40.0 + google.golang.org/protobuf v1.27.1 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/felixge/httpsnoop v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/go-kit/log v0.1.0 // indirect + github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/kr/pretty v0.3.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.11.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/statsd_exporter v0.21.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.5 // indirect + github.com/tklauser/numcpus v0.2.2 // indirect + go.opentelemetry.io/contrib v0.22.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.22.0 // indirect + go.opentelemetry.io/otel/metric v0.22.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect + golang.org/x/text v0.3.6 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) + +replace go.opentelemetry.io/collector/model => ./model + +retract v0.32.0 // Contains incomplete metrics transition to proto 0.9.0, random components are not working. 
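Before the go.sum that pins these modules, a minimal sketch of tying the zPages pieces together: creating and starting the extension through its factory, mirroring the factory.go and zpagesextension.go added above. The use of `componenttest.NewNopHost` and a nop logger is an assumption for illustration; inside a real collector the service supplies both and registers the /debug routes:

```go
package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/extension/zpagesextension"
)

func main() {
	factory := zpagesextension.NewFactory()

	// The default config carries the "zpages" ID and the localhost:55679 endpoint.
	cfg := factory.CreateDefaultConfig()

	ext, err := factory.CreateExtension(context.Background(),
		component.ExtensionCreateSettings{Logger: zap.NewNop()}, cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Start binds the listener; with a nop host no /debug routes are
	// registered, since route registration is done by the collector host.
	if err := ext.Start(context.Background(), componenttest.NewNopHost()); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = ext.Shutdown(context.Background()) }()
}
```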
diff --git a/internal/otel_collector/go.sum b/internal/otel_collector/go.sum new file mode 100644 index 00000000000..29bc8042dac --- /dev/null +++ b/internal/otel_collector/go.sum @@ -0,0 +1,821 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= 
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go 
v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf v1.2.1 h1:tVR+BbAM5PA2YkB0OMyfSnEsmt3uygpn3R0WB6jKw7s= +github.com/knadh/koanf v1.2.1/go.mod h1:xpPTwMhsA/aaQLAilyCCqfpEiY1gpa160AiCuWHJUjY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= +github.com/rs/cors v1.8.0/go.mod 
h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v3.21.7+incompatible h1:g/wcPHcuCQvHSePVofjQljd2vX4ty0+J6VoMB+NPcdk= +github.com/shirou/gopsutil v3.21.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf 
v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.22.0 h1:0F7gDEjgb1WGn4ODIjaCAg75hmqF+UN0LiVgwxsCodc= +go.opentelemetry.io/contrib v0.22.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.22.0 h1:TjqELdtCtlOJQrTnXd2y+RP6wXKZUnnJer0HR0CSo18= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.22.0/go.mod h1:KjqwX4uJNaj479ZjFpADOMJKOM4rBXq4kN7nbeuGKrY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0 h1:WHjZguqT+3UjTgFum33hWZYybDVnx8u9q5/kQDfaGTs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0/go.mod h1:o3MuU25bYroYnc2TOKe8mTk8f9X1oPFO6C5RCoPKtSU= +go.opentelemetry.io/contrib/zpages v0.22.0 h1:4TA4lwy/lFIdbNTqbDI1XDxQXc2CtTonIH/5R7G+sTo= +go.opentelemetry.io/contrib/zpages v0.22.0/go.mod h1:pO7VUk5qoCiekzXk0XCuQcKQsKBHyjx9KFIW1Vlc8dw= +go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= +go.opentelemetry.io/otel v1.0.0-RC2 h1:SHhxSjB+omnGZPgGlKe+QMp3MyazcOHdQ8qwo89oKbg= +go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= +go.opentelemetry.io/otel/internal/metric v0.22.0 h1:Q9bS02XRykSRIbggaU4hVF9oWOP9PyILu26zJWoKmk0= +go.opentelemetry.io/otel/internal/metric v0.22.0/go.mod h1:7qVuMihW/ktMonEfOvBXuh6tfMvvEyoIDgeJNRloYbQ= +go.opentelemetry.io/otel/metric v0.22.0 h1:/qv10BzznqEifrXBwsTT370OCN1PRgt+mnjzMwxJKrQ= +go.opentelemetry.io/otel/metric v0.22.0/go.mod h1:KcsUkBiYGW003DJ+ugd2aqIRIfjabD9jeOUXqsAtrq0= +go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod 
h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= +go.opentelemetry.io/otel/oteltest v1.0.0-RC2 h1:xNKqMhlZYkASSyvF4JwObZFMq0jhFN3c3SP+2rCzVPk= +go.opentelemetry.io/otel/oteltest v1.0.0-RC2/go.mod h1:kiQ4tw5tAL4JLTbcOYwK1CWI1HkT5aiLzHovgOVnz/A= +go.opentelemetry.io/otel/sdk v1.0.0-RC2 h1:ROuteeSCBaZNjiT9JcFzZepmInDvLktR28Y6qKo8bCs= +go.opentelemetry.io/otel/sdk v1.0.0-RC2/go.mod h1:fgwHyiDn4e5k40TD9VX243rOxXR+jzsWBZYA2P5jpEw= +go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= +go.opentelemetry.io/otel/trace v1.0.0-RC2 h1:dunAP0qDULMIT82atj34m5RgvsIK6LcsXf1c/MsYg1w= +go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= 
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/otel_collector/internal/buildscripts/compare-apidiff.sh b/internal/otel_collector/internal/buildscripts/compare-apidiff.sh new file mode 100644 index 00000000000..c60098fb4bf --- /dev/null +++ b/internal/otel_collector/internal/buildscripts/compare-apidiff.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# This 
script is used to compare API state snapshots to the current package state in order to validate that releases are not breaking backwards compatibility.
+
+usage() {
+  echo "Usage: $0"
+  echo
+  echo "-c  Check-incompatibility mode. Script will fail if an incompatible change is found. Default: 'false'"
+  echo "-p  Package to generate API state snapshot of. Default: ''"
+  echo "-d  Directory where prior states will be read from. Default: './internal/data/apidiff'"
+  exit 1
+}
+
+package=""
+input_dir="./internal/data/apidiff"
+check_only=false
+
+while getopts "cp:d:" o; do
+    case "${o}" in
+        c)
+            check_only=true
+            ;;
+        p)
+            package=$OPTARG
+            ;;
+        d)
+            input_dir=$OPTARG
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "$package" ]; then
+    usage
+fi
+
+set -e
+
+if [ -e "$input_dir/$package/apidiff.state" ]; then
+    changes=$(apidiff "$input_dir/$package/apidiff.state" "$package")
+    if [ ! -z "$changes" -a "$changes" != " " ]; then
+        SUB='Incompatible changes:'
+        if [ $check_only = true ] && [[ "$changes" =~ .*"$SUB".* ]]; then
+            echo "Incompatible Changes Found."
+            echo "Check the logs in the GitHub Action log group: 'Compare-States'."
+            exit 1
+        else
+            echo "Changes found in $package:"
+            echo "$changes"
+        fi
+    fi
+fi
diff --git a/internal/otel_collector/internal/buildscripts/gen-apidiff.sh b/internal/otel_collector/internal/buildscripts/gen-apidiff.sh
new file mode 100644
index 00000000000..5726d471997
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/gen-apidiff.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+# This script is used to create API state snapshots used to validate that releases are not breaking backwards compatibility.
+
+usage() {
+  echo "Usage: $0"
+  echo
+  echo "-d  Dry-run mode. No project files will be modified. Default: 'false'"
+  echo "-p  Package to generate API state snapshot of. Default: ''"
+  echo "-o  Output directory where state will be written to. Default: './internal/data/apidiff'"
+  exit 1
+}
+
+dry_run=false
+package=""
+output_dir="./internal/data/apidiff"
+
+while getopts "dp:o:" o; do
+    case "${o}" in
+        d)
+            dry_run=true
+            ;;
+        p)
+            package=$OPTARG
+            ;;
+        o)
+            output_dir=$OPTARG
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "$package" ]; then
+    usage
+fi
+
+set -ex
+
+# Create temp dir for generated files.
+tmp_dir=$(mktemp -d -t apidiff)
+clean_up() {
+    ARG=$?
+    if [ $dry_run = true ]; then
+        echo "Dry-run complete. Generated files can be found in $tmp_dir"
+    else
+        rm -rf "$tmp_dir"
+    fi
+    exit $ARG
+}
+trap clean_up EXIT
+
+mkdir -p "$tmp_dir/$package"
+
+apidiff -w "$tmp_dir/$package/apidiff.state" "$package"
+
+# Copy files if not in dry-run mode.
+if [ $dry_run = false ]; then
+    mkdir -p "$output_dir/$package" && \
+    cp "$tmp_dir/$package/apidiff.state" \
+       "$output_dir/$package"
+fi
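Taken together, these two helpers form a generate-then-compare workflow: gen-apidiff.sh records a package's exported API surface with `apidiff -w`, and compare-apidiff.sh later diffs the live package against that recorded state, failing in `-c` mode when apidiff reports incompatible changes. A sketch of how a release pipeline might chain them, run from the collector source root (the package path is illustrative, not taken from this change; apidiff itself comes from golang.org/x/exp):

    # Install the apidiff tool.
    go install golang.org/x/exp/cmd/apidiff@latest

    # Record the current API state for a package (dry run first to inspect the output).
    ./internal/buildscripts/gen-apidiff.sh -d -p go.opentelemetry.io/collector/consumer
    ./internal/buildscripts/gen-apidiff.sh -p go.opentelemetry.io/collector/consumer

    # At release time, fail the build if the package introduced incompatible changes.
    ./internal/buildscripts/compare-apidiff.sh -c -p go.opentelemetry.io/collector/consumer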
diff --git a/internal/otel_collector/internal/buildscripts/gen-certs.sh b/internal/otel_collector/internal/buildscripts/gen-certs.sh
new file mode 100644
index 00000000000..e5266367bf2
--- /dev/null
+++ b/internal/otel_collector/internal/buildscripts/gen-certs.sh
@@ -0,0 +1,125 @@
+#!/usr/bin/env bash
+
+# This script is used to create the CA, server, and client certificates and keys required by unit tests.
+# These certificates use the Subject Alternative Name extension rather than the Common Name field, which is no longer supported as of Go 1.15.
+
+usage() {
+  echo "Usage: $0 [-d]"
+  echo
+  echo "-d  Dry-run mode. No project files will be modified. Default: 'false'"
+  echo "-m  Domain name to use in the certificate. Default: 'localhost'"
+  echo "-o  Output directory where certificates will be written to. Default: '.', the current directory"
+  exit 1
+}
+
+dry_run=false
+domain="localhost"
+output_dir="."
+
+while getopts "dm:o:" o; do
+    case "${o}" in
+        d)
+            dry_run=true
+            ;;
+        m)
+            domain=$OPTARG
+            ;;
+        o)
+            output_dir=$OPTARG
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+set -ex
+
+# Create temp dir for generated files.
+tmp_dir=$(mktemp -d -t certificates)
+clean_up() {
+    ARG=$?
+    if [ $dry_run = true ]; then
+        echo "Dry-run complete. Generated files can be found in $tmp_dir"
+    else
+        rm -rf "$tmp_dir"
+    fi
+    exit $ARG
+}
+trap clean_up EXIT
+
+gen_ssl_conf() {
+    domain_name=$1
+    output_file=$2
+
+    cat << EOF > "$output_file"
+[ req ]
+prompt = no
+default_bits = 2048
+distinguished_name = req_distinguished_name
+req_extensions = req_ext
+
+[ req_distinguished_name ]
+countryName = AU
+stateOrProvinceName = Australia
+localityName = Sydney
+organizationName = MyOrgName
+commonName = MyCommonName
+
+[ req_ext ]
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.1 = $domain_name
+EOF
+}
+
+# Generate config files.
+gen_ssl_conf "$domain" "$tmp_dir/ssl.conf"
+
+# Create CA (accept defaults from prompts).
+openssl genrsa -out "$tmp_dir/ca.key" 2048
+openssl req -new -key "$tmp_dir/ca.key" -x509 -days 3650 -out "$tmp_dir/ca.crt" -config "$tmp_dir/ssl.conf"
+
+# Create client and server keys.
+openssl genrsa -out "$tmp_dir/server.key" 2048
+openssl genrsa -out "$tmp_dir/client.key" 2048
+
+# Create certificate signing requests using the keys created above.
+openssl req -new -nodes -key "$tmp_dir/server.key" -out "$tmp_dir/server.csr" -config "$tmp_dir/ssl.conf"
+openssl req -new -nodes -key "$tmp_dir/client.key" -out "$tmp_dir/client.csr" -config "$tmp_dir/ssl.conf"
+
+# Create the client and server certificates.
+openssl x509 -req \
+    -sha256 \
+    -days 3650 \
+    -in "$tmp_dir/server.csr" \
+    -signkey "$tmp_dir/server.key" \
+    -out "$tmp_dir/server.crt" \
+    -extensions req_ext \
+    -CA "$tmp_dir/ca.crt" \
+    -CAkey "$tmp_dir/ca.key" \
+    -CAcreateserial \
+    -extfile "$tmp_dir/ssl.conf"
+openssl x509 -req \
+    -sha256 \
+    -days 3650 \
+    -in "$tmp_dir/client.csr" \
+    -signkey "$tmp_dir/client.key" \
+    -out "$tmp_dir/client.crt" \
+    -extensions req_ext \
+    -CA "$tmp_dir/ca.crt" \
+    -CAkey "$tmp_dir/ca.key" \
+    -CAcreateserial \
+    -extfile "$tmp_dir/ssl.conf"
+
+# Copy files if not in dry-run mode.
+if [ $dry_run = false ]; then
+    cp "$tmp_dir/ca.crt" \
+       "$tmp_dir/client.crt" \
+       "$tmp_dir/client.key" \
+       "$tmp_dir/server.crt" \
+       "$tmp_dir/server.key" \
+       "$output_dir"
+fi
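As a usage sketch (paths hypothetical, a reasonably recent openssl assumed), a test author could generate certificates for a custom domain and confirm the Subject Alternative Name entry that Go's verifier requires:

    # Generate CA, server, and client certificates for example.test into ./testdata.
    ./internal/buildscripts/gen-certs.sh -m example.test -o ./testdata

    # Confirm the SAN extension is present in the server certificate.
    openssl x509 -in ./testdata/server.crt -noout -text | grep -A1 'Subject Alternative Name'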
diff --git a/internal/otel_collector/internal/cgroups/cgroup.go b/internal/otel_collector/internal/cgroups/cgroup.go
new file mode 100644
index 00000000000..2e2702a9bd0
--- /dev/null
+++ b/internal/otel_collector/internal/cgroups/cgroup.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Keep the original Uber license.
+
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+    "bufio"
+    "io"
+    "os"
+    "path/filepath"
+    "strconv"
+)
+
+// CGroup represents the data structure for a Linux control group.
+type CGroup struct {
+    path string
+}
+
+// NewCGroup returns a new *CGroup from a given path.
+func NewCGroup(path string) *CGroup {
+    return &CGroup{path: path}
+}
+
+// Path returns the path of the CGroup.
+func (cg *CGroup) Path() string {
+    return cg.path
+}
+
+// ParamPath returns the path of the given cgroup param under itself.
+func (cg *CGroup) ParamPath(param string) string {
+    return filepath.Join(cg.path, param)
+}
+
+// readFirstLine reads the first line from a cgroup param file.
+func (cg *CGroup) readFirstLine(param string) (string, error) {
+    paramFile, err := os.Open(cg.ParamPath(param))
+    if err != nil {
+        return "", err
+    }
+    defer paramFile.Close()
+
+    scanner := bufio.NewScanner(paramFile)
+    if scanner.Scan() {
+        return scanner.Text(), nil
+    }
+    if err := scanner.Err(); err != nil {
+        return "", err
+    }
+    return "", io.ErrUnexpectedEOF
+}
+
+// readInt parses the first line from a cgroup param file as int.
+func (cg *CGroup) readInt(param string) (int, error) {
+    text, err := cg.readFirstLine(param)
+    if err != nil {
+        return 0, err
+    }
+    return strconv.Atoi(text)
+}
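The exported surface of this file is deliberately small: NewCGroup wraps a directory path and ParamPath joins a parameter file name onto it; the typed readers stay unexported. A minimal sketch of how calling code might read a raw parameter. Assumptions: the cgroup v1 mount point /sys/fs/cgroup/memory is the conventional location, not guaranteed by this file, and the import compiles only from within the collector module because the package is internal.

    // Illustrative sketch only; linux-only, and not part of this change.
    package main

    import (
        "fmt"
        "os"

        "go.opentelemetry.io/collector/internal/cgroups" // internal: usable only inside this module
    )

    func main() {
        cg := cgroups.NewCGroup("/sys/fs/cgroup/memory") // assumed cgroup v1 mount
        // ParamPath simply joins the parameter name onto the cgroup directory.
        data, err := os.ReadFile(cg.ParamPath("memory.limit_in_bytes"))
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Printf("raw limit: %s", data)
    }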
diff --git a/internal/otel_collector/internal/cgroups/cgroups.go b/internal/otel_collector/internal/cgroups/cgroups.go
new file mode 100644
index 00000000000..8eb3c18345c
--- /dev/null
+++ b/internal/otel_collector/internal/cgroups/cgroups.go
@@ -0,0 +1,123 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Keep the original Uber license.
+
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+const (
+    // _cgroupFSType is the Linux CGroup file system type used in
+    // `/proc/$PID/mountinfo`.
+    _cgroupFSType = "cgroup"
+    // _cgroupSubsysCPU is the CPU CGroup subsystem.
+    _cgroupSubsysCPU = "cpu"
+    // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+    _cgroupSubsysCPUAcct = "cpuacct"
+    // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+    _cgroupSubsysCPUSet = "cpuset"
+    // _cgroupSubsysMemory is the Memory CGroup subsystem.
+    _cgroupSubsysMemory = "memory"
+
+    _cgroupMemoryLimitBytes = "memory.limit_in_bytes"
+)
+
+const (
+    _procPathCGroup    = "/proc/self/cgroup"
+    _procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from the given `mountinfo` and `cgroup`
+// files for some process under the `/proc` file system (see also proc(5) for
+// more information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+    cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+    if err != nil {
+        return nil, err
+    }
+
+    cgroups := make(CGroups)
+    newMountPoint := func(mp *MountPoint) error {
+        if mp.FSType != _cgroupFSType {
+            return nil
+        }
+
+        for _, opt := range mp.SuperOptions {
+            subsys, exists := cgroupSubsystems[opt]
+            if !exists {
+                continue
+            }
+
+            cgroupPath, err := mp.Translate(subsys.Name)
+            if err != nil {
+                return err
+            }
+            cgroups[opt] = NewCGroup(cgroupPath)
+        }
+
+        return nil
+    }
+
+    if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+        return nil, err
+    }
+    return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+    return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// MemoryQuota returns the total memory limit of the process.
+// It is a result of `memory.limit_in_bytes`. If the value of
+// `memory.limit_in_bytes` was not set (-1) or (9223372036854771712), the method returns `(-1, false, nil)`.
+func (cg CGroups) MemoryQuota() (int64, bool, error) {
+    memCGroup, exists := cg[_cgroupSubsysMemory]
+    if !exists {
+        return -1, false, nil
+    }
+
+    memLimitBytes, err := memCGroup.readInt(_cgroupMemoryLimitBytes)
+    if defined := memLimitBytes > 0; err != nil || !defined {
+        return -1, defined, err
+    }
+    return int64(memLimitBytes), true, nil
+}
diff --git a/internal/otel_collector/internal/cgroups/doc.go b/internal/otel_collector/internal/cgroups/doc.go
new file mode 100644
index 00000000000..c47417a93db
--- /dev/null
+++ b/internal/otel_collector/internal/cgroups/doc.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Keep the original Uber license.
+
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package cgroups provides utilities to access Linux control group (CGroups)
+// parameters (total memory, for example) for a given process.
+// The original implementation is taken from https://github.com/uber-go/automaxprocs
+package cgroups
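With the package documented above, a caller can derive the process's cgroup memory limit in a few lines: build a CGroups map from /proc/self, then ask it for the memory quota. A minimal sketch (hypothetical file, linux-only; the import compiles only inside the collector module because the package is internal):

    // Illustrative sketch only, not part of this change.
    package main

    import (
        "fmt"
        "log"

        "go.opentelemetry.io/collector/internal/cgroups" // internal: usable only inside this module
    )

    func main() {
        // Parses /proc/self/cgroup and /proc/self/mountinfo.
        cg, err := cgroups.NewCGroupsForCurrentProcess()
        if err != nil {
            log.Fatal(err)
        }

        limit, defined, err := cg.MemoryQuota()
        if err != nil {
            log.Fatal(err)
        }
        if !defined {
            fmt.Println("no cgroup memory limit defined")
            return
        }
        fmt.Printf("cgroup memory limit: %d bytes\n", limit)
    }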
+ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/internal/otel_collector/internal/cgroups/mountpoint.go b/internal/otel_collector/internal/cgroups/mountpoint.go new file mode 100644 index 00000000000..ce952352cc9 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/mountpoint.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." 
|| strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. +func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(filepath.Clean(procPathMountInfo)) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/internal/otel_collector/internal/cgroups/subsys.go b/internal/otel_collector/internal/cgroups/subsys.go new file mode 100644 index 00000000000..0eb68f758a8 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/subsys.go @@ -0,0 +1,120 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Keep the original Uber license. + +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. 
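
A worked sketch of the mountinfo parsing above (hypothetical caller; the line values are taken from the testdata files added later in this diff): a cgroup mountinfo entry plus the subsystem path from `/proc/self/cgroup` resolve to a path on the host. The CGroupSubsys parser below handles the `/proc/self/cgroup` side.

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/internal/cgroups"
    )

    func main() {
    	// A mountinfo line for the memory cgroup, mounted with root /docker.
    	mp, err := cgroups.NewMountPointFromLine(
    		"8 5 0:7 /docker /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory")
    	if err != nil {
    		panic(err)
    	}
    	// The process's memory cgroup path, e.g. from the
    	// `3:memory:/docker/large` line in /proc/self/cgroup.
    	path, err := mp.Translate("/docker/large")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(path) // /sys/fs/cgroup/memory/large
    }
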
+type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.Split(line, _cgroupSep) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. +func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(filepath.Clean(procPathCGroup)) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us new file mode 100644 index 00000000000..f7393e847d3 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us @@ -0,0 +1 @@ +100000 diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us new file mode 100644 index 00000000000..26f3b3ddf28 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us @@ -0,0 +1 @@ +600000 diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us new file mode 100644 index 00000000000..e69de29bb2d diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us new file mode 100644 index 00000000000..f43dfb15698 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us @@ -0,0 +1 @@ +non-an-integer diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us new file mode 100644 index 00000000000..959e88a89af --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us @@ -0,0 +1 @@ +800000 diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us new file mode 100644 index 00000000000..f7393e847d3 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us @@ -0,0 +1 @@ +100000 diff --git a/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us 
b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us new file mode 100644 index 00000000000..3a2e3f4984a --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us @@ -0,0 +1 @@ +-1 diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/cgroup new file mode 100644 index 00000000000..1724dc83892 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/cgroup @@ -0,0 +1,3 @@ +3:memory:/docker/large +2:cpu,cpuacct:/docker +1:cpuset:/ diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/mountinfo new file mode 100644 index 00000000000..e68af08a576 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/mountinfo @@ -0,0 +1,8 @@ +1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered +2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 +3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw +4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw +5 4 0:4 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:5 - tmpfs tmpfs ro,mode=755 +6 5 0:5 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:6 - cgroup cgroup rw,cpuset +7 5 0:6 /docker /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup rw,cpu,cpuacct +8 5 0:7 /docker /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/invalid-cgroup/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-cgroup/cgroup new file mode 100644 index 00000000000..6d9b22bd764 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-cgroup/cgroup @@ -0,0 +1,2 @@ +1:cpu:/cpu +invalid-line: diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo new file mode 100644 index 00000000000..3c8dabe4c91 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo @@ -0,0 +1 @@ +1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/cgroup new file mode 100644 index 00000000000..44519662184 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/cgroup @@ -0,0 +1,2 @@ +1:cpu:/docker +2:cpuacct:/docker diff --git a/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/mountinfo new file mode 100644 index 00000000000..245daae6eb4 --- /dev/null +++ b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/mountinfo @@ -0,0 +1,2 @@ +31 23 0:24 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime shared:1 - cgroup cgroup rw,cpu +32 23 0:25 /docker/0123456789abcdef /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime shared:2 - cgroup cgroup rw,cpuacct diff --git a/internal/otel_collector/internal/collector/telemetry/telemetry.go b/internal/otel_collector/internal/collector/telemetry/telemetry.go new file mode 
100644 index 00000000000..a16cab4fe73 --- /dev/null +++ b/internal/otel_collector/internal/collector/telemetry/telemetry.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package telemetry controls the telemetry settings to be used in the collector. +package telemetry + +import ( + "flag" +) + +const ( + metricsAddrCfg = "metrics-addr" + metricsPrefixCfg = "metrics-prefix" +) + +var ( + // Command-line flags that control publication of telemetry data. + metricsAddrPtr *string + metricsPrefixPtr *string + + addInstanceIDPtr *bool +) + +func Flags(flags *flag.FlagSet) { + // At least until we can use a generic, i.e.: OpenCensus, metrics exporter + // we default to Prometheus at port 8888, if not otherwise specified. + metricsAddrPtr = flags.String( + metricsAddrCfg, + GetMetricsAddrDefault(), + "[address]:port for exposing collector telemetry.") + + metricsPrefixPtr = flags.String( + metricsPrefixCfg, + "otelcol", + "Prefix to the metrics generated by the collector.") + + addInstanceIDPtr = flags.Bool( + "add-instance-id", + true, + "Flag to control the addition of 'service.instance.id' to the collector metrics.") +} + +// GetMetricsAddrDefault returns the default metrics bind address and port depending on +// the current build type. +func GetMetricsAddrDefault() string { + return ":8888" +} + +func GetAddInstanceID() bool { + return *addInstanceIDPtr +} + +func GetMetricsAddr() string { + return *metricsAddrPtr +} + +func GetMetricsPrefix() string { + return *metricsPrefixPtr +} diff --git a/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go b/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go new file mode 100644 index 00000000000..ddb2ca5cb72 --- /dev/null +++ b/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internalconsumertest + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/model/pdata" +) + +type ErrOrSinkConsumer struct { + *consumertest.TracesSink + *consumertest.MetricsSink + mu sync.Mutex + consumeError error // to be returned by ConsumeTraces, if set +} + +// SetConsumeError sets an error that will be returned by the Consume function. 
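
A hedged sketch of how a test could drive this consumer (hypothetical test file; assumes access to these internal packages and the model/pdata API used in this diff): inject an error, verify it is returned, then Reset and consume normally.

    package example

    import (
    	"context"
    	"errors"
    	"testing"

    	"go.opentelemetry.io/collector/consumer/consumertest"
    	"go.opentelemetry.io/collector/internal/internalconsumertest"
    	"go.opentelemetry.io/collector/model/pdata"
    )

    func TestErrOrSinkConsumer(t *testing.T) {
    	sink := &internalconsumertest.ErrOrSinkConsumer{TracesSink: new(consumertest.TracesSink)}

    	// Simulate a failing downstream consumer.
    	sink.SetConsumeError(errors.New("downstream unavailable"))
    	if err := sink.ConsumeTraces(context.Background(), pdata.NewTraces()); err == nil {
    		t.Fatal("expected the injected error")
    	}

    	// Clear the error; traces are now stored in the sink.
    	sink.Reset()
    	if err := sink.ConsumeTraces(context.Background(), pdata.NewTraces()); err != nil {
    		t.Fatal(err)
    	}
    }
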
+func (esc *ErrOrSinkConsumer) SetConsumeError(err error) {
+	esc.mu.Lock()
+	defer esc.mu.Unlock()
+	esc.consumeError = err
+}
+
+func (esc *ErrOrSinkConsumer) Capabilities() consumer.Capabilities {
+	return consumer.Capabilities{MutatesData: false}
+}
+
+// ConsumeTraces stores traces to this sink.
+func (esc *ErrOrSinkConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+	esc.mu.Lock()
+	defer esc.mu.Unlock()
+
+	if esc.consumeError != nil {
+		return esc.consumeError
+	}
+
+	return esc.TracesSink.ConsumeTraces(ctx, td)
+}
+
+// ConsumeMetrics stores metrics to this sink.
+func (esc *ErrOrSinkConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
+	esc.mu.Lock()
+	defer esc.mu.Unlock()
+
+	if esc.consumeError != nil {
+		return esc.consumeError
+	}
+
+	return esc.MetricsSink.ConsumeMetrics(ctx, md)
+}
+
+// Reset deletes any data stored in the sinks and resets the error to nil.
+func (esc *ErrOrSinkConsumer) Reset() {
+	esc.mu.Lock()
+	defer esc.mu.Unlock()
+
+	esc.consumeError = nil
+	if esc.TracesSink != nil {
+		esc.TracesSink.Reset()
+	}
+	if esc.MetricsSink != nil {
+		esc.MetricsSink.Reset()
+	}
+}
diff --git a/internal/otel_collector/internal/iruntime/mem_info.go b/internal/otel_collector/internal/iruntime/mem_info.go
new file mode 100644
index 00000000000..53019e8dd44
--- /dev/null
+++ b/internal/otel_collector/internal/iruntime/mem_info.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package iruntime
+
+import (
+	"github.com/shirou/gopsutil/mem"
+)
+
+// readMemInfo returns the total system memory.
+// It is supported on Linux, Darwin, and Windows.
+func readMemInfo() (uint64, error) {
+	vmStat, err := mem.VirtualMemory()
+	return vmStat.Total, err
+}
diff --git a/internal/otel_collector/internal/iruntime/total_memory_linux.go b/internal/otel_collector/internal/iruntime/total_memory_linux.go
new file mode 100644
index 00000000000..8048dcf8f87
--- /dev/null
+++ b/internal/otel_collector/internal/iruntime/total_memory_linux.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package iruntime
+
+import "go.opentelemetry.io/collector/internal/cgroups"
+
+// unlimitedMemorySize is the value reported for `memory.limit_in_bytes` when
+// no memory limit is set for the container or process under cgroups.
+const unlimitedMemorySize = 9223372036854771712
+
+// TotalMemory returns total available memory.
+// This implementation is meant for linux and uses cgroups to determine available memory. +func TotalMemory() (uint64, error) { + cgroups, err := cgroups.NewCGroupsForCurrentProcess() + if err != nil { + return 0, err + } + memoryQuota, defined, err := cgroups.MemoryQuota() + if err != nil || !defined { + return 0, err + } + + if memoryQuota == unlimitedMemorySize { + totalMem, err := readMemInfo() + if err != nil { + return 0, err + } + return totalMem, nil + } + + return uint64(memoryQuota), nil +} diff --git a/internal/otel_collector/internal/iruntime/total_memory_other.go b/internal/otel_collector/internal/iruntime/total_memory_other.go new file mode 100644 index 00000000000..9e5c2e93c00 --- /dev/null +++ b/internal/otel_collector/internal/iruntime/total_memory_other.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !linux +// +build !linux + +package iruntime + +// TotalMemory returns total available memory for non-linux platforms. +func TotalMemory() (uint64, error) { + return readMemInfo() +} diff --git a/internal/otel_collector/internal/middleware/compression.go b/internal/otel_collector/internal/middleware/compression.go new file mode 100644 index 00000000000..8b0f3cdd09e --- /dev/null +++ b/internal/otel_collector/internal/middleware/compression.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "io" + "net/http" +) + +const ( + headerContentEncoding = "Content-Encoding" + headerValueGZIP = "gzip" +) + +type CompressRoundTripper struct { + http.RoundTripper +} + +func NewCompressRoundTripper(rt http.RoundTripper) *CompressRoundTripper { + return &CompressRoundTripper{ + RoundTripper: rt, + } +} + +func (r *CompressRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + + if req.Header.Get(headerContentEncoding) != "" { + // If the header already specifies a content encoding then skip compression + // since we don't want to compress it again. This is a safeguard that normally + // should not happen since CompressRoundTripper is not intended to be used + // with http clients which already do their own compression. + return r.RoundTripper.RoundTrip(req) + } + + // Gzip the body. 
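+	// The body is streamed through an in-memory gzip writer. Note the error
+	// ordering below: the writer is closed before the copy/close errors are
+	// inspected, so the gzip footer is flushed even on those error paths.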
+ buf := bytes.NewBuffer([]byte{}) + gzipWriter := gzip.NewWriter(buf) + _, copyErr := io.Copy(gzipWriter, req.Body) + closeErr := req.Body.Close() + + if err := gzipWriter.Close(); err != nil { + return nil, err + } + + if copyErr != nil { + return nil, copyErr + } + if closeErr != nil { + return nil, closeErr + } + + // Create a new request since the docs say that we cannot modify the "req" + // (see https://golang.org/pkg/net/http/#RoundTripper). + cReq, err := http.NewRequestWithContext(req.Context(), req.Method, req.URL.String(), buf) + if err != nil { + return nil, err + } + + // Clone the headers and add gzip encoding header. + cReq.Header = req.Header.Clone() + cReq.Header.Add(headerContentEncoding, headerValueGZIP) + + return r.RoundTripper.RoundTrip(cReq) +} + +type ErrorHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) + +type decompressor struct { + errorHandler ErrorHandler +} + +type DecompressorOption func(d *decompressor) + +func WithErrorHandler(e ErrorHandler) DecompressorOption { + return func(d *decompressor) { + d.errorHandler = e + } +} + +// HTTPContentDecompressor is a middleware that offloads the task of handling compressed +// HTTP requests by identifying the compression format in the "Content-Encoding" header and re-writing +// request body so that the handlers further in the chain can work on decompressed data. +// It supports gzip and deflate/zlib compression. +func HTTPContentDecompressor(h http.Handler, opts ...DecompressorOption) http.Handler { + d := &decompressor{} + for _, o := range opts { + o(d) + } + if d.errorHandler == nil { + d.errorHandler = defaultErrorHandler + } + return d.wrap(h) +} + +func (d *decompressor) wrap(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + newBody, err := newBodyReader(r) + if err != nil { + d.errorHandler(w, r, err.Error(), http.StatusBadRequest) + return + } + if newBody != nil { + defer newBody.Close() + // "Content-Encoding" header is removed to avoid decompressing twice + // in case the next handler(s) have implemented a similar mechanism. + r.Header.Del("Content-Encoding") + // "Content-Length" is set to -1 as the size of the decompressed body is unknown. + r.Header.Del("Content-Length") + r.ContentLength = -1 + r.Body = newBody + } + h.ServeHTTP(w, r) + }) +} + +func newBodyReader(r *http.Request) (io.ReadCloser, error) { + switch r.Header.Get("Content-Encoding") { + case "gzip": + gr, err := gzip.NewReader(r.Body) + if err != nil { + return nil, err + } + return gr, nil + case "deflate", "zlib": + zr, err := zlib.NewReader(r.Body) + if err != nil { + return nil, err + } + return zr, nil + } + return nil, nil +} + +// defaultErrorHandler writes the error message in plain text. +func defaultErrorHandler(w http.ResponseWriter, _ *http.Request, errMsg string, statusCode int) { + http.Error(w, errMsg, statusCode) +} diff --git a/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_exporter.go b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_exporter.go new file mode 100644 index 00000000000..e85f6ef119f --- /dev/null +++ b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_exporter.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsmetrics
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+const (
+	// ExporterKey used to identify exporters in metrics and traces.
+	ExporterKey = "exporter"
+
+	// SentSpansKey used to track spans sent by exporters.
+	SentSpansKey = "sent_spans"
+	// FailedToSendSpansKey used to track spans that failed to be sent by exporters.
+	FailedToSendSpansKey = "send_failed_spans"
+	// FailedToEnqueueSpansKey used to track spans that failed to be added to the sending queue.
+	FailedToEnqueueSpansKey = "enqueue_failed_spans"
+
+	// SentMetricPointsKey used to track metric points sent by exporters.
+	SentMetricPointsKey = "sent_metric_points"
+	// FailedToSendMetricPointsKey used to track metric points that failed to be sent by exporters.
+	FailedToSendMetricPointsKey = "send_failed_metric_points"
+	// FailedToEnqueueMetricPointsKey used to track metric points that failed to be added to the sending queue.
+	FailedToEnqueueMetricPointsKey = "enqueue_failed_metric_points"
+
+	// SentLogRecordsKey used to track logs sent by exporters.
+	SentLogRecordsKey = "sent_log_records"
+	// FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters.
+	FailedToSendLogRecordsKey = "send_failed_log_records"
+	// FailedToEnqueueLogRecordsKey used to track log records that failed to be added to the sending queue.
+	FailedToEnqueueLogRecordsKey = "enqueue_failed_log_records"
+)
+
+var (
+	TagKeyExporter, _ = tag.NewKey(ExporterKey)
+
+	ExporterPrefix                 = ExporterKey + NameSep
+	ExportTraceDataOperationSuffix = NameSep + "traces"
+	ExportMetricsOperationSuffix   = NameSep + "metrics"
+	ExportLogsOperationSuffix      = NameSep + "logs"
+
+	// Exporter metrics. Any count of data items below is in the final format
+	// that they were sent, reasoning: reconciliation is easier if measurements
+	// on backend and exporter are expected to be the same. Translation issues
+	// that result in a different number of elements should be reported in a
+	// separate way.
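+	//
+	// Each measure name below is the prefix joined with its key, e.g.
+	// ExporterPrefix+SentSpansKey yields "exporter/sent_spans".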
+	ExporterSentSpans = stats.Int64(
+		ExporterPrefix+SentSpansKey,
+		"Number of spans successfully sent to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToSendSpans = stats.Int64(
+		ExporterPrefix+FailedToSendSpansKey,
+		"Number of spans in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToEnqueueSpans = stats.Int64(
+		ExporterPrefix+FailedToEnqueueSpansKey,
+		"Number of spans failed to be added to the sending queue.",
+		stats.UnitDimensionless)
+	ExporterSentMetricPoints = stats.Int64(
+		ExporterPrefix+SentMetricPointsKey,
+		"Number of metric points successfully sent to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToSendMetricPoints = stats.Int64(
+		ExporterPrefix+FailedToSendMetricPointsKey,
+		"Number of metric points in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToEnqueueMetricPoints = stats.Int64(
+		ExporterPrefix+FailedToEnqueueMetricPointsKey,
+		"Number of metric points failed to be added to the sending queue.",
+		stats.UnitDimensionless)
+	ExporterSentLogRecords = stats.Int64(
+		ExporterPrefix+SentLogRecordsKey,
+		"Number of log records successfully sent to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToSendLogRecords = stats.Int64(
+		ExporterPrefix+FailedToSendLogRecordsKey,
+		"Number of log records in failed attempts to send to destination.",
+		stats.UnitDimensionless)
+	ExporterFailedToEnqueueLogRecords = stats.Int64(
+		ExporterPrefix+FailedToEnqueueLogRecordsKey,
+		"Number of log records failed to be added to the sending queue.",
+		stats.UnitDimensionless)
+)
diff --git a/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_processor.go b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_processor.go
new file mode 100644
index 00000000000..ea4a9a51751
--- /dev/null
+++ b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_processor.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsmetrics
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+const (
+	// ProcessorKey is the key used to identify processors in metrics and traces.
+	ProcessorKey = "processor"
+
+	// DroppedSpansKey is the key used to identify spans dropped by the Collector.
+	DroppedSpansKey = "dropped_spans"
+
+	// DroppedMetricPointsKey is the key used to identify metric points dropped by the Collector.
+	DroppedMetricPointsKey = "dropped_metric_points"
+
+	// DroppedLogRecordsKey is the key used to identify log records dropped by the Collector.
+	DroppedLogRecordsKey = "dropped_log_records"
+)
+
+var (
+	TagKeyProcessor, _ = tag.NewKey(ProcessorKey)
+
+	ProcessorPrefix = ProcessorKey + NameSep
+
+	// Processor metrics. Any count of data items below is in the internal format
+	// of the collector since processors only deal with internal format.
+	ProcessorAcceptedSpans = stats.Int64(
+		ProcessorPrefix+AcceptedSpansKey,
+		"Number of spans successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorRefusedSpans = stats.Int64(
+		ProcessorPrefix+RefusedSpansKey,
+		"Number of spans that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorDroppedSpans = stats.Int64(
+		ProcessorPrefix+DroppedSpansKey,
+		"Number of spans that were dropped.",
+		stats.UnitDimensionless)
+	ProcessorAcceptedMetricPoints = stats.Int64(
+		ProcessorPrefix+AcceptedMetricPointsKey,
+		"Number of metric points successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorRefusedMetricPoints = stats.Int64(
+		ProcessorPrefix+RefusedMetricPointsKey,
+		"Number of metric points that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorDroppedMetricPoints = stats.Int64(
+		ProcessorPrefix+DroppedMetricPointsKey,
+		"Number of metric points that were dropped.",
+		stats.UnitDimensionless)
+	ProcessorAcceptedLogRecords = stats.Int64(
+		ProcessorPrefix+AcceptedLogRecordsKey,
+		"Number of log records successfully pushed into the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorRefusedLogRecords = stats.Int64(
+		ProcessorPrefix+RefusedLogRecordsKey,
+		"Number of log records that were rejected by the next component in the pipeline.",
+		stats.UnitDimensionless)
+	ProcessorDroppedLogRecords = stats.Int64(
+		ProcessorPrefix+DroppedLogRecordsKey,
+		"Number of log records that were dropped.",
+		stats.UnitDimensionless)
+)
diff --git a/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_receiver.go b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_receiver.go
new file mode 100644
index 00000000000..8cc7bf8b296
--- /dev/null
+++ b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_receiver.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsmetrics
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+const (
+	// ReceiverKey used to identify receivers in metrics and traces.
+	ReceiverKey = "receiver"
+	// TransportKey used to identify the transport used to receive the data.
+	TransportKey = "transport"
+	// FormatKey used to identify the format of the data received.
+	FormatKey = "format"
+
+	// AcceptedSpansKey used to identify spans accepted by the Collector.
+	AcceptedSpansKey = "accepted_spans"
+	// RefusedSpansKey used to identify spans refused (i.e. not ingested) by the Collector.
+	RefusedSpansKey = "refused_spans"
+
+	// AcceptedMetricPointsKey used to identify metric points accepted by the Collector.
+	AcceptedMetricPointsKey = "accepted_metric_points"
+	// RefusedMetricPointsKey used to identify metric points refused (i.e. not ingested) by the
+	// Collector.
+	RefusedMetricPointsKey = "refused_metric_points"
+
+	// AcceptedLogRecordsKey used to identify log records accepted by the Collector.
+	AcceptedLogRecordsKey = "accepted_log_records"
+	// RefusedLogRecordsKey used to identify log records refused (i.e. not ingested) by the
+	// Collector.
+	RefusedLogRecordsKey = "refused_log_records"
+)
+
+var (
+	TagKeyReceiver, _  = tag.NewKey(ReceiverKey)
+	TagKeyTransport, _ = tag.NewKey(TransportKey)
+
+	ReceiverPrefix                  = ReceiverKey + NameSep
+	ReceiveTraceDataOperationSuffix = NameSep + "TraceDataReceived"
+	ReceiverMetricsOperationSuffix  = NameSep + "MetricsReceived"
+	ReceiverLogsOperationSuffix     = NameSep + "LogsReceived"
+
+	// Receiver metrics. Any count of data items below is in the original format
+	// that they were received, reasoning: reconciliation is easier if measurements
+	// on clients and receiver are expected to be the same. Translation issues
+	// that result in a different number of elements should be reported in a
+	// separate way.
+	ReceiverAcceptedSpans = stats.Int64(
+		ReceiverPrefix+AcceptedSpansKey,
+		"Number of spans successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	ReceiverRefusedSpans = stats.Int64(
+		ReceiverPrefix+RefusedSpansKey,
+		"Number of spans that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+	ReceiverAcceptedMetricPoints = stats.Int64(
+		ReceiverPrefix+AcceptedMetricPointsKey,
+		"Number of metric points successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	ReceiverRefusedMetricPoints = stats.Int64(
+		ReceiverPrefix+RefusedMetricPointsKey,
+		"Number of metric points that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+	ReceiverAcceptedLogRecords = stats.Int64(
+		ReceiverPrefix+AcceptedLogRecordsKey,
+		"Number of log records successfully pushed into the pipeline.",
+		stats.UnitDimensionless)
+	ReceiverRefusedLogRecords = stats.Int64(
+		ReceiverPrefix+RefusedLogRecordsKey,
+		"Number of log records that could not be pushed into the pipeline.",
+		stats.UnitDimensionless)
+)
diff --git a/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_scraper.go b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_scraper.go
new file mode 100644
index 00000000000..dedbe4953ef
--- /dev/null
+++ b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obs_scraper.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsmetrics
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+const (
+	// ScraperKey used to identify scrapers in metrics and traces.
+	ScraperKey = "scraper"
+
+	// ScrapedMetricPointsKey used to identify metric points scraped by the
+	// Collector.
+	ScrapedMetricPointsKey = "scraped_metric_points"
+	// ErroredMetricPointsKey used to identify metric points errored (i.e.
+	// unable to be scraped) by the Collector.
+	ErroredMetricPointsKey = "errored_metric_points"
+)
+
+const (
+	ScraperPrefix                 = ScraperKey + NameSep
+	ScraperMetricsOperationSuffix = NameSep + "MetricsScraped"
+)
+
+var (
+	TagKeyScraper, _ = tag.NewKey(ScraperKey)
+
+	ScraperScrapedMetricPoints = stats.Int64(
+		ScraperPrefix+ScrapedMetricPointsKey,
+		"Number of metric points successfully scraped.",
+		stats.UnitDimensionless)
+	ScraperErroredMetricPoints = stats.Int64(
+		ScraperPrefix+ErroredMetricPointsKey,
+		"Number of metric points that were unable to be scraped.",
+		stats.UnitDimensionless)
+)
diff --git a/internal/otel_collector/internal/obsreportconfig/obsmetrics/obsmetrics.go b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obsmetrics.go
new file mode 100644
index 00000000000..fe7ea208116
--- /dev/null
+++ b/internal/otel_collector/internal/obsreportconfig/obsmetrics/obsmetrics.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package obsmetrics defines the obsreport metrics for each component.
+// All metrics are in OpenCensus format; they will be replaced with OTel
+// Metrics in the future.
+package obsmetrics
+
+const (
+	NameSep = "/"
+)
diff --git a/internal/otel_collector/internal/obsreportconfig/obsreportconfig.go b/internal/otel_collector/internal/obsreportconfig/obsreportconfig.go
new file mode 100644
index 00000000000..cf51bf6ecfa
--- /dev/null
+++ b/internal/otel_collector/internal/obsreportconfig/obsreportconfig.go
@@ -0,0 +1,127 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreportconfig
+
+import (
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+)
+
+var (
+	Level = configtelemetry.LevelBasic
+)
+
+// ObsMetrics wraps OpenCensus View for Collector observability metrics
+type ObsMetrics struct {
+	Views []*view.View
+}
+
+// Configure is used to control the settings that will be used by the obsreport
+// package.
+func Configure(level configtelemetry.Level) *ObsMetrics {
+	Level = level
+	var views []*view.View
+
+	if Level != configtelemetry.LevelNone {
+		obsMetricViews := allViews()
+		views = append(views, obsMetricViews.Views...)
+	}
+
+	return &ObsMetrics{
+		Views: views,
+	}
+}
+
+// allViews returns the list of all views that need to be configured.
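
The Views returned by Configure are ordinary OpenCensus views, so a host process registers them before any telemetry flows; a minimal sketch (hypothetical caller, assuming access to the internal package). The allViews implementation follows.

    package main

    import (
    	"log"

    	"go.opencensus.io/stats/view"

    	"go.opentelemetry.io/collector/config/configtelemetry"
    	"go.opentelemetry.io/collector/internal/obsreportconfig"
    )

    func main() {
    	// Configure sets the package-level telemetry Level and returns the
    	// OpenCensus views for receivers, scrapers, processors and exporters.
    	obsMetrics := obsreportconfig.Configure(configtelemetry.LevelBasic)
    	if err := view.Register(obsMetrics.Views...); err != nil {
    		log.Fatal(err)
    	}
    	defer view.Unregister(obsMetrics.Views...)
    }
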
+func allViews() *ObsMetrics { + var views []*view.View + // Receiver views. + measures := []*stats.Int64Measure{ + obsmetrics.ReceiverAcceptedSpans, + obsmetrics.ReceiverRefusedSpans, + obsmetrics.ReceiverAcceptedMetricPoints, + obsmetrics.ReceiverRefusedMetricPoints, + obsmetrics.ReceiverAcceptedLogRecords, + obsmetrics.ReceiverRefusedLogRecords, + } + tagKeys := []tag.Key{ + obsmetrics.TagKeyReceiver, obsmetrics.TagKeyTransport, + } + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + // Scraper views. + measures = []*stats.Int64Measure{ + obsmetrics.ScraperScrapedMetricPoints, + obsmetrics.ScraperErroredMetricPoints, + } + tagKeys = []tag.Key{obsmetrics.TagKeyReceiver, obsmetrics.TagKeyScraper} + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + // Exporter views. + measures = []*stats.Int64Measure{ + obsmetrics.ExporterSentSpans, + obsmetrics.ExporterFailedToSendSpans, + obsmetrics.ExporterFailedToEnqueueSpans, + obsmetrics.ExporterSentMetricPoints, + obsmetrics.ExporterFailedToSendMetricPoints, + obsmetrics.ExporterFailedToEnqueueMetricPoints, + obsmetrics.ExporterSentLogRecords, + obsmetrics.ExporterFailedToSendLogRecords, + obsmetrics.ExporterFailedToEnqueueLogRecords, + } + tagKeys = []tag.Key{obsmetrics.TagKeyExporter} + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + // Processor views. + measures = []*stats.Int64Measure{ + obsmetrics.ProcessorAcceptedSpans, + obsmetrics.ProcessorRefusedSpans, + obsmetrics.ProcessorDroppedSpans, + obsmetrics.ProcessorAcceptedMetricPoints, + obsmetrics.ProcessorRefusedMetricPoints, + obsmetrics.ProcessorDroppedMetricPoints, + obsmetrics.ProcessorAcceptedLogRecords, + obsmetrics.ProcessorRefusedLogRecords, + obsmetrics.ProcessorDroppedLogRecords, + } + tagKeys = []tag.Key{obsmetrics.TagKeyProcessor} + views = append(views, genViews(measures, tagKeys, view.Sum())...) + + return &ObsMetrics{ + Views: views, + } +} + +func genViews( + measures []*stats.Int64Measure, + tagKeys []tag.Key, + aggregation *view.Aggregation, +) []*view.View { + views := make([]*view.View, 0, len(measures)) + for _, measure := range measures { + views = append(views, &view.View{ + Name: measure.Name(), + Description: measure.Description(), + TagKeys: tagKeys, + Measure: measure, + Aggregation: aggregation, + }) + } + return views +} diff --git a/internal/otel_collector/internal/otlptext/databuffer.go b/internal/otel_collector/internal/otlptext/databuffer.go new file mode 100644 index 00000000000..82484f8dcf1 --- /dev/null +++ b/internal/otel_collector/internal/otlptext/databuffer.go @@ -0,0 +1,255 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlptext + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "go.opentelemetry.io/collector/model/pdata" +) + +type dataBuffer struct { + buf bytes.Buffer +} + +func (b *dataBuffer) logEntry(format string, a ...interface{}) { + b.buf.WriteString(fmt.Sprintf(format, a...)) + b.buf.WriteString("\n") +} + +func (b *dataBuffer) logAttr(label string, value string) { + b.logEntry(" %-15s: %s", label, value) +} + +func (b *dataBuffer) logAttributeMap(label string, am pdata.AttributeMap) { + if am.Len() == 0 { + return + } + + b.logEntry("%s:", label) + am.Range(func(k string, v pdata.AttributeValue) bool { + b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) + return true + }) +} + +func (b *dataBuffer) logInstrumentationLibrary(il pdata.InstrumentationLibrary) { + b.logEntry( + "InstrumentationLibrary %s %s", + il.Name(), + il.Version()) +} + +func (b *dataBuffer) logMetricDescriptor(md pdata.Metric) { + b.logEntry("Descriptor:") + b.logEntry(" -> Name: %s", md.Name()) + b.logEntry(" -> Description: %s", md.Description()) + b.logEntry(" -> Unit: %s", md.Unit()) + b.logEntry(" -> DataType: %s", md.DataType().String()) +} + +func (b *dataBuffer) logMetricDataPoints(m pdata.Metric) { + switch m.DataType() { + case pdata.MetricDataTypeNone: + return + case pdata.MetricDataTypeGauge: + b.logNumberDataPoints(m.Gauge().DataPoints()) + case pdata.MetricDataTypeSum: + data := m.Sum() + b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logNumberDataPoints(data.DataPoints()) + case pdata.MetricDataTypeHistogram: + data := m.Histogram() + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logDoubleHistogramDataPoints(data.DataPoints()) + case pdata.MetricDataTypeSummary: + data := m.Summary() + b.logDoubleSummaryDataPoints(data.DataPoints()) + } +} + +func (b *dataBuffer) logNumberDataPoints(ps pdata.NumberDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("NumberDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + switch p.Type() { + case pdata.MetricValueTypeInt: + b.logEntry("Value: %d", p.IntVal()) + case pdata.MetricValueTypeDouble: + b.logEntry("Value: %f", p.DoubleVal()) + } + } +} + +func (b *dataBuffer) logDoubleHistogramDataPoints(ps pdata.HistogramDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("HistogramDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %f", p.Sum()) + + bounds := p.ExplicitBounds() + if len(bounds) != 0 { + for i, bound := range bounds { + b.logEntry("ExplicitBounds #%d: %f", i, bound) + } + } + + buckets := p.BucketCounts() + if len(buckets) != 0 { + for j, bucket := range buckets { + b.logEntry("Buckets #%d, Count: %d", j, bucket) + } + } + } +} + +func (b *dataBuffer) logDoubleSummaryDataPoints(ps pdata.SummaryDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("SummaryDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %f", p.Sum()) + + quantiles := 
p.QuantileValues()
+		for i := 0; i < quantiles.Len(); i++ {
+			quantile := quantiles.At(i)
+			b.logEntry("QuantileValue #%d: Quantile %f, Value %f", i, quantile.Quantile(), quantile.Value())
+		}
+	}
+}
+
+func (b *dataBuffer) logDataPointAttributes(labels pdata.AttributeMap) {
+	b.logAttributeMap("Data point attributes", labels)
+}
+
+func (b *dataBuffer) logLogRecord(lr pdata.LogRecord) {
+	b.logEntry("Timestamp: %s", lr.Timestamp())
+	b.logEntry("Severity: %s", lr.SeverityText())
+	b.logEntry("ShortName: %s", lr.Name())
+	b.logEntry("Body: %s", attributeValueToString(lr.Body()))
+	b.logAttributeMap("Attributes", lr.Attributes())
+}
+
+func (b *dataBuffer) logEvents(description string, se pdata.SpanEventSlice) {
+	if se.Len() == 0 {
+		return
+	}
+
+	b.logEntry("%s:", description)
+	for i := 0; i < se.Len(); i++ {
+		e := se.At(i)
+		b.logEntry("SpanEvent #%d", i)
+		b.logEntry(" -> Name: %s", e.Name())
+		b.logEntry(" -> Timestamp: %s", e.Timestamp())
+		b.logEntry(" -> DroppedAttributesCount: %d", e.DroppedAttributesCount())
+
+		if e.Attributes().Len() == 0 {
+			continue
+		}
+		b.logEntry(" -> Attributes:")
+		e.Attributes().Range(func(k string, v pdata.AttributeValue) bool {
+			b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v))
+			return true
+		})
+	}
+}
+
+func (b *dataBuffer) logLinks(description string, sl pdata.SpanLinkSlice) {
+	if sl.Len() == 0 {
+		return
+	}
+
+	b.logEntry("%s:", description)
+
+	for i := 0; i < sl.Len(); i++ {
+		l := sl.At(i)
+		b.logEntry("SpanLink #%d", i)
+		b.logEntry(" -> Trace ID: %s", l.TraceID().HexString())
+		b.logEntry(" -> ID: %s", l.SpanID().HexString())
+		b.logEntry(" -> TraceState: %s", l.TraceState())
+		b.logEntry(" -> DroppedAttributesCount: %d", l.DroppedAttributesCount())
+		if l.Attributes().Len() == 0 {
+			continue
+		}
+		b.logEntry(" -> Attributes:")
+		l.Attributes().Range(func(k string, v pdata.AttributeValue) bool {
+			b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v))
+			return true
+		})
+	}
+}
+
+func attributeValueToString(av pdata.AttributeValue) string {
+	switch av.Type() {
+	case pdata.AttributeValueTypeString:
+		return av.StringVal()
+	case pdata.AttributeValueTypeBool:
+		return strconv.FormatBool(av.BoolVal())
+	case pdata.AttributeValueTypeDouble:
+		return strconv.FormatFloat(av.DoubleVal(), 'f', -1, 64)
+	case pdata.AttributeValueTypeInt:
+		return strconv.FormatInt(av.IntVal(), 10)
+	case pdata.AttributeValueTypeArray:
+		return attributeValueArrayToString(av.ArrayVal())
+	case pdata.AttributeValueTypeMap:
+		return attributeMapToString(av.MapVal())
+	default:
+		return fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", av.Type())
+	}
+}
+
+func attributeValueArrayToString(av pdata.AnyValueArray) string {
+	var b strings.Builder
+	b.WriteByte('[')
+	for i := 0; i < av.Len(); i++ {
+		if i < av.Len()-1 {
+			fmt.Fprintf(&b, "%s, ", attributeValueToString(av.At(i)))
+		} else {
+			b.WriteString(attributeValueToString(av.At(i)))
+		}
+	}
+
+	b.WriteByte(']')
+	return b.String()
+}
+
+func attributeMapToString(av pdata.AttributeMap) string {
+	var b strings.Builder
+	b.WriteString("{\n")
+
+	av.Sort().Range(func(k string, v pdata.AttributeValue) bool {
+		fmt.Fprintf(&b, " -> %s: %s(%s)\n", k, v.Type(), v.AsString())
+		return true
+	})
+	b.WriteByte('}')
+	return b.String()
+}
diff --git a/internal/otel_collector/internal/otlptext/logs.go b/internal/otel_collector/internal/otlptext/logs.go
new file mode 100644
index 00000000000..1676de15d1a
--- /dev/null
+++ b/internal/otel_collector/internal/otlptext/logs.go
@@ -0,0 +1,52 @@
+// Copyright
The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptext + +import ( + "go.opentelemetry.io/collector/model/pdata" +) + +// NewTextLogsMarshaler returns a serializer.LogsMarshaler to encode to OTLP text bytes. +func NewTextLogsMarshaler() pdata.LogsMarshaler { + return textLogsMarshaler{} +} + +type textLogsMarshaler struct{} + +// MarshalLogs pdata.Logs to OTLP text. +func (textLogsMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) { + buf := dataBuffer{} + rls := ld.ResourceLogs() + for i := 0; i < rls.Len(); i++ { + buf.logEntry("ResourceLog #%d", i) + rl := rls.At(i) + buf.logAttributeMap("Resource labels", rl.Resource().Attributes()) + ills := rl.InstrumentationLibraryLogs() + for j := 0; j < ills.Len(); j++ { + buf.logEntry("InstrumentationLibraryLogs #%d", j) + ils := ills.At(j) + buf.logInstrumentationLibrary(ils.InstrumentationLibrary()) + + logs := ils.Logs() + for k := 0; k < logs.Len(); k++ { + buf.logEntry("LogRecord #%d", k) + lr := logs.At(k) + buf.logLogRecord(lr) + } + } + } + + return buf.buf.Bytes(), nil +} diff --git a/internal/otel_collector/internal/otlptext/metrics.go b/internal/otel_collector/internal/otlptext/metrics.go new file mode 100644 index 00000000000..a107de27e5f --- /dev/null +++ b/internal/otel_collector/internal/otlptext/metrics.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptext + +import ( + "go.opentelemetry.io/collector/model/pdata" +) + +// NewTextMetricsMarshaler returns a serializer.MetricsMarshaler to encode to OTLP text bytes. +func NewTextMetricsMarshaler() pdata.MetricsMarshaler { + return textMetricsMarshaler{} +} + +type textMetricsMarshaler struct{} + +// MarshalMetrics pdata.Metrics to OTLP text. 
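+//
+// A rough usage sketch (md stands in for any pdata.Metrics value, for
+// example one built with the internal/testdata generators):
+//
+//   m := NewTextMetricsMarshaler()
+//   out, err := m.MarshalMetrics(md)
+//   if err == nil {
+//       fmt.Printf("%s", out)
+//   }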
+func (textMetricsMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) { + buf := dataBuffer{} + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + buf.logEntry("ResourceMetrics #%d", i) + rm := rms.At(i) + buf.logAttributeMap("Resource labels", rm.Resource().Attributes()) + ilms := rm.InstrumentationLibraryMetrics() + for j := 0; j < ilms.Len(); j++ { + buf.logEntry("InstrumentationLibraryMetrics #%d", j) + ilm := ilms.At(j) + buf.logInstrumentationLibrary(ilm.InstrumentationLibrary()) + metrics := ilm.Metrics() + for k := 0; k < metrics.Len(); k++ { + buf.logEntry("Metric #%d", k) + metric := metrics.At(k) + buf.logMetricDescriptor(metric) + buf.logMetricDataPoints(metric) + } + } + } + + return buf.buf.Bytes(), nil +} diff --git a/internal/otel_collector/internal/otlptext/traces.go b/internal/otel_collector/internal/otlptext/traces.go new file mode 100644 index 00000000000..d3e3cc6548c --- /dev/null +++ b/internal/otel_collector/internal/otlptext/traces.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptext + +import ( + "go.opentelemetry.io/collector/model/pdata" +) + +// NewTextTracesMarshaler returns a serializer.TracesMarshaler to encode to OTLP text bytes. +func NewTextTracesMarshaler() pdata.TracesMarshaler { + return textTracesMarshaler{} +} + +type textTracesMarshaler struct{} + +// MarshalTraces pdata.Traces to OTLP text. 
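+//
+// The output is a line-oriented dump, roughly of the following shape (the
+// values here are only illustrative):
+//
+//   ResourceSpans #0
+//   Resource labels:
+//        -> resource-attr: STRING(resource-attr-val-1)
+//   InstrumentationLibrarySpans #0
+//   Span #0
+//       Trace ID       : ...
+//       Name           : operationA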
+func (textTracesMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { + buf := dataBuffer{} + rss := td.ResourceSpans() + for i := 0; i < rss.Len(); i++ { + buf.logEntry("ResourceSpans #%d", i) + rs := rss.At(i) + buf.logAttributeMap("Resource labels", rs.Resource().Attributes()) + ilss := rs.InstrumentationLibrarySpans() + for j := 0; j < ilss.Len(); j++ { + buf.logEntry("InstrumentationLibrarySpans #%d", j) + ils := ilss.At(j) + buf.logInstrumentationLibrary(ils.InstrumentationLibrary()) + + spans := ils.Spans() + for k := 0; k < spans.Len(); k++ { + buf.logEntry("Span #%d", k) + span := spans.At(k) + buf.logAttr("Trace ID", span.TraceID().HexString()) + buf.logAttr("Parent ID", span.ParentSpanID().HexString()) + buf.logAttr("ID", span.SpanID().HexString()) + buf.logAttr("Name", span.Name()) + buf.logAttr("Kind", span.Kind().String()) + buf.logAttr("Start time", span.StartTimestamp().String()) + buf.logAttr("End time", span.EndTimestamp().String()) + + buf.logAttr("Status code", span.Status().Code().String()) + buf.logAttr("Status message", span.Status().Message()) + + buf.logAttributeMap("Attributes", span.Attributes()) + buf.logEvents("Events", span.Events()) + buf.logLinks("Links", span.Links()) + } + } + } + + return buf.buf.Bytes(), nil +} diff --git a/internal/otel_collector/internal/sharedcomponent/sharedcomponent.go b/internal/otel_collector/internal/sharedcomponent/sharedcomponent.go new file mode 100644 index 00000000000..66e6487f7e1 --- /dev/null +++ b/internal/otel_collector/internal/sharedcomponent/sharedcomponent.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sharedcomponent exposes util functionality for receivers and exporters +// that need to share state between different signal types instances such as net.Listener or os.File. +package sharedcomponent + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/component" +) + +// SharedComponents a map that keeps reference of all created instances for a given configuration, +// and ensures that the shared state is started and stopped only once. +type SharedComponents struct { + comps map[interface{}]*SharedComponent +} + +// NewSharedComponents returns a new empty SharedComponents. +func NewSharedComponents() *SharedComponents { + return &SharedComponents{ + comps: make(map[interface{}]*SharedComponent), + } +} + +// GetOrAdd returns the already created instance if exists, otherwise creates a new instance +// and adds it to the map of references. +func (scs *SharedComponents) GetOrAdd(key interface{}, create func() component.Component) *SharedComponent { + if c, ok := scs.comps[key]; ok { + return c + } + newComp := &SharedComponent{ + Component: create(), + removeFunc: func() { + delete(scs.comps, key) + }, + } + scs.comps[key] = newComp + return newComp +} + +// SharedComponent ensures that the wrapped component is started and stopped only once. +// When stopped it is removed from the SharedComponents map. 
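+//
+// Intended use, sketched (newSharedListener is a hypothetical factory):
+//
+//   comps := NewSharedComponents()
+//   c := comps.GetOrAdd("key", func() component.Component { return newSharedListener() })
+//   err := c.Start(ctx, host) // the wrapped Start runs at most once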
+type SharedComponent struct { + component.Component + + startOnce sync.Once + stopOnce sync.Once + removeFunc func() +} + +// Unwrap returns the original component. +func (r *SharedComponent) Unwrap() component.Component { + return r.Component +} + +// Start implements component.Component. +func (r *SharedComponent) Start(ctx context.Context, host component.Host) error { + var err error + r.startOnce.Do(func() { + err = r.Component.Start(ctx, host) + }) + return err +} + +// Shutdown implements component.Component. +func (r *SharedComponent) Shutdown(ctx context.Context) error { + var err error + r.stopOnce.Do(func() { + err = r.Component.Shutdown(ctx) + r.removeFunc() + }) + return err +} diff --git a/internal/otel_collector/internal/testcomponents/example_exporter.go b/internal/otel_collector/internal/testcomponents/example_exporter.go new file mode 100644 index 00000000000..896ce6002fa --- /dev/null +++ b/internal/otel_collector/internal/testcomponents/example_exporter.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testcomponents + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configparser" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +var _ config.Unmarshallable = (*ExampleExporter)(nil) + +// ExampleExporter is for testing purposes. We are defining an example config and factory +// for "exampleexporter" exporter type. +type ExampleExporter struct { + config.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraInt int32 `mapstructure:"extra_int"` + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +// Unmarshal a viper data into the config struct +func (cfg *ExampleExporter) Unmarshal(componentParser *configparser.Parser) error { + return componentParser.UnmarshalExact(cfg) +} + +const expType = "exampleexporter" + +// ExampleExporterFactory is factory for ExampleExporter. +var ExampleExporterFactory = exporterhelper.NewFactory( + expType, + createExporterDefaultConfig, + exporterhelper.WithTraces(createTracesExporter), + exporterhelper.WithMetrics(createMetricsExporter), + exporterhelper.WithLogs(createLogsExporter)) + +// CreateDefaultConfig creates the default configuration for the Exporter. 
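+//
+// In a test collector configuration this exporter could be declared as, for
+// example (the list values are only illustrative):
+//
+//   exporters:
+//     exampleexporter:
+//       extra: "some export string"
+//       extra_list: ["first", "second"]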
+func createExporterDefaultConfig() config.Exporter { + return &ExampleExporter{ + ExporterSettings: config.NewExporterSettings(config.NewID(expType)), + ExtraSetting: "some export string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +func createTracesExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.TracesExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +func createMetricsExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.MetricsExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +func createLogsExporter( + _ context.Context, + _ component.ExporterCreateSettings, + _ config.Exporter, +) (component.LogsExporter, error) { + return &ExampleExporterConsumer{}, nil +} + +// ExampleExporterConsumer stores consumed traces and metrics for testing purposes. +type ExampleExporterConsumer struct { + Traces []pdata.Traces + Metrics []pdata.Metrics + Logs []pdata.Logs + ExporterStarted bool + ExporterShutdown bool +} + +// Start tells the exporter to start. The exporter may prepare for exporting +// by connecting to the endpoint. Host parameter can be used for communicating +// with the host after Start() has already returned. +func (exp *ExampleExporterConsumer) Start(_ context.Context, _ component.Host) error { + exp.ExporterStarted = true + return nil +} + +// ConsumeTraces receives pdata.Traces for processing by the consumer.Traces. +func (exp *ExampleExporterConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error { + exp.Traces = append(exp.Traces, td) + return nil +} + +func (exp *ExampleExporterConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +// ConsumeMetrics receives pdata.Metrics for processing by the Metrics. +func (exp *ExampleExporterConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { + exp.Metrics = append(exp.Metrics, md) + return nil +} + +func (exp *ExampleExporterConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error { + exp.Logs = append(exp.Logs, ld) + return nil +} + +// Shutdown is invoked during shutdown. +func (exp *ExampleExporterConsumer) Shutdown(context.Context) error { + exp.ExporterShutdown = true + return nil +} diff --git a/internal/otel_collector/internal/testcomponents/example_extension.go b/internal/otel_collector/internal/testcomponents/example_extension.go new file mode 100644 index 00000000000..f233dbc178e --- /dev/null +++ b/internal/otel_collector/internal/testcomponents/example_extension.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testcomponents + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/extension/extensionhelper" +) + +// ExampleExtensionCfg is for testing purposes. 
We are defining an example config and factory +// for "exampleextension" extension type. +type ExampleExtensionCfg struct { + config.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +const extType = "exampleextension" + +// ExampleExtensionFactory is factory for ExampleExtensionCfg. +var ExampleExtensionFactory = extensionhelper.NewFactory(extType, createExtensionDefaultConfig, createExtension) + +// CreateDefaultConfig creates the default configuration for the Extension. +func createExtensionDefaultConfig() config.Extension { + return &ExampleExtensionCfg{ + ExtensionSettings: config.NewExtensionSettings(config.NewID(extType)), + ExtraSetting: "extra string setting", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CreateExtension creates an Extension based on this config. +func createExtension(context.Context, component.ExtensionCreateSettings, config.Extension) (component.Extension, error) { + return componenthelper.New(), nil +} diff --git a/internal/otel_collector/internal/testcomponents/example_factories.go b/internal/otel_collector/internal/testcomponents/example_factories.go new file mode 100644 index 00000000000..6d478223a1e --- /dev/null +++ b/internal/otel_collector/internal/testcomponents/example_factories.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testcomponents + +import ( + "go.opentelemetry.io/collector/component" +) + +// ExampleComponents registers example factories. This is only used by tests. +func ExampleComponents() ( + factories component.Factories, + err error, +) { + if factories.Extensions, err = component.MakeExtensionFactoryMap(ExampleExtensionFactory); err != nil { + return + } + + if factories.Receivers, err = component.MakeReceiverFactoryMap(ExampleReceiverFactory); err != nil { + return + } + + if factories.Exporters, err = component.MakeExporterFactoryMap(ExampleExporterFactory); err != nil { + return + } + + factories.Processors, err = component.MakeProcessorFactoryMap(ExampleProcessorFactory) + + return +} diff --git a/internal/otel_collector/internal/testcomponents/example_processor.go b/internal/otel_collector/internal/testcomponents/example_processor.go new file mode 100644 index 00000000000..788b14b6cad --- /dev/null +++ b/internal/otel_collector/internal/testcomponents/example_processor.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testcomponents + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +// ExampleProcessorCfg is for testing purposes. We are defining an example config and factory +// for "exampleprocessor" processor type. +type ExampleProcessorCfg struct { + config.ProcessorSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +const procType = "exampleprocessor" + +// ExampleProcessorFactory is factory for exampleProcessor. +var ExampleProcessorFactory = processorhelper.NewFactory( + procType, + createDefaultConfig, + processorhelper.WithTraces(createTracesProcessor), + processorhelper.WithMetrics(createMetricsProcessor), + processorhelper.WithLogs(createLogsProcessor)) + +// CreateDefaultConfig creates the default configuration for the Processor. +func createDefaultConfig() config.Processor { + return &ExampleProcessorCfg{ + ProcessorSettings: config.NewProcessorSettings(config.NewID(procType)), + ExtraSetting: "some export string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +func createTracesProcessor(_ context.Context, _ component.ProcessorCreateSettings, _ config.Processor, nextConsumer consumer.Traces) (component.TracesProcessor, error) { + return &exampleProcessor{Traces: nextConsumer}, nil +} + +func createMetricsProcessor(_ context.Context, _ component.ProcessorCreateSettings, _ config.Processor, nextConsumer consumer.Metrics) (component.MetricsProcessor, error) { + return &exampleProcessor{Metrics: nextConsumer}, nil +} + +func createLogsProcessor(_ context.Context, _ component.ProcessorCreateSettings, _ config.Processor, nextConsumer consumer.Logs) (component.LogsProcessor, error) { + return &exampleProcessor{Logs: nextConsumer}, nil +} + +type exampleProcessor struct { + consumer.Traces + consumer.Metrics + consumer.Logs +} + +func (ep *exampleProcessor) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (ep *exampleProcessor) Shutdown(_ context.Context) error { + return nil +} + +func (ep *exampleProcessor) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} diff --git a/internal/otel_collector/internal/testcomponents/example_receiver.go b/internal/otel_collector/internal/testcomponents/example_receiver.go new file mode 100644 index 00000000000..5fd889391ef --- /dev/null +++ b/internal/otel_collector/internal/testcomponents/example_receiver.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testcomponents + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +// ExampleReceiver is for testing purposes. We are defining an example config and factory +// for "examplereceiver" receiver type. +type ExampleReceiver struct { + config.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + // Configures the receiver server protocol. + confignet.TCPAddr `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + ExtraSetting string `mapstructure:"extra"` + ExtraMapSetting map[string]string `mapstructure:"extra_map"` + ExtraListSetting []string `mapstructure:"extra_list"` +} + +var receiverType = config.Type("examplereceiver") + +// ExampleReceiverFactory is factory for ExampleReceiver. +var ExampleReceiverFactory = receiverhelper.NewFactory( + receiverType, + createReceiverDefaultConfig, + receiverhelper.WithTraces(createTracesReceiver), + receiverhelper.WithMetrics(createMetricsReceiver), + receiverhelper.WithLogs(createLogsReceiver)) + +func createReceiverDefaultConfig() config.Receiver { + return &ExampleReceiver{ + ReceiverSettings: config.NewReceiverSettings(config.NewID(receiverType)), + TCPAddr: confignet.TCPAddr{ + Endpoint: "localhost:1000", + }, + ExtraSetting: "some string", + ExtraMapSetting: nil, + ExtraListSetting: nil, + } +} + +// CreateTracesReceiver creates a trace receiver based on this config. +func createTracesReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + cfg config.Receiver, + nextConsumer consumer.Traces, +) (component.TracesReceiver, error) { + receiver := createReceiver(cfg) + receiver.Traces = nextConsumer + return receiver, nil +} + +// CreateMetricsReceiver creates a metrics receiver based on this config. +func createMetricsReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + cfg config.Receiver, + nextConsumer consumer.Metrics, +) (component.MetricsReceiver, error) { + receiver := createReceiver(cfg) + receiver.Metrics = nextConsumer + return receiver, nil +} + +func createLogsReceiver( + _ context.Context, + _ component.ReceiverCreateSettings, + cfg config.Receiver, + nextConsumer consumer.Logs, +) (component.LogsReceiver, error) { + receiver := createReceiver(cfg) + receiver.Logs = nextConsumer + + return receiver, nil +} + +func createReceiver(cfg config.Receiver) *ExampleReceiverProducer { + // There must be one receiver for all data types. We maintain a map of + // receivers per config. + + // Check to see if there is already a receiver for this config. + receiver, ok := exampleReceivers[cfg] + if !ok { + receiver = &ExampleReceiverProducer{} + // Remember the receiver in the map + exampleReceivers[cfg] = receiver + } + + return receiver +} + +// ExampleReceiverProducer allows producing traces and metrics for testing purposes. 
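+// Because createReceiver memoizes one producer per config (see the
+// exampleReceivers map below), requesting, say, a traces receiver and a
+// metrics receiver for the same config returns the same instance (a sketch
+// with placeholder arguments):
+//
+//   tr, _ := createTracesReceiver(ctx, set, cfg, nextTraces)
+//   mr, _ := createMetricsReceiver(ctx, set, cfg, nextMetrics)
+//   // tr == mr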
+type ExampleReceiverProducer struct {
+ Started bool
+ Stopped bool
+ consumer.Traces
+ consumer.Metrics
+ consumer.Logs
+}
+
+// Start tells the receiver to start its processing.
+func (erp *ExampleReceiverProducer) Start(_ context.Context, _ component.Host) error {
+ erp.Started = true
+ return nil
+}
+
+// Shutdown tells the receiver that it should stop reception.
+func (erp *ExampleReceiverProducer) Shutdown(context.Context) error {
+ erp.Stopped = true
+ return nil
+}
+
+// This is the map of already created example receivers for particular configurations.
+// We maintain this map because the ReceiverFactory is asked for trace and metric receivers separately
+// when it gets CreateTracesReceiver() and CreateMetricsReceiver(), but they must not
+// create separate objects; they must use one Receiver object per configuration.
+var exampleReceivers = map[config.Receiver]*ExampleReceiverProducer{} diff --git a/internal/otel_collector/internal/testdata/common.go b/internal/otel_collector/internal/testdata/common.go new file mode 100644 index 00000000000..da6c95cd8f1 --- /dev/null +++ b/internal/otel_collector/internal/testdata/common.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testdata
+
+import (
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+var (
+ resourceAttributes1 = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"resource-attr": pdata.NewAttributeValueString("resource-attr-val-1")})
+ resourceAttributes2 = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"resource-attr": pdata.NewAttributeValueString("resource-attr-val-2")})
+ spanEventAttributes = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"span-event-attr": pdata.NewAttributeValueString("span-event-attr-val")})
+ spanLinkAttributes = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"span-link-attr": pdata.NewAttributeValueString("span-link-attr-val")})
+ spanAttributes = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"span-attr": pdata.NewAttributeValueString("span-attr-val")})
+ metricAttachment = pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{"exemplar-attachment": pdata.NewAttributeValueString("exemplar-attachment-value")})
+)
+
+const (
+ TestLabelKey1 = "label-1"
+ TestLabelValue1 = "label-value-1"
+ TestLabelKey2 = "label-2"
+ TestLabelValue2 = "label-value-2"
+ TestLabelKey3 = "label-3"
+ TestLabelValue3 = "label-value-3"
+)
+
+func initResourceAttributes1(dest pdata.AttributeMap) {
+ dest.Clear()
+ resourceAttributes1.CopyTo(dest)
+}
+
+func initResourceAttributes2(dest pdata.AttributeMap) {
+ dest.Clear()
+ resourceAttributes2.CopyTo(dest)
+}
+
+func initSpanAttributes(dest pdata.AttributeMap) {
+ dest.Clear()
+ spanAttributes.CopyTo(dest)
+}
+
+func initSpanEventAttributes(dest pdata.AttributeMap) {
+ dest.Clear()
+ spanEventAttributes.CopyTo(dest)
+}
+
+func initSpanLinkAttributes(dest pdata.AttributeMap) {
+ dest.Clear()
+ spanLinkAttributes.CopyTo(dest)
+}
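+
+// initExampleAttributes is a hypothetical helper that is not referenced by
+// the generators in this package; it only illustrates the Clear-then-CopyTo
+// pattern shared by the init helpers above.
+func initExampleAttributes(dest pdata.AttributeMap) {
+ dest.Clear()
+ pdata.NewAttributeMapFromMap(map[string]pdata.AttributeValue{
+ "example-attr": pdata.NewAttributeValueString("example-attr-val"),
+ }).CopyTo(dest)
+}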
+ +func initMetricAttachment(dest pdata.AttributeMap) { + dest.Clear() + metricAttachment.CopyTo(dest) +} + +func initMetricAttributes1(dest pdata.AttributeMap) { + dest.Clear() + dest.InsertString(TestLabelKey1, TestLabelValue1) +} + +func initMetricAttributes12(dest pdata.AttributeMap) { + initMetricAttributes1(dest) + dest.InsertString(TestLabelKey2, TestLabelValue2) +} + +func initMetricAttributes13(dest pdata.AttributeMap) { + initMetricAttributes1(dest) + dest.InsertString(TestLabelKey3, TestLabelValue3) +} + +func initMetricAttributes2(dest pdata.AttributeMap) { + dest.Clear() + dest.InsertString(TestLabelKey2, TestLabelValue2) +} diff --git a/internal/otel_collector/internal/testdata/log.go b/internal/otel_collector/internal/testdata/log.go new file mode 100644 index 00000000000..d21a80f08b2 --- /dev/null +++ b/internal/otel_collector/internal/testdata/log.go @@ -0,0 +1,139 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +var ( + TestLogTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestLogTimestamp = pdata.NewTimestampFromTime(TestLogTime) +) + +func GenerateLogsOneEmptyResourceLogs() pdata.Logs { + ld := pdata.NewLogs() + ld.ResourceLogs().AppendEmpty() + return ld +} + +func GenerateLogsNoLogRecords() pdata.Logs { + ld := GenerateLogsOneEmptyResourceLogs() + initResource1(ld.ResourceLogs().At(0).Resource()) + return ld +} + +func GenerateLogsOneEmptyLogRecord() pdata.Logs { + ld := GenerateLogsNoLogRecords() + rs0 := ld.ResourceLogs().At(0) + rs0.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty() + return ld +} + +func GenerateLogsOneLogRecordNoResource() pdata.Logs { + ld := GenerateLogsOneEmptyResourceLogs() + rs0 := ld.ResourceLogs().At(0) + fillLogOne(rs0.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty()) + return ld +} + +func GenerateLogsOneLogRecord() pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() + fillLogOne(ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0)) + return ld +} + +func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() + logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + fillLogOne(logs.At(0)) + fillLogTwo(logs.AppendEmpty()) + return ld +} + +func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { + ld := pdata.NewLogs() + rl0 := ld.ResourceLogs().AppendEmpty() + initResource1(rl0.Resource()) + logs := rl0.InstrumentationLibraryLogs().AppendEmpty().Logs() + fillLogOne(logs.AppendEmpty()) + fillLogTwo(logs.AppendEmpty()) + rl1 := ld.ResourceLogs().AppendEmpty() + initResource2(rl1.Resource()) + fillLogThree(rl1.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty()) + return ld +} +func fillLogOne(log pdata.LogRecord) { + log.SetName("logA") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberINFO) 
+ log.SetSeverityText("Info") + log.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) + log.SetTraceID(pdata.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) + + attrs := log.Attributes() + attrs.InsertString("app", "server") + attrs.InsertInt("instance_num", 1) + + log.Body().SetStringVal("This is a log message") +} + +func fillLogTwo(log pdata.LogRecord) { + log.SetName("logB") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityText("Info") + + attrs := log.Attributes() + attrs.InsertString("customer", "acme") + attrs.InsertString("env", "dev") + + log.Body().SetStringVal("something happened") +} + +func fillLogThree(log pdata.LogRecord) { + log.SetName("logC") + log.SetTimestamp(TestLogTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(pdata.SeverityNumberWARN) + log.SetSeverityText("Warning") + + log.Body().SetStringVal("something else happened") +} + +func GenerateLogsManyLogRecordsSameResource(count int) pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() + logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() + logs.EnsureCapacity(count) + for i := 0; i < count; i++ { + var l pdata.LogRecord + if i < logs.Len() { + l = logs.At(i) + } else { + l = logs.AppendEmpty() + } + + if i%2 == 0 { + fillLogOne(l) + } else { + fillLogTwo(l) + } + } + return ld +} diff --git a/internal/otel_collector/internal/testdata/metric.go b/internal/otel_collector/internal/testdata/metric.go new file mode 100644 index 00000000000..718899f60b6 --- /dev/null +++ b/internal/otel_collector/internal/testdata/metric.go @@ -0,0 +1,297 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +var ( + TestMetricStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) + TestMetricStartTimestamp = pdata.NewTimestampFromTime(TestMetricStartTime) + + TestMetricExemplarTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) + TestMetricExemplarTimestamp = pdata.NewTimestampFromTime(TestMetricExemplarTime) + + TestMetricTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestMetricTimestamp = pdata.NewTimestampFromTime(TestMetricTime) +) + +const ( + TestGaugeDoubleMetricName = "gauge-double" + TestGaugeIntMetricName = "gauge-int" + TestSumDoubleMetricName = "counter-double" + TestSumIntMetricName = "counter-int" + TestDoubleHistogramMetricName = "double-histogram" + TestDoubleSummaryMetricName = "double-summary" +) + +func GenerateMetricsOneEmptyResourceMetrics() pdata.Metrics { + md := pdata.NewMetrics() + md.ResourceMetrics().AppendEmpty() + return md +} + +func GenerateMetricsNoLibraries() pdata.Metrics { + md := GenerateMetricsOneEmptyResourceMetrics() + ms0 := md.ResourceMetrics().At(0) + initResource1(ms0.Resource()) + return md +} + +func GenerateMetricsOneEmptyInstrumentationLibrary() pdata.Metrics { + md := GenerateMetricsNoLibraries() + md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().AppendEmpty() + return md +} + +func GenerateMetricsOneMetricNoResource() pdata.Metrics { + md := GenerateMetricsOneEmptyResourceMetrics() + rm0 := md.ResourceMetrics().At(0) + rm0ils0 := rm0.InstrumentationLibraryMetrics().AppendEmpty() + initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) + return md +} + +func GenerateMetricsOneMetric() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) + return md +} + +func GenerateMetricsTwoMetrics() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) + initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) + return md +} + +func GenerateMetricsOneCounterOneSummaryMetrics() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) + initDoubleSummaryMetric(rm0ils0.Metrics().AppendEmpty()) + return md +} + +func GenerateMetricsOneMetricNoAttributes() pdata.Metrics { + md := GenerateMetricsOneMetric() + dps := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).Sum().DataPoints() + dps.At(0).Attributes().Clear() + dps.At(1).Attributes().Clear() + return md +} + +func GenerateMetricsAllTypesNoDataPoints() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm0.Metrics() + initMetric(ms.AppendEmpty(), TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + initMetric(ms.AppendEmpty(), TestGaugeIntMetricName, pdata.MetricDataTypeGauge) + initMetric(ms.AppendEmpty(), TestSumDoubleMetricName, pdata.MetricDataTypeSum) + initMetric(ms.AppendEmpty(), TestSumIntMetricName, pdata.MetricDataTypeSum) + initMetric(ms.AppendEmpty(), TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) + initMetric(ms.AppendEmpty(), TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) + return md 
+} + +func GenerateMetricsAllTypesEmptyDataPoint() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm0.Metrics() + + doubleGauge := ms.AppendEmpty() + initMetric(doubleGauge, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + doubleGauge.Gauge().DataPoints().AppendEmpty() + intGauge := ms.AppendEmpty() + initMetric(intGauge, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) + intGauge.Gauge().DataPoints().AppendEmpty() + doubleSum := ms.AppendEmpty() + initMetric(doubleSum, TestSumDoubleMetricName, pdata.MetricDataTypeSum) + doubleSum.Sum().DataPoints().AppendEmpty() + intSum := ms.AppendEmpty() + initMetric(intSum, TestSumIntMetricName, pdata.MetricDataTypeSum) + intSum.Sum().DataPoints().AppendEmpty() + histogram := ms.AppendEmpty() + initMetric(histogram, TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) + histogram.Histogram().DataPoints().AppendEmpty() + summary := ms.AppendEmpty() + initMetric(summary, TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) + summary.Summary().DataPoints().AppendEmpty() + return md +} + +func GenerateMetricsMetricTypeInvalid() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + initMetric(ilm0.Metrics().AppendEmpty(), TestSumIntMetricName, pdata.MetricDataTypeNone) + return md +} + +func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + + ilm := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + ms := ilm.Metrics() + initGaugeIntMetric(ms.AppendEmpty()) + initGaugeDoubleMetric(ms.AppendEmpty()) + initSumIntMetric(ms.AppendEmpty()) + initSumDoubleMetric(ms.AppendEmpty()) + initDoubleHistogramMetric(ms.AppendEmpty()) + initDoubleSummaryMetric(ms.AppendEmpty()) + + return md +} + +func initGaugeIntMetric(im pdata.Metric) { + initMetric(im, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) + + idps := im.Gauge().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes1(idp0.Attributes()) + idp0.SetStartTimestamp(TestMetricStartTimestamp) + idp0.SetTimestamp(TestMetricTimestamp) + idp0.SetIntVal(123) + idp1 := idps.AppendEmpty() + initMetricAttributes2(idp1.Attributes()) + idp1.SetStartTimestamp(TestMetricStartTimestamp) + idp1.SetTimestamp(TestMetricTimestamp) + idp1.SetIntVal(456) +} + +func initGaugeDoubleMetric(im pdata.Metric) { + initMetric(im, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + + idps := im.Gauge().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes12(idp0.Attributes()) + idp0.SetStartTimestamp(TestMetricStartTimestamp) + idp0.SetTimestamp(TestMetricTimestamp) + idp0.SetDoubleVal(1.23) + idp1 := idps.AppendEmpty() + initMetricAttributes13(idp1.Attributes()) + idp1.SetStartTimestamp(TestMetricStartTimestamp) + idp1.SetTimestamp(TestMetricTimestamp) + idp1.SetDoubleVal(4.56) +} + +func initSumIntMetric(im pdata.Metric) { + initMetric(im, TestSumIntMetricName, pdata.MetricDataTypeSum) + + idps := im.Sum().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes1(idp0.Attributes()) + idp0.SetStartTimestamp(TestMetricStartTimestamp) + idp0.SetTimestamp(TestMetricTimestamp) + idp0.SetIntVal(123) + idp1 := idps.AppendEmpty() + initMetricAttributes2(idp1.Attributes()) + idp1.SetStartTimestamp(TestMetricStartTimestamp) + idp1.SetTimestamp(TestMetricTimestamp) + idp1.SetIntVal(456) +} + +func 
initSumDoubleMetric(dm pdata.Metric) { + initMetric(dm, TestSumDoubleMetricName, pdata.MetricDataTypeSum) + + ddps := dm.Sum().DataPoints() + ddp0 := ddps.AppendEmpty() + initMetricAttributes12(ddp0.Attributes()) + ddp0.SetStartTimestamp(TestMetricStartTimestamp) + ddp0.SetTimestamp(TestMetricTimestamp) + ddp0.SetDoubleVal(1.23) + + ddp1 := ddps.AppendEmpty() + initMetricAttributes13(ddp1.Attributes()) + ddp1.SetStartTimestamp(TestMetricStartTimestamp) + ddp1.SetTimestamp(TestMetricTimestamp) + ddp1.SetDoubleVal(4.56) +} + +func initDoubleHistogramMetric(hm pdata.Metric) { + initMetric(hm, TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) + + hdps := hm.Histogram().DataPoints() + hdp0 := hdps.AppendEmpty() + initMetricAttributes13(hdp0.Attributes()) + hdp0.SetStartTimestamp(TestMetricStartTimestamp) + hdp0.SetTimestamp(TestMetricTimestamp) + hdp0.SetCount(1) + hdp0.SetSum(15) + hdp1 := hdps.AppendEmpty() + initMetricAttributes2(hdp1.Attributes()) + hdp1.SetStartTimestamp(TestMetricStartTimestamp) + hdp1.SetTimestamp(TestMetricTimestamp) + hdp1.SetCount(1) + hdp1.SetSum(15) + hdp1.SetBucketCounts([]uint64{0, 1}) + exemplar := hdp1.Exemplars().AppendEmpty() + exemplar.SetTimestamp(TestMetricExemplarTimestamp) + exemplar.SetDoubleVal(15) + initMetricAttachment(exemplar.FilteredAttributes()) + hdp1.SetExplicitBounds([]float64{1}) +} + +func initDoubleSummaryMetric(sm pdata.Metric) { + initMetric(sm, TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) + + sdps := sm.Summary().DataPoints() + sdp0 := sdps.AppendEmpty() + initMetricAttributes13(sdp0.Attributes()) + sdp0.SetStartTimestamp(TestMetricStartTimestamp) + sdp0.SetTimestamp(TestMetricTimestamp) + sdp0.SetCount(1) + sdp0.SetSum(15) + sdp1 := sdps.AppendEmpty() + initMetricAttributes2(sdp1.Attributes()) + sdp1.SetStartTimestamp(TestMetricStartTimestamp) + sdp1.SetTimestamp(TestMetricTimestamp) + sdp1.SetCount(1) + sdp1.SetSum(15) + + quantile := sdp1.QuantileValues().AppendEmpty() + quantile.SetQuantile(0.01) + quantile.SetValue(15) +} + +func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { + m.SetName(name) + m.SetDescription("") + m.SetUnit("1") + m.SetDataType(ty) + switch ty { + case pdata.MetricDataTypeSum: + sum := m.Sum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + case pdata.MetricDataTypeHistogram: + histo := m.Histogram() + histo.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + } +} + +func GenerateMetricsManyMetricsSameResource(metricsCount int) pdata.Metrics { + md := GenerateMetricsOneEmptyInstrumentationLibrary() + rs0ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) + rs0ilm0.Metrics().EnsureCapacity(metricsCount) + for i := 0; i < metricsCount; i++ { + initSumIntMetric(rs0ilm0.Metrics().AppendEmpty()) + } + return md +} diff --git a/internal/otel_collector/internal/testdata/resource.go b/internal/otel_collector/internal/testdata/resource.go new file mode 100644 index 00000000000..f5d2b8f726d --- /dev/null +++ b/internal/otel_collector/internal/testdata/resource.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + "go.opentelemetry.io/collector/model/pdata" +) + +func initResource1(r pdata.Resource) { + initResourceAttributes1(r.Attributes()) +} + +func initResource2(r pdata.Resource) { + initResourceAttributes2(r.Attributes()) +} diff --git a/internal/otel_collector/internal/testdata/trace.go b/internal/otel_collector/internal/testdata/trace.go new file mode 100644 index 00000000000..516da1ccef4 --- /dev/null +++ b/internal/otel_collector/internal/testdata/trace.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +var ( + TestSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) + TestSpanStartTimestamp = pdata.NewTimestampFromTime(TestSpanStartTime) + + TestSpanEventTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) + TestSpanEventTimestamp = pdata.NewTimestampFromTime(TestSpanEventTime) + + TestSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) + TestSpanEndTimestamp = pdata.NewTimestampFromTime(TestSpanEndTime) +) + +func GenerateTracesOneEmptyResourceSpans() pdata.Traces { + td := pdata.NewTraces() + td.ResourceSpans().AppendEmpty() + return td +} + +func GenerateTracesNoLibraries() pdata.Traces { + td := GenerateTracesOneEmptyResourceSpans() + rs0 := td.ResourceSpans().At(0) + initResource1(rs0.Resource()) + return td +} + +func GenerateTracesOneEmptyInstrumentationLibrary() pdata.Traces { + td := GenerateTracesNoLibraries() + td.ResourceSpans().At(0).InstrumentationLibrarySpans().AppendEmpty() + return td +} + +func GenerateTracesOneSpanNoResource() pdata.Traces { + td := GenerateTracesOneEmptyResourceSpans() + rs0 := td.ResourceSpans().At(0) + fillSpanOne(rs0.InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty()) + return td +} + +func GenerateTracesOneSpan() pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + fillSpanOne(rs0ils0.Spans().AppendEmpty()) + return td +} + +func GenerateTracesTwoSpansSameResource() pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + fillSpanOne(rs0ils0.Spans().AppendEmpty()) + fillSpanTwo(rs0ils0.Spans().AppendEmpty()) + return td +} + +func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { + td := pdata.NewTraces() + rs0 := td.ResourceSpans().AppendEmpty() + initResource1(rs0.Resource()) + rs0ils0 := 
rs0.InstrumentationLibrarySpans().AppendEmpty() + fillSpanOne(rs0ils0.Spans().AppendEmpty()) + fillSpanTwo(rs0ils0.Spans().AppendEmpty()) + rs1 := td.ResourceSpans().AppendEmpty() + initResource2(rs1.Resource()) + rs1ils0 := rs1.InstrumentationLibrarySpans().AppendEmpty() + fillSpanThree(rs1ils0.Spans().AppendEmpty()) + return td +} + +func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() + rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) + rs0ils0.Spans().EnsureCapacity(spanCount) + for i := 0; i < spanCount; i++ { + fillSpanOne(rs0ils0.Spans().AppendEmpty()) + } + return td +} + +func fillSpanOne(span pdata.Span) { + span.SetName("operationA") + span.SetStartTimestamp(TestSpanStartTimestamp) + span.SetEndTimestamp(TestSpanEndTimestamp) + span.SetDroppedAttributesCount(1) + evs := span.Events() + ev0 := evs.AppendEmpty() + ev0.SetTimestamp(TestSpanEventTimestamp) + ev0.SetName("event-with-attr") + initSpanEventAttributes(ev0.Attributes()) + ev0.SetDroppedAttributesCount(2) + ev1 := evs.AppendEmpty() + ev1.SetTimestamp(TestSpanEventTimestamp) + ev1.SetName("event") + ev1.SetDroppedAttributesCount(2) + span.SetDroppedEventsCount(1) + status := span.Status() + status.SetCode(pdata.StatusCodeError) + status.SetMessage("status-cancelled") +} + +func fillSpanTwo(span pdata.Span) { + span.SetName("operationB") + span.SetStartTimestamp(TestSpanStartTimestamp) + span.SetEndTimestamp(TestSpanEndTimestamp) + link0 := span.Links().AppendEmpty() + initSpanLinkAttributes(link0.Attributes()) + link0.SetDroppedAttributesCount(4) + link1 := span.Links().AppendEmpty() + link1.SetDroppedAttributesCount(4) + span.SetDroppedLinksCount(3) +} + +func fillSpanThree(span pdata.Span) { + span.SetName("operationC") + span.SetStartTimestamp(TestSpanStartTimestamp) + span.SetEndTimestamp(TestSpanEndTimestamp) + initSpanAttributes(span.Attributes()) + span.SetDroppedAttributesCount(5) +} diff --git a/internal/otel_collector/internal/testutil/testutil.go b/internal/otel_collector/internal/testutil/testutil.go new file mode 100644 index 00000000000..20462754077 --- /dev/null +++ b/internal/otel_collector/internal/testutil/testutil.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "net" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +type portpair struct { + first string + last string +} + +// GetAvailableLocalAddress finds an available local port and returns an endpoint +// describing it. The port is available for opening when this function returns +// provided that there is no race by some other code to grab the same port +// immediately. 
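+//
+// Typical use in a test, as a sketch (cfg stands in for any component
+// configuration that takes an endpoint):
+//
+//   endpoint := GetAvailableLocalAddress(t)
+//   cfg.Endpoint = endpoint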
+func GetAvailableLocalAddress(t *testing.T) string {
+ ln, err := net.Listen("tcp", "localhost:0")
+ require.NoError(t, err, "Failed to get a free local port")
+ // There is a possible race if something else takes this same port before
+ // the test uses it, however, that is unlikely in practice.
+ defer ln.Close()
+ return ln.Addr().String()
+}
+
+// GetAvailablePort finds an available local port and returns it. The port is
+// available for opening when this function returns provided that there is no
+// race by some other code to grab the same port immediately.
+func GetAvailablePort(t *testing.T) uint16 {
+ // Retry has been added for windows as net.Listen can return a port that is not actually available. Details can be
+ // found in https://github.com/docker/for-win/issues/3171 but to summarize Hyper-V will reserve ranges of ports
+ // which do not show up under the "netstat -ano" but can only be found by
+ // "netsh interface ipv4 show excludedportrange protocol=tcp". We'll use []exclusions to hold those ranges and
+ // retry if the port returned by GetAvailableLocalAddress falls in one of those ranges.
+ var exclusions []portpair
+ portFound := false
+ var port string
+ var err error
+ if runtime.GOOS == "windows" {
+ exclusions = getExclusionsList(t)
+ }
+
+ for !portFound {
+ endpoint := GetAvailableLocalAddress(t)
+ _, port, err = net.SplitHostPort(endpoint)
+ require.NoError(t, err)
+ portFound = true
+ if runtime.GOOS == "windows" {
+ for _, pair := range exclusions {
+ if port >= pair.first && port <= pair.last {
+ portFound = false
+ break
+ }
+ }
+ }
+ }
+
+ portInt, err := strconv.Atoi(port)
+ require.NoError(t, err)
+
+ return uint16(portInt)
+}
+
+// Get excluded ports on Windows from the command: netsh interface ipv4 show excludedportrange protocol=tcp
+func getExclusionsList(t *testing.T) []portpair {
+ cmd := exec.Command("netsh", "interface", "ipv4", "show", "excludedportrange", "protocol=tcp")
+ output, err := cmd.CombinedOutput()
+ require.NoError(t, err)
+
+ exclusions := createExclusionsList(string(output), t)
+ return exclusions
+}
+
+func createExclusionsList(exclusionsText string, t *testing.T) []portpair {
+ exclusions := []portpair{}
+
+ parts := strings.Split(exclusionsText, "--------")
+ require.Equal(t, len(parts), 3)
+ portsText := strings.Split(parts[2], "*")
+ require.Greater(t, len(portsText), 1) // original text may have a suffix like " - Administered port exclusions."
+ lines := strings.Split(portsText[0], "\n")
+ for _, line := range lines {
+ if strings.TrimSpace(line) != "" {
+ entries := strings.Fields(strings.TrimSpace(line))
+ require.Equal(t, len(entries), 2)
+ pair := portpair{entries[0], entries[1]}
+ exclusions = append(exclusions, pair)
+ }
+ }
+ return exclusions
+} diff --git a/internal/otel_collector/internal/version/version.go b/internal/otel_collector/internal/version/version.go new file mode 100644 index 00000000000..a0fa98ac9ff --- /dev/null +++ b/internal/otel_collector/internal/version/version.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "time"
+)
+
+var (
+ // Version variable will be replaced at link time after `make` has been run.
+ Version = "latest"
+
+ // startTime
+ startTime time.Time
+)
+
+func init() {
+ startTime = time.Now()
+}
+
+// InfoVar is a singleton instance of the Info struct.
+var InfoVar = Info{
+ {"Version", Version},
+ {"GoVersion", runtime.Version()},
+ {"OS", runtime.GOOS},
+ {"Architecture", runtime.GOARCH},
+ // Add other valuable build-time information here.
+}
+
+// RuntimeVar returns the InfoVar plus runtime information like uptime.
+func RuntimeVar() Info {
+ return append(InfoVar, [2]string{"StartTimestamp", startTime.String()}, [2]string{"Uptime", time.Since(startTime).String()})
+}
+
+// Info has properties about the build and runtime.
+type Info [][2]string
+
+// String returns a formatted string, with linebreaks, intended to be displayed
+// on stdout.
+func (i Info) String() string {
+ buf := new(bytes.Buffer)
+ maxRow1Alignment := 0
+ for _, prop := range i {
+ if cl0 := len(prop[0]); cl0 > maxRow1Alignment {
+ maxRow1Alignment = cl0
+ }
+ }
+
+ for _, prop := range i {
+ // Then finally print them with left alignment
+ fmt.Fprintf(buf, "%*s %s\n", -maxRow1Alignment, prop[0], prop[1])
+ }
+ return buf.String()
+} diff --git a/internal/otel_collector/obsreport/doc.go b/internal/otel_collector/obsreport/doc.go new file mode 100644 index 00000000000..cb796729ce6 --- /dev/null +++ b/internal/otel_collector/obsreport/doc.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package obsreport provides unified and consistent observability signals (
+// metrics, tracing, etc) for components of the OpenTelemetry collector.
+//
+// The function Configure is used to control which signals are going to be
+// generated. It provides functions for the typical operations of receivers,
+// processors, and exporters.
+//
+// Receivers should use the respective start and end according to the data type
+// being received, ie.:
+//
+// * Traces receive operations should use the pair:
+// StartTracesOp/EndTracesOp
+//
+// * Metrics receive operations should use the pair:
+// StartMetricsOp/EndMetricsOp
+//
+// * Logs receive operations should use the pair:
+// StartLogsOp/EndLogsOp
+//
+// Similar for exporters:
+//
+// * Traces export operations should use the pair:
+// StartTracesOp/EndTracesOp
+//
+// * Metrics export operations should use the pair:
+// StartMetricsOp/EndMetricsOp
+//
+// * Logs export operations should use the pair:
+// StartLogsOp/EndLogsOp
+//
+// The package is capable of generating legacy metrics by using the
+// observability package allowing a controlled transition from legacy to the
+// new metrics. The goal is to eventually remove the legacy metrics and use only
+// the new metrics. 
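+//
+// As an illustration of the receiver start/end pairs described above, a
+// receiver's trace path is typically wrapped as follows (a sketch; obsrecv is
+// an obsreport.Receiver and next is the downstream consumer):
+//
+//   ctx = obsrecv.StartTracesOp(ctx)
+//   err := next.ConsumeTraces(ctx, td)
+//   obsrecv.EndTracesOp(ctx, "otlp", td.SpanCount(), err)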
+//
+// The main differences regarding the legacy metrics are:
+//
+// 1. "Amount of metric data" is measured as metric points (ie.: a single value
+// in time); contrast this with the number of time series used by the legacy
+// metrics. Number of metric data points is a more general concept regarding
+// various metric formats.
+//
+// 2. Exporters measure the number of items, ie.: number of spans or metric
+// points, that were sent and the ones for which the attempt to send failed.
+// For more information about this see Notes below about reporting data loss.
+//
+// 3. All measurements of "amount of data" used in the new metrics for receivers
+// and exporters should reflect their native formats, not the internal format
+// used in the Collector. This is to facilitate reconciliation between Collector,
+// client and backend. For instance: certain metric formats do not provide
+// direct support for histograms and have predefined conventions to represent
+// those; this conversion may end up with a different number of time series and
+// data points than the internal Collector format.
+//
+// Notes:
+//
+// * Data loss should be recorded only when the component itself removes the data
+// from the pipeline. Legacy metrics for receivers used "dropped" in their names
+// but these could be non-zero under normal operations and reflected no actual
+// data loss when exporters with "sending_queue" are used. New metrics were renamed
+// to avoid this misunderstanding. Here are the general recommendations for reporting data loss:
+//
+// * Receivers reporting errors to clients typically result in the client
+// re-sending the same data so it is more correct to report "receive errors",
+// not actual data loss.
+//
+// * Exporters need to report individual failures to send data, but on
+// typical production pipelines processors usually take care of retries,
+// so these should be reported as "send errors".
+//
+// * Data "filtered out" should have its own metrics and not be confused
+// with dropped data.
+//
+// Naming Convention for New Metrics
+//
+// Common Metrics:
+// Metrics shared by different components should follow the convention below:
+//
+// `<component>/<metric_name>`
+//
+// As a label the metric should have at least `{<component>="<name>"}` where
+// `<name>` is the name used in the configuration for the instance of the
+// component, eg.:
+//
+// `receiver/accepted_spans{receiver="otlp",...}`
+// `exporter/sent_spans{exporter="otlp/prod",...}`
+//
+// Component Specific Metrics:
+// These metrics are implemented by specific components, eg.: batch processor.
+// The general pattern is the same as the common metrics but with the addition
+// of the component type (as it appears in the configuration) before the actual
+// metric:
+//
+// `<component>/<type>/<metric_name>`
+//
+// Even metrics exclusive to a single type should follow the conventions above
+// and also include the type (as written in the configuration) as part of the
+// metric name since there could be multiple instances of the same type in
+// different pipelines, eg.:
+//
+// `processor/batch/batch_size_trigger_send{processor="batch/dev",...}`
+//
+package obsreport
diff --git a/internal/otel_collector/obsreport/obsreport.go b/internal/otel_collector/obsreport/obsreport.go
new file mode 100644
index 00000000000..79edae4765e
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +func recordError(span trace.Span, err error) { + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } +} diff --git a/internal/otel_collector/obsreport/obsreport_exporter.go b/internal/otel_collector/obsreport/obsreport_exporter.go new file mode 100644 index 00000000000..8b1ee551be9 --- /dev/null +++ b/internal/otel_collector/obsreport/obsreport_exporter.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/internal/obsreportconfig" + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" +) + +// Exporter is a helper to add observability to a component.Exporter. +type Exporter struct { + level configtelemetry.Level + spanNamePrefix string + mutators []tag.Mutator + tracer trace.Tracer +} + +// ExporterSettings are settings for creating an Exporter. +type ExporterSettings struct { + Level configtelemetry.Level + ExporterID config.ComponentID + ExporterCreateSettings component.ExporterCreateSettings +} + +// NewExporter creates a new Exporter. +func NewExporter(cfg ExporterSettings) *Exporter { + return &Exporter{ + level: cfg.Level, + spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), + mutators: []tag.Mutator{tag.Upsert(obsmetrics.TagKeyExporter, cfg.ExporterID.String(), tag.WithTTL(tag.TTLNoPropagation))}, + tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), + } +} + +// StartTracesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (exp *Exporter) StartTracesOp(ctx context.Context) context.Context { + return exp.startOp(ctx, obsmetrics.ExportTraceDataOperationSuffix) +} + +// EndTracesOp completes the export operation that was started with StartTracesOp. 
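+//
+// A hypothetical usage sketch (pushTraces and td are illustrative, not part
+// of this package):
+//
+//   ctx = exp.StartTracesOp(ctx)
+//   err := pushTraces(ctx, td)
+//   exp.EndTracesOp(ctx, td.SpanCount(), err)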
+func (exp *Exporter) EndTracesOp(ctx context.Context, numSpans int, err error) {
+	numSent, numFailedToSend := toNumItems(numSpans, err)
+	exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentSpans, obsmetrics.ExporterFailedToSendSpans)
+	endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey)
+}
+
+// StartMetricsOp is called at the start of an Export operation.
+// The returned context should be used in other calls to the Exporter functions
+// dealing with the same export operation.
+func (exp *Exporter) StartMetricsOp(ctx context.Context) context.Context {
+	return exp.startOp(ctx, obsmetrics.ExportMetricsOperationSuffix)
+}
+
+// EndMetricsOp completes the export operation that was started with
+// StartMetricsOp.
+func (exp *Exporter) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) {
+	numSent, numFailedToSend := toNumItems(numMetricPoints, err)
+	exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentMetricPoints, obsmetrics.ExporterFailedToSendMetricPoints)
+	endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey)
+}
+
+// StartLogsOp is called at the start of an Export operation.
+// The returned context should be used in other calls to the Exporter functions
+// dealing with the same export operation.
+func (exp *Exporter) StartLogsOp(ctx context.Context) context.Context {
+	return exp.startOp(ctx, obsmetrics.ExportLogsOperationSuffix)
+}
+
+// EndLogsOp completes the export operation that was started with StartLogsOp.
+func (exp *Exporter) EndLogsOp(ctx context.Context, numLogRecords int, err error) {
+	numSent, numFailedToSend := toNumItems(numLogRecords, err)
+	exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentLogRecords, obsmetrics.ExporterFailedToSendLogRecords)
+	endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey)
+}
+
+// startOp creates the span used to trace the operation, returning
+// the updated context.
+func (exp *Exporter) startOp(ctx context.Context, operationSuffix string) context.Context {
+	spanName := exp.spanNamePrefix + operationSuffix
+	ctx, _ = exp.tracer.Start(ctx, spanName)
+	return ctx
+}
+
+func (exp *Exporter) recordMetrics(ctx context.Context, numSent, numFailedToSend int64, sentMeasure, failedToSendMeasure *stats.Int64Measure) {
+	if obsreportconfig.Level == configtelemetry.LevelNone {
+		return
+	}
+	// Ignore the error for now. This should not happen.
+	_ = stats.RecordWithTags(ctx, exp.mutators, sentMeasure.M(numSent), failedToSendMeasure.M(numFailedToSend))
+}
+
+func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) {
+	span := trace.SpanFromContext(ctx)
+	// End span according to errors.
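+	// Attributes and the error status are only recorded when the span is
+	// actually recording (i.e. sampled); span.End() is called unconditionally.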
+	if span.IsRecording() {
+		span.SetAttributes(
+			attribute.Int64(sentItemsKey, numSent),
+			attribute.Int64(failedToSendItemsKey, numFailedToSend),
+		)
+		recordError(span, err)
+	}
+	span.End()
+}
+
+func toNumItems(numExportedItems int, err error) (int64, int64) {
+	if err != nil {
+		return 0, int64(numExportedItems)
+	}
+	return int64(numExportedItems), 0
+}
diff --git a/internal/otel_collector/obsreport/obsreport_processor.go b/internal/otel_collector/obsreport/obsreport_processor.go
new file mode 100644
index 00000000000..f5df4b4ffd8
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport_processor.go
@@ -0,0 +1,178 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreport
+
+import (
+	"context"
+	"strings"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+)
+
+// BuildProcessorCustomMetricName is used to build a metric name following
+// the standards used in the Collector. The configType should be the same
+// value used to identify the type in the config.
+func BuildProcessorCustomMetricName(configType, metric string) string {
+	componentPrefix := obsmetrics.ProcessorPrefix
+	if !strings.HasSuffix(componentPrefix, obsmetrics.NameSep) {
+		componentPrefix += obsmetrics.NameSep
+	}
+	if configType == "" {
+		return componentPrefix
+	}
+	return componentPrefix + configType + obsmetrics.NameSep + metric
+}
+
+// Processor is a helper to add observability to a component.Processor.
+type Processor struct {
+	level    configtelemetry.Level
+	mutators []tag.Mutator
+}
+
+// ProcessorSettings are settings for creating a Processor.
+type ProcessorSettings struct {
+	Level       configtelemetry.Level
+	ProcessorID config.ComponentID
+}
+
+// NewProcessor creates a new Processor.
+func NewProcessor(cfg ProcessorSettings) *Processor {
+	return &Processor{
+		level:    cfg.Level,
+		mutators: []tag.Mutator{tag.Upsert(obsmetrics.TagKeyProcessor, cfg.ProcessorID.String(), tag.WithTTL(tag.TTLNoPropagation))},
+	}
+}
+
+// TracesAccepted reports that the trace data was accepted.
+func (por *Processor) TracesAccepted(ctx context.Context, numSpans int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedSpans.M(int64(numSpans)),
+			obsmetrics.ProcessorRefusedSpans.M(0),
+			obsmetrics.ProcessorDroppedSpans.M(0),
+		)
+	}
+}
+
+// TracesRefused reports that the trace data was refused.
+func (por *Processor) TracesRefused(ctx context.Context, numSpans int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedSpans.M(0),
+			obsmetrics.ProcessorRefusedSpans.M(int64(numSpans)),
+			obsmetrics.ProcessorDroppedSpans.M(0),
+		)
+	}
+}
+
+// TracesDropped reports that the trace data was dropped.
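+//
+// A hypothetical usage sketch of these helpers (the names are illustrative,
+// not part of this package):
+//
+//   obsrep := obsreport.NewProcessor(obsreport.ProcessorSettings{ProcessorID: id})
+//   obsrep.TracesAccepted(ctx, td.SpanCount())
+//   // ... and TracesRefused/TracesDropped on the corresponding outcomes.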
+func (por *Processor) TracesDropped(ctx context.Context, numSpans int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedSpans.M(0),
+			obsmetrics.ProcessorRefusedSpans.M(0),
+			obsmetrics.ProcessorDroppedSpans.M(int64(numSpans)),
+		)
+	}
+}
+
+// MetricsAccepted reports that the metrics were accepted.
+func (por *Processor) MetricsAccepted(ctx context.Context, numPoints int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedMetricPoints.M(int64(numPoints)),
+			obsmetrics.ProcessorRefusedMetricPoints.M(0),
+			obsmetrics.ProcessorDroppedMetricPoints.M(0),
+		)
+	}
+}
+
+// MetricsRefused reports that the metrics were refused.
+func (por *Processor) MetricsRefused(ctx context.Context, numPoints int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedMetricPoints.M(0),
+			obsmetrics.ProcessorRefusedMetricPoints.M(int64(numPoints)),
+			obsmetrics.ProcessorDroppedMetricPoints.M(0),
+		)
+	}
+}
+
+// MetricsDropped reports that the metrics were dropped.
+func (por *Processor) MetricsDropped(ctx context.Context, numPoints int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedMetricPoints.M(0),
+			obsmetrics.ProcessorRefusedMetricPoints.M(0),
+			obsmetrics.ProcessorDroppedMetricPoints.M(int64(numPoints)),
+		)
+	}
+}
+
+// LogsAccepted reports that the logs were accepted.
+func (por *Processor) LogsAccepted(ctx context.Context, numRecords int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedLogRecords.M(int64(numRecords)),
+			obsmetrics.ProcessorRefusedLogRecords.M(0),
+			obsmetrics.ProcessorDroppedLogRecords.M(0),
+		)
+	}
+}
+
+// LogsRefused reports that the logs were refused.
+func (por *Processor) LogsRefused(ctx context.Context, numRecords int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedLogRecords.M(0),
+			obsmetrics.ProcessorRefusedLogRecords.M(int64(numRecords)),
+			// Fixed to record the log-record measure; previously this
+			// incorrectly recorded ProcessorDroppedMetricPoints.
+			obsmetrics.ProcessorDroppedLogRecords.M(0),
+		)
+	}
+}
+
+// LogsDropped reports that the logs were dropped.
+func (por *Processor) LogsDropped(ctx context.Context, numRecords int) {
+	if por.level != configtelemetry.LevelNone {
+		stats.RecordWithTags(
+			ctx,
+			por.mutators,
+			obsmetrics.ProcessorAcceptedLogRecords.M(0),
+			obsmetrics.ProcessorRefusedLogRecords.M(0),
+			obsmetrics.ProcessorDroppedLogRecords.M(int64(numRecords)),
+		)
+	}
+}
diff --git a/internal/otel_collector/obsreport/obsreport_receiver.go b/internal/otel_collector/obsreport/obsreport_receiver.go
new file mode 100644
index 00000000000..4c278e69e88
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport_receiver.go
@@ -0,0 +1,206 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreport
+
+import (
+	"context"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/internal/obsreportconfig"
+	"go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+)
+
+// Receiver is a helper to add observability to a component.Receiver.
+type Receiver struct {
+	spanNamePrefix string
+	transport      string
+	longLivedCtx   bool
+	mutators       []tag.Mutator
+	tracer         trace.Tracer
+}
+
+// ReceiverSettings are settings for creating a Receiver.
+type ReceiverSettings struct {
+	ReceiverID config.ComponentID
+	Transport  string
+	// LongLivedCtx when true indicates that the context passed in the call
+	// outlives the individual receive operation.
+	// Typically the long lived context is associated with a connection,
+	// eg.: a gRPC stream, for which many batches of data are received in individual
+	// operations without a corresponding new context per operation.
+	LongLivedCtx bool
+}
+
+// NewReceiver creates a new Receiver.
+func NewReceiver(cfg ReceiverSettings) *Receiver {
+	return &Receiver{
+		spanNamePrefix: obsmetrics.ReceiverPrefix + cfg.ReceiverID.String(),
+		transport:      cfg.Transport,
+		longLivedCtx:   cfg.LongLivedCtx,
+		mutators: []tag.Mutator{
+			tag.Upsert(obsmetrics.TagKeyReceiver, cfg.ReceiverID.String(), tag.WithTTL(tag.TTLNoPropagation)),
+			tag.Upsert(obsmetrics.TagKeyTransport, cfg.Transport, tag.WithTTL(tag.TTLNoPropagation)),
+		},
+		tracer: otel.GetTracerProvider().Tracer(cfg.ReceiverID.String()),
+	}
+}
+
+// StartTracesOp is called when a request is received from a client.
+// The returned context should be used in other calls to the obsreport functions
+// dealing with the same receive operation.
+func (rec *Receiver) StartTracesOp(operationCtx context.Context) context.Context {
+	return rec.startOp(operationCtx, obsmetrics.ReceiveTraceDataOperationSuffix)
+}
+
+// EndTracesOp completes the receive operation that was started with
+// StartTracesOp.
+func (rec *Receiver) EndTracesOp(
+	receiverCtx context.Context,
+	format string,
+	numReceivedSpans int,
+	err error,
+) {
+	rec.endOp(receiverCtx, format, numReceivedSpans, err, config.TracesDataType)
+}
+
+// StartLogsOp is called when a request is received from a client.
+// The returned context should be used in other calls to the obsreport functions
+// dealing with the same receive operation.
+func (rec *Receiver) StartLogsOp(operationCtx context.Context) context.Context {
+	return rec.startOp(operationCtx, obsmetrics.ReceiverLogsOperationSuffix)
+}
+
+// EndLogsOp completes the receive operation that was started with
+// StartLogsOp.
+func (rec *Receiver) EndLogsOp(
+	receiverCtx context.Context,
+	format string,
+	numReceivedLogRecords int,
+	err error,
+) {
+	rec.endOp(receiverCtx, format, numReceivedLogRecords, err, config.LogsDataType)
+}
+
+// StartMetricsOp is called when a request is received from a client.
+// The returned context should be used in other calls to the obsreport functions
+// dealing with the same receive operation.
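+//
+// A hypothetical usage sketch (handleRequest and req are illustrative, not
+// part of this package):
+//
+//   ctx = rec.StartMetricsOp(ctx)
+//   numPoints, err := handleRequest(ctx, req)
+//   rec.EndMetricsOp(ctx, "otlp", numPoints, err)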
+func (rec *Receiver) StartMetricsOp(operationCtx context.Context) context.Context {
+	return rec.startOp(operationCtx, obsmetrics.ReceiverMetricsOperationSuffix)
+}
+
+// EndMetricsOp completes the receive operation that was started with
+// StartMetricsOp.
+func (rec *Receiver) EndMetricsOp(
+	receiverCtx context.Context,
+	format string,
+	numReceivedPoints int,
+	err error,
+) {
+	rec.endOp(receiverCtx, format, numReceivedPoints, err, config.MetricsDataType)
+}
+
+// startOp creates the span used to trace the operation, returning
+// the updated context with the created span.
+func (rec *Receiver) startOp(receiverCtx context.Context, operationSuffix string) context.Context {
+	ctx, _ := tag.New(receiverCtx, rec.mutators...)
+	var span trace.Span
+	spanName := rec.spanNamePrefix + operationSuffix
+	if !rec.longLivedCtx {
+		ctx, span = rec.tracer.Start(ctx, spanName)
+	} else {
+		// Since the receiverCtx is long lived, do not use it to start the span.
+		// This way this trace ends when EndTracesOp is called.
+		// Here it is safe to ignore the returned context since it is not used below.
+		_, span = rec.tracer.Start(context.Background(), spanName, trace.WithLinks(trace.Link{
+			SpanContext: trace.SpanContextFromContext(receiverCtx),
+		}))
+
+		ctx = trace.ContextWithSpan(ctx, span)
+	}
+
+	if rec.transport != "" {
+		span.SetAttributes(attribute.String(obsmetrics.TransportKey, rec.transport))
+	}
+	return ctx
+}
+
+// endOp records the observability signals at the end of an operation.
+func (rec *Receiver) endOp(
+	receiverCtx context.Context,
+	format string,
+	numReceivedItems int,
+	err error,
+	dataType config.DataType,
+) {
+	numAccepted := numReceivedItems
+	numRefused := 0
+	if err != nil {
+		numAccepted = 0
+		numRefused = numReceivedItems
+	}
+
+	span := trace.SpanFromContext(receiverCtx)
+
+	if obsreportconfig.Level != configtelemetry.LevelNone {
+		var acceptedMeasure, refusedMeasure *stats.Int64Measure
+		switch dataType {
+		case config.TracesDataType:
+			acceptedMeasure = obsmetrics.ReceiverAcceptedSpans
+			refusedMeasure = obsmetrics.ReceiverRefusedSpans
+		case config.MetricsDataType:
+			acceptedMeasure = obsmetrics.ReceiverAcceptedMetricPoints
+			refusedMeasure = obsmetrics.ReceiverRefusedMetricPoints
+		case config.LogsDataType:
+			acceptedMeasure = obsmetrics.ReceiverAcceptedLogRecords
+			refusedMeasure = obsmetrics.ReceiverRefusedLogRecords
+		}
+
+		stats.Record(
+			receiverCtx,
+			acceptedMeasure.M(int64(numAccepted)),
+			refusedMeasure.M(int64(numRefused)))
+	}
+
+	// end span according to errors
+	if span.IsRecording() {
+		var acceptedItemsKey, refusedItemsKey string
+		switch dataType {
+		case config.TracesDataType:
+			acceptedItemsKey = obsmetrics.AcceptedSpansKey
+			refusedItemsKey = obsmetrics.RefusedSpansKey
+		case config.MetricsDataType:
+			acceptedItemsKey = obsmetrics.AcceptedMetricPointsKey
+			refusedItemsKey = obsmetrics.RefusedMetricPointsKey
+		case config.LogsDataType:
+			acceptedItemsKey = obsmetrics.AcceptedLogRecordsKey
+			refusedItemsKey = obsmetrics.RefusedLogRecordsKey
+		}
+
+		span.SetAttributes(
+			attribute.String(obsmetrics.FormatKey, format),
+			attribute.Int64(acceptedItemsKey, int64(numAccepted)),
+			attribute.Int64(refusedItemsKey, int64(numRefused)),
+		)
+		recordError(span, err)
+	}
+	span.End()
+}
diff --git a/internal/otel_collector/obsreport/obsreport_scraper.go b/internal/otel_collector/obsreport/obsreport_scraper.go
new file mode 100644
index 00000000000..caa03fa32b4
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreport_scraper.go
@@ -0,0 +1,120 @@
+// 
Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsreport + +import ( + "context" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/internal/obsreportconfig" + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/receiver/scrapererror" +) + +// ScraperContext adds the keys used when recording observability metrics to +// the given context returning the newly created context. This context should +// be used in related calls to the obsreport functions so metrics are properly +// recorded. +func ScraperContext( + ctx context.Context, + receiverID config.ComponentID, + scraper config.ComponentID, +) context.Context { + ctx, _ = tag.New( + ctx, + tag.Upsert(obsmetrics.TagKeyReceiver, receiverID.String(), tag.WithTTL(tag.TTLNoPropagation)), + tag.Upsert(obsmetrics.TagKeyScraper, scraper.String(), tag.WithTTL(tag.TTLNoPropagation))) + + return ctx +} + +// Scraper is a helper to add observability to a component.Scraper. +type Scraper struct { + receiverID config.ComponentID + scraper config.ComponentID + tracer trace.Tracer +} + +// ScraperSettings are settings for creating a Scraper. +type ScraperSettings struct { + ReceiverID config.ComponentID + Scraper config.ComponentID +} + +// NewScraper creates a new Scraper. +func NewScraper(cfg ScraperSettings) *Scraper { + return &Scraper{ + receiverID: cfg.ReceiverID, + scraper: cfg.Scraper, + tracer: otel.GetTracerProvider().Tracer(cfg.Scraper.String()), + } +} + +// StartMetricsOp is called when a scrape operation is started. The +// returned context should be used in other calls to the obsreport functions +// dealing with the same scrape operation. +func (s *Scraper) StartMetricsOp( + scraperCtx context.Context, +) context.Context { + spanName := obsmetrics.ScraperPrefix + s.receiverID.String() + obsmetrics.NameSep + s.scraper.String() + obsmetrics.ScraperMetricsOperationSuffix + ctx, _ := s.tracer.Start(scraperCtx, spanName) + return ctx +} + +// EndMetricsOp completes the scrape operation that was started with +// StartMetricsOp. 
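+//
+// A hypothetical usage sketch (doScrape is illustrative, not part of this
+// package):
+//
+//   ctx = s.StartMetricsOp(ctx)
+//   md, err := doScrape(ctx)
+//   s.EndMetricsOp(ctx, md.DataPointCount(), err)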
+func (s *Scraper) EndMetricsOp(
+	scraperCtx context.Context,
+	numScrapedMetrics int,
+	err error,
+) {
+	numErroredMetrics := 0
+	if err != nil {
+		if partialErr, isPartial := err.(scrapererror.PartialScrapeError); isPartial {
+			numErroredMetrics = partialErr.Failed
+		} else {
+			numErroredMetrics = numScrapedMetrics
+			numScrapedMetrics = 0
+		}
+	}
+
+	span := trace.SpanFromContext(scraperCtx)
+
+	if obsreportconfig.Level != configtelemetry.LevelNone {
+		stats.Record(
+			scraperCtx,
+			obsmetrics.ScraperScrapedMetricPoints.M(int64(numScrapedMetrics)),
+			obsmetrics.ScraperErroredMetricPoints.M(int64(numErroredMetrics)))
+	}
+
+	// end span according to errors
+	if span.IsRecording() {
+		span.SetAttributes(
+			attribute.String(obsmetrics.FormatKey, string(config.MetricsDataType)),
+			attribute.Int64(obsmetrics.ScrapedMetricPointsKey, int64(numScrapedMetrics)),
+			attribute.Int64(obsmetrics.ErroredMetricPointsKey, int64(numErroredMetrics)),
+		)
+		recordError(span, err)
+	}
+
+	span.End()
+}
diff --git a/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go b/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go
new file mode 100644
index 00000000000..e0ca299cd0d
--- /dev/null
+++ b/internal/otel_collector/obsreport/obsreporttest/obsreporttest.go
@@ -0,0 +1,203 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package obsreporttest
+
+import (
+	"reflect"
+	"sort"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/internal/obsreportconfig"
+)
+
+var (
+	// Names used by the metrics and labels are hard coded here in order to avoid
+	// inadvertent changes: at this point changing metric names and labels should
+	// be treated as a breaking change and requires a good justification.
+	// Changes to metric names or labels can break alerting, dashboards, etc
+	// that are used to monitor the Collector in production deployments.
+	// DO NOT SWITCH THE VARIABLES BELOW TO SIMILAR ONES DEFINED ON THE PACKAGE.
+	receiverTag, _  = tag.NewKey("receiver")
+	scraperTag, _   = tag.NewKey("scraper")
+	transportTag, _ = tag.NewKey("transport")
+	exporterTag, _  = tag.NewKey("exporter")
+	processorTag, _ = tag.NewKey("processor")
+)
+
+// SetupRecordedMetricsTest sets up the testing environment to check the metrics recorded by receivers, processors or exporters.
+// The returned function should be deferred.
+func SetupRecordedMetricsTest() (func(), error) {
+	obsMetrics := obsreportconfig.Configure(configtelemetry.LevelNormal)
+	views := obsMetrics.Views
+	err := view.Register(views...)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() {
+		view.Unregister(views...)
+	}, err
+}
+
+// CheckExporterTraces checks that the current exported values for trace exporter metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckExporterTraces(t *testing.T, exporter config.ComponentID, acceptedSpans, droppedSpans int64) {
+	exporterTags := tagsForExporterView(exporter)
+	checkValueForView(t, exporterTags, acceptedSpans, "exporter/sent_spans")
+	checkValueForView(t, exporterTags, droppedSpans, "exporter/send_failed_spans")
+}
+
+// CheckExporterMetrics checks that the current exported values for metrics exporter metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckExporterMetrics(t *testing.T, exporter config.ComponentID, acceptedMetricsPoints, droppedMetricsPoints int64) {
+	exporterTags := tagsForExporterView(exporter)
+	checkValueForView(t, exporterTags, acceptedMetricsPoints, "exporter/sent_metric_points")
+	checkValueForView(t, exporterTags, droppedMetricsPoints, "exporter/send_failed_metric_points")
+}
+
+// CheckExporterLogs checks that the current exported values for logs exporter metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckExporterLogs(t *testing.T, exporter config.ComponentID, acceptedLogRecords, droppedLogRecords int64) {
+	exporterTags := tagsForExporterView(exporter)
+	checkValueForView(t, exporterTags, acceptedLogRecords, "exporter/sent_log_records")
+	checkValueForView(t, exporterTags, droppedLogRecords, "exporter/send_failed_log_records")
+}
+
+// CheckProcessorTraces checks that the current exported values for trace processor metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckProcessorTraces(t *testing.T, processor config.ComponentID, acceptedSpans, refusedSpans, droppedSpans int64) {
+	processorTags := tagsForProcessorView(processor)
+	checkValueForView(t, processorTags, acceptedSpans, "processor/accepted_spans")
+	checkValueForView(t, processorTags, refusedSpans, "processor/refused_spans")
+	checkValueForView(t, processorTags, droppedSpans, "processor/dropped_spans")
+}
+
+// CheckProcessorMetrics checks that the current exported values for metrics processor metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckProcessorMetrics(t *testing.T, processor config.ComponentID, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints int64) {
+	processorTags := tagsForProcessorView(processor)
+	checkValueForView(t, processorTags, acceptedMetricPoints, "processor/accepted_metric_points")
+	checkValueForView(t, processorTags, refusedMetricPoints, "processor/refused_metric_points")
+	checkValueForView(t, processorTags, droppedMetricPoints, "processor/dropped_metric_points")
+}
+
+// CheckProcessorLogs checks that the current exported values for logs processor metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckProcessorLogs(t *testing.T, processor config.ComponentID, acceptedLogRecords, refusedLogRecords, droppedLogRecords int64) {
+	processorTags := tagsForProcessorView(processor)
+	checkValueForView(t, processorTags, acceptedLogRecords, "processor/accepted_log_records")
+	checkValueForView(t, processorTags, refusedLogRecords, "processor/refused_log_records")
+	checkValueForView(t, processorTags, droppedLogRecords, "processor/dropped_log_records")
+}
+
+// CheckReceiverTraces checks that the current exported values for trace receiver metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckReceiverTraces(t *testing.T, receiver config.ComponentID, protocol string, acceptedSpans, droppedSpans int64) {
+	receiverTags := tagsForReceiverView(receiver, protocol)
+	checkValueForView(t, receiverTags, acceptedSpans, "receiver/accepted_spans")
+	checkValueForView(t, receiverTags, droppedSpans, "receiver/refused_spans")
+}
+
+// CheckReceiverLogs checks that the current exported values for logs receiver metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckReceiverLogs(t *testing.T, receiver config.ComponentID, protocol string, acceptedLogRecords, droppedLogRecords int64) {
+	receiverTags := tagsForReceiverView(receiver, protocol)
+	checkValueForView(t, receiverTags, acceptedLogRecords, "receiver/accepted_log_records")
+	checkValueForView(t, receiverTags, droppedLogRecords, "receiver/refused_log_records")
+}
+
+// CheckReceiverMetrics checks that the current exported values for metrics receiver metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckReceiverMetrics(t *testing.T, receiver config.ComponentID, protocol string, acceptedMetricPoints, droppedMetricPoints int64) {
+	receiverTags := tagsForReceiverView(receiver, protocol)
+	checkValueForView(t, receiverTags, acceptedMetricPoints, "receiver/accepted_metric_points")
+	checkValueForView(t, receiverTags, droppedMetricPoints, "receiver/refused_metric_points")
+}
+
+// CheckScraperMetrics checks that the current exported values for metrics scraper metrics match the given values.
+// When this function is called it is required to also call SetupRecordedMetricsTest as first thing.
+func CheckScraperMetrics(t *testing.T, receiver config.ComponentID, scraper config.ComponentID, scrapedMetricPoints, erroredMetricPoints int64) {
+	scraperTags := tagsForScraperView(receiver, scraper)
+	checkValueForView(t, scraperTags, scrapedMetricPoints, "scraper/scraped_metric_points")
+	checkValueForView(t, scraperTags, erroredMetricPoints, "scraper/errored_metric_points")
+}
+
+// checkValueForView checks that the current exported value in the view with
+// the given name and tags is equal to "value".
+func checkValueForView(t *testing.T, wantTags []tag.Tag, value int64, vName string) {
+	// Make sure the tags slice is sorted by tag keys.
+	sortTags(wantTags)
+
+	rows, err := view.RetrieveData(vName)
+	require.NoError(t, err)
+
+	for _, row := range rows {
+		// Make sure the tags slice is sorted by tag keys.
+		sortTags(row.Tags)
+		if reflect.DeepEqual(wantTags, row.Tags) {
+			sum := row.Data.(*view.SumData)
+			require.Equal(t, float64(value), sum.Value)
+			return
+		}
+	}
+
+	require.Failf(t, "could not find tags", "wantTags: %s in rows %v", wantTags, rows)
+}
+
+// tagsForReceiverView returns the tags that are needed for the receiver views.
+func tagsForReceiverView(receiver config.ComponentID, transport string) []tag.Tag {
+	tags := make([]tag.Tag, 0, 2)
+
+	tags = append(tags, tag.Tag{Key: receiverTag, Value: receiver.String()})
+	if transport != "" {
+		tags = append(tags, tag.Tag{Key: transportTag, Value: transport})
+	}
+
+	return tags
+}
+
+// tagsForScraperView returns the tags that are needed for the scraper views.
+func tagsForScraperView(receiver config.ComponentID, scraper config.ComponentID) []tag.Tag {
+	return []tag.Tag{
+		{Key: receiverTag, Value: receiver.String()},
+		{Key: scraperTag, Value: scraper.String()},
+	}
+}
+
+// tagsForProcessorView returns the tags that are needed for the processor views.
+func tagsForProcessorView(processor config.ComponentID) []tag.Tag {
+	return []tag.Tag{
+		{Key: processorTag, Value: processor.String()},
+	}
+}
+
+// tagsForExporterView returns the tags that are needed for the exporter views.
+func tagsForExporterView(exporter config.ComponentID) []tag.Tag {
+	return []tag.Tag{
+		{Key: exporterTag, Value: exporter.String()},
+	}
+}
+
+func sortTags(tags []tag.Tag) {
+	sort.SliceStable(tags, func(i, j int) bool {
+		return tags[i].Key.Name() < tags[j].Key.Name()
+	})
+}
diff --git a/internal/otel_collector/processor/README.md b/internal/otel_collector/processor/README.md
new file mode 100644
index 00000000000..0bc81f0827d
--- /dev/null
+++ b/internal/otel_collector/processor/README.md
@@ -0,0 +1,234 @@
+# General Information
+
+Processors are used at various stages of a pipeline. Generally, a processor
+pre-processes data before it is exported (e.g. modify attributes or sample) or
+helps ensure that data makes it through a pipeline successfully (e.g.
+batch/retry).
+
+Some important aspects of pipelines and processors to be aware of:
+- [Recommended Processors](#recommended-processors)
+- [Data Ownership](#data-ownership)
+- [Exclusive Ownership](#exclusive-ownership)
+- [Shared Ownership](#shared-ownership)
+- [Ordering Processors](#ordering-processors)
+
+Supported processors (sorted alphabetically):
+- [Attributes Processor](attributesprocessor/README.md)
+- [Batch Processor](batchprocessor/README.md)
+- [Filter Processor](filterprocessor/README.md)
+- [Memory Limiter Processor](memorylimiter/README.md)
+- [Probabilistic Sampling Processor](probabilisticsamplerprocessor/README.md)
+- [Resource Processor](resourceprocessor/README.md)
+- [Span Processor](spanprocessor/README.md)
+
+The [contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib)
+has more processors that can be added to a custom build of the Collector.
+
+## Recommended Processors
+
+By default, no processors are enabled. Depending on the data source, it may be
+recommended that multiple processors be enabled. Processors must be enabled
+for every data source: not all processors support all data sources.
+In addition, it is important to note that the order of processors matters. The
+order in each section below is the best practice. Refer to the individual
+processor documentation for more information.
+
+### Traces
+
+1. [memory_limiter](memorylimiter/README.md)
+2. *any sampling processors*
+3. 
Any processor relying on sending source from `Context` (e.g. `k8s_tagger`)
+4. [batch](batchprocessor/README.md)
+5. *any other processors*
+
+### Metrics
+
+1. [memory_limiter](memorylimiter/README.md)
+2. Any processor relying on sending source from `Context` (e.g. `k8s_tagger`)
+3. [batch](batchprocessor/README.md)
+4. *any other processors*
+
+## Data Ownership
+
+The ownership of the `pdata.Traces`, `pdata.Metrics` and `pdata.Logs` data in a pipeline
+is passed as the data travels through the pipeline. The data is created by the receiver
+and then the ownership is passed to the first processor when the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+function is called.
+
+Note: the receiver may be attached to multiple pipelines, in which case the same data
+will be passed to all attached pipelines via a data fan-out connector.
+
+From a data ownership perspective, pipelines can work in 2 modes:
+* Exclusive data ownership
+* Shared data ownership
+
+The mode is defined during startup based on the data modification intent reported by the
+processors. The intent is reported by each processor via the `MutatesData` field of
+the struct returned by the `Capabilities` function. If any processor in the pipeline
+declares an intent to modify the data then that pipeline will work in exclusive ownership
+mode. In addition, any other pipeline that receives data from a receiver that is attached
+to a pipeline with exclusive ownership mode will also operate in exclusive ownership
+mode.
+
+### Exclusive Ownership
+
+In exclusive ownership mode the data is owned exclusively by a particular processor at a
+given moment of time, and the processor is free to modify the data it owns.
+
+Exclusive ownership mode is only applicable for pipelines that receive data from the
+same receiver. If a pipeline is marked to be in exclusive ownership mode then any data
+received from a shared receiver will be cloned at the fan-out connector before passing
+further to each pipeline. This ensures that each pipeline has its own exclusive copy of
+data, and the data can be safely modified in the pipeline.
+
+The exclusive ownership of data allows processors to freely modify the data while
+they own it (e.g. see `attributesprocessor`). The duration of ownership of the data
+by a processor is from the beginning of the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+call until the processor calls the next processor's `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs`
+function, which passes the ownership to the next processor. After that the processor
+must no longer read or write the data since it may be concurrently modified by the
+new owner.
+
+Exclusive ownership mode makes it easy to implement processors that need to modify
+the data: they simply declare that intent.
+
+### Shared Ownership
+
+In shared ownership mode no particular processor owns the data and no processor is
+allowed to modify the shared data.
+
+In this mode no cloning is performed at the fan-out connector of receivers that
+are attached to multiple pipelines. In this case all such pipelines will see
+the same single shared copy of the data. Processors in pipelines operating in shared
+ownership mode are prohibited from modifying the original data that they receive
+via the `ConsumeTraces`/`ConsumeMetrics`/`ConsumeLogs` call. Processors may only read
+the data but must not modify the data.
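+
+For illustration, a read-only processor could declare its intent like this (a
+minimal sketch; `myProcessor` and the surrounding component wiring are
+hypothetical, only `consumer.Capabilities` comes from the Collector API):
+
+```go
+// Capabilities reports that this processor never mutates the data it
+// receives, so the pipeline does not need to clone data on its behalf.
+func (p *myProcessor) Capabilities() consumer.Capabilities {
+	return consumer.Capabilities{MutatesData: false}
+}
+```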
+
+If the processor needs to modify the data while performing the processing but
+does not want to incur the cost of data cloning that Exclusive mode brings, then
+the processor can declare that it does not modify the data and use a
+different technique that ensures the original data is not modified. For example,
+the processor can implement a copy-on-write approach for individual sub-parts of the
+`pdata.Traces`/`pdata.Metrics`/`pdata.Logs` argument. Any approach that does not
+mutate the original `pdata.Traces`/`pdata.Metrics`/`pdata.Logs` is allowed.
+
+If the processor uses such a technique it should declare that it does not intend
+to modify the original data by setting `MutatesData=false` in its capabilities
+to avoid marking the pipeline for Exclusive ownership and to avoid the cost of
+data cloning described in the Exclusive Ownership section.
+
+## Ordering Processors
+
+The order processors are specified in a pipeline is important as this is the
+order in which each processor is applied to traces and metrics.
+
+### Include/Exclude Metrics
+
+The [filter processor](filterprocessor/README.md) exposes the option to provide a set of
+metric names to match against to determine if the metric should be
+included or excluded from the processor. To configure this option, under
+`include` and/or `exclude` both `match_type` and `metric_names` are required.
+
+Note: If both `include` and `exclude` are specified, the `include` properties
+are checked before the `exclude` properties.
+
+```yaml
+filter:
+    # metrics indicates this processor applies to metrics
+    metrics:
+      # include and/or exclude can be specified. However, the include properties
+      # are always checked before the exclude properties.
+      {include, exclude}:
+        # match_type controls how item matching is done.
+        # Possible values are "regexp" or "strict".
+        # This is a required field.
+        match_type: {strict, regexp}
+
+        # regexp is an optional configuration section for match_type regexp.
+        regexp:
+          # < see "Match Configuration" below >
+
+        # metric_names specify an array of items to match the metric name against.
+        # This is a required field.
+        metric_names: [<item1>, ..., <itemN>]
+```
+
+#### Match Configuration
+
+Some `match_type` values have additional configuration options that can be
+specified. The `match_type` value is the name of the configuration section.
+These sections are optional.
+
+```yaml
+# regexp is an optional configuration section for match_type regexp.
+regexp:
+  # cacheenabled determines whether match results are LRU cached to make subsequent matches faster.
+  # Cache size is unlimited unless cachemaxnumentries is also specified.
+  cacheenabled: <bool>
+  # cachemaxnumentries is the max number of entries of the LRU cache; ignored if cacheenabled is false.
+  cachemaxnumentries: <int>
+```
+
+### Include/Exclude Spans
+
+The [attribute processor](attributesprocessor/README.md) and the [span processor](spanprocessor/README.md) expose
+the option to provide a set of properties of a span to match against to determine
+if the span should be included or excluded from the processor. To configure
+this option, under `include` and/or `exclude` at least `match_type` and one of
+`services`, `span_names` or `attributes` is required.
+
+Note: If both `include` and `exclude` are specified, the `include` properties
+are checked before the `exclude` properties.
+
+```yaml
+{span, attributes}:
+  # include and/or exclude can be specified. However, the include properties
+  # are always checked before the exclude properties.
+  {include, exclude}:
+    # At least one of services, span_names or attributes must be specified.
+    # It is supported to have more than one specified, but all of the specified
+    # conditions must evaluate to true for a match to occur.
+
+    # match_type controls how items in "services" and "span_names" arrays are
+    # interpreted. Possible values are "regexp" or "strict".
+    # This is a required field.
+    match_type: {strict, regexp}
+
+    # regexp is an optional configuration section for match_type regexp.
+    regexp:
+      # < see "Match Configuration" below >
+
+    # services specify an array of items to match the service name against.
+    # A match occurs if the span service name matches at least one of the items.
+    # This is an optional field.
+    services: [<item1>, ..., <itemN>]
+
+    # The span name must match at least one of the items.
+    # This is an optional field.
+    span_names: [<item1>, ..., <itemN>]
+
+    # Attributes specifies the list of attributes to match against.
+    # All of these attributes must match exactly for a match to occur.
+    # This is an optional field.
+    attributes:
+      # Key specifies the attribute to match against.
+      - key: <key>
+        # Value specifies the exact value to match against.
+        # If not specified, a match occurs if the key is present in the attributes.
+        value: {value}
+```
+
+#### Match Configuration
+
+Some `match_type` values have additional configuration options that can be
+specified. The `match_type` value is the name of the configuration section.
+These sections are optional.
+
+```yaml
+# regexp is an optional configuration section for match_type regexp.
+regexp:
+  # cacheenabled determines whether match results are LRU cached to make subsequent matches faster.
+  # Cache size is unlimited unless cachemaxnumentries is also specified.
+  cacheenabled: <bool>
+  # cachemaxnumentries is the max number of entries of the LRU cache; ignored if cacheenabled is false.
+  cachemaxnumentries: <int>
+```
diff --git a/internal/otel_collector/processor/batchprocessor/README.md b/internal/otel_collector/processor/batchprocessor/README.md
new file mode 100644
index 00000000000..5b69aa0166f
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/README.md
@@ -0,0 +1,38 @@
+# Batch Processor
+
+Supported pipeline types: metrics, traces, logs
+
+The batch processor accepts spans, metrics, or logs and places them into
+batches. Batching helps better compress the data and reduce the number of
+outgoing connections required to transmit the data. This processor supports
+both size and time based batching.
+
+It is highly recommended to configure the batch processor on every collector.
+The batch processor should be defined in the pipeline after the `memory_limiter`
+as well as any sampling processors. This is because batching should happen after
+any data drops such as sampling.
+
+Please refer to [config.go](./config.go) for the config spec.
+
+The following configuration options can be modified:
+- `send_batch_size` (default = 8192): Number of spans, metric data points, or log
+records after which a batch will be sent regardless of the timeout.
+- `timeout` (default = 200ms): Time duration after which a batch will be sent
+regardless of size.
+- `send_batch_max_size` (default = 0): The upper limit of the batch size.
+  `0` means no upper limit of the batch size.
+  This property ensures that larger batches are split into smaller units.
+  It must be greater than or equal to `send_batch_size`.
+
+Examples:
+
+```yaml
+processors:
+  batch:
+  batch/2:
+    send_batch_size: 10000
+    timeout: 10s
+```
+
+Refer to [config.yaml](./testdata/config.yaml) for detailed
+examples on using the processor.
diff --git a/internal/otel_collector/processor/batchprocessor/batch_processor.go b/internal/otel_collector/processor/batchprocessor/batch_processor.go
new file mode 100644
index 00000000000..c7ccab58003
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/batch_processor.go
@@ -0,0 +1,351 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+	"context"
+	"runtime"
+	"sync"
+	"time"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/model/otlp"
+	"go.opentelemetry.io/collector/model/pdata"
+)
+
+// batch_processor is a component that accepts spans, metrics, or logs, places them
+// into batches and sends them downstream.
+//
+// batch_processor implements consumer.Traces, consumer.Metrics and consumer.Logs
+//
+// Batches are sent out with any of the following conditions:
+// - batch size reaches cfg.SendBatchSize
+// - cfg.Timeout is elapsed since the timestamp when the previous batch was sent out.
+type batchProcessor struct {
+	logger           *zap.Logger
+	exportCtx        context.Context
+	timer            *time.Timer
+	timeout          time.Duration
+	sendBatchSize    int
+	sendBatchMaxSize int
+
+	newItem chan interface{}
+	batch   batch
+
+	shutdownC  chan struct{}
+	goroutines sync.WaitGroup
+
+	telemetryLevel configtelemetry.Level
+}
+
+type batch interface {
+	// export the current batch
+	export(ctx context.Context, sendBatchMaxSize int) error
+
+	// itemCount returns the size of the current batch
+	itemCount() int
+
+	// size returns the size in bytes of the current batch
+	size() int
+
+	// add item to the current batch
+	add(item interface{})
+}
+
+var _ consumer.Traces = (*batchProcessor)(nil)
+var _ consumer.Metrics = (*batchProcessor)(nil)
+var _ consumer.Logs = (*batchProcessor)(nil)
+
+func newBatchProcessor(set component.ProcessorCreateSettings, cfg *Config, batch batch, telemetryLevel configtelemetry.Level) (*batchProcessor, error) {
+	exportCtx, err := tag.New(context.Background(), tag.Insert(processorTagKey, cfg.ID().String()))
+	if err != nil {
+		return nil, err
+	}
+	return &batchProcessor{
+		logger:         set.Logger,
+		exportCtx:      exportCtx,
+		telemetryLevel: telemetryLevel,
+
+		sendBatchSize:    int(cfg.SendBatchSize),
+		sendBatchMaxSize: int(cfg.SendBatchMaxSize),
+		timeout:          cfg.Timeout,
+		newItem:          make(chan interface{}, runtime.NumCPU()),
+		batch:            batch,
+		shutdownC:        make(chan struct{}, 1),
+	}, nil
+}
+
+func (bp *batchProcessor) Capabilities() consumer.Capabilities {
+	return consumer.Capabilities{MutatesData: true}
+}
+
+// Start is invoked during service startup.
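+// It starts the background goroutine that flushes batches when the size or
+// timeout threshold is reached.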
+func (bp *batchProcessor) Start(context.Context, component.Host) error {
+	bp.goroutines.Add(1)
+	go bp.startProcessingCycle()
+	return nil
+}
+
+// Shutdown is invoked during service shutdown.
+func (bp *batchProcessor) Shutdown(context.Context) error {
+	close(bp.shutdownC)
+
+	// Wait until all goroutines are done.
+	bp.goroutines.Wait()
+	return nil
+}
+
+func (bp *batchProcessor) startProcessingCycle() {
+	defer bp.goroutines.Done()
+	bp.timer = time.NewTimer(bp.timeout)
+	for {
+		select {
+		case <-bp.shutdownC:
+		DONE:
+			for {
+				select {
+				case item := <-bp.newItem:
+					bp.processItem(item)
+				default:
+					break DONE
+				}
+			}
+			// The shutdown channel has been closed and drained; flush any
+			// remaining items before returning.
+			if bp.batch.itemCount() > 0 {
+				// TODO: Set a timeout on sendTraces or
+				// make it cancellable using the context that Shutdown gets as a parameter
+				bp.sendItems(statTimeoutTriggerSend)
+			}
+			return
+		case item := <-bp.newItem:
+			if item == nil {
+				continue
+			}
+			bp.processItem(item)
+		case <-bp.timer.C:
+			if bp.batch.itemCount() > 0 {
+				bp.sendItems(statTimeoutTriggerSend)
+			}
+			bp.resetTimer()
+		}
+	}
+}
+
+func (bp *batchProcessor) processItem(item interface{}) {
+	bp.batch.add(item)
+	sent := false
+	for bp.batch.itemCount() >= bp.sendBatchSize {
+		sent = true
+		bp.sendItems(statBatchSizeTriggerSend)
+	}
+
+	if sent {
+		bp.stopTimer()
+		bp.resetTimer()
+	}
+}
+
+func (bp *batchProcessor) stopTimer() {
+	if !bp.timer.Stop() {
+		<-bp.timer.C
+	}
+}
+
+func (bp *batchProcessor) resetTimer() {
+	bp.timer.Reset(bp.timeout)
+}
+
+func (bp *batchProcessor) sendItems(triggerMeasure *stats.Int64Measure) {
+	// Add that it came from the trace pipeline?
+	stats.Record(bp.exportCtx, triggerMeasure.M(1), statBatchSendSize.M(int64(bp.batch.itemCount())))
+
+	if bp.telemetryLevel == configtelemetry.LevelDetailed {
+		stats.Record(bp.exportCtx, statBatchSendSizeBytes.M(int64(bp.batch.size())))
+	}
+
+	if err := bp.batch.export(bp.exportCtx, bp.sendBatchMaxSize); err != nil {
+		bp.logger.Warn("Sender failed", zap.Error(err))
+	}
+}
+
+// ConsumeTraces implements TracesProcessor
+func (bp *batchProcessor) ConsumeTraces(_ context.Context, td pdata.Traces) error {
+	bp.newItem <- td
+	return nil
+}
+
+// ConsumeMetrics implements MetricsProcessor
+func (bp *batchProcessor) ConsumeMetrics(_ context.Context, md pdata.Metrics) error {
+	bp.newItem <- md
+	return nil
+}
+
+// ConsumeLogs implements LogsProcessor
+func (bp *batchProcessor) ConsumeLogs(_ context.Context, ld pdata.Logs) error {
+	bp.newItem <- ld
+	return nil
+}
+
+// newBatchTracesProcessor creates a new batch processor that batches traces by size or with timeout
+func newBatchTracesProcessor(set component.ProcessorCreateSettings, next consumer.Traces, cfg *Config, telemetryLevel configtelemetry.Level) (*batchProcessor, error) {
+	return newBatchProcessor(set, cfg, newBatchTraces(next), telemetryLevel)
+}
+
+// newBatchMetricsProcessor creates a new batch processor that batches metrics by size or with timeout
+func newBatchMetricsProcessor(set component.ProcessorCreateSettings, next consumer.Metrics, cfg *Config, telemetryLevel configtelemetry.Level) (*batchProcessor, error) {
+	return newBatchProcessor(set, cfg, newBatchMetrics(next), telemetryLevel)
+}
+
+// newBatchLogsProcessor creates a new batch processor that batches logs by size or with timeout
+func newBatchLogsProcessor(set component.ProcessorCreateSettings, next consumer.Logs, cfg *Config, telemetryLevel configtelemetry.Level) (*batchProcessor, error) {
+	
return newBatchProcessor(set, cfg, newBatchLogs(next), telemetryLevel) +} + +type batchTraces struct { + nextConsumer consumer.Traces + traceData pdata.Traces + spanCount int + sizer pdata.TracesSizer +} + +func newBatchTraces(nextConsumer consumer.Traces) *batchTraces { + return &batchTraces{nextConsumer: nextConsumer, traceData: pdata.NewTraces(), sizer: otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer)} +} + +// add updates current batchTraces by adding new TraceData object +func (bt *batchTraces) add(item interface{}) { + td := item.(pdata.Traces) + newSpanCount := td.SpanCount() + if newSpanCount == 0 { + return + } + + bt.spanCount += newSpanCount + td.ResourceSpans().MoveAndAppendTo(bt.traceData.ResourceSpans()) +} + +func (bt *batchTraces) export(ctx context.Context, sendBatchMaxSize int) error { + var req pdata.Traces + if sendBatchMaxSize > 0 && bt.itemCount() > sendBatchMaxSize { + req = splitTraces(sendBatchMaxSize, bt.traceData) + bt.spanCount -= sendBatchMaxSize + } else { + req = bt.traceData + bt.traceData = pdata.NewTraces() + bt.spanCount = 0 + } + return bt.nextConsumer.ConsumeTraces(ctx, req) +} + +func (bt *batchTraces) itemCount() int { + return bt.spanCount +} + +func (bt *batchTraces) size() int { + return bt.sizer.TracesSize(bt.traceData) +} + +type batchMetrics struct { + nextConsumer consumer.Metrics + metricData pdata.Metrics + dataPointCount int + sizer pdata.MetricsSizer +} + +func newBatchMetrics(nextConsumer consumer.Metrics) *batchMetrics { + return &batchMetrics{nextConsumer: nextConsumer, metricData: pdata.NewMetrics(), sizer: otlp.NewProtobufMetricsMarshaler().(pdata.MetricsSizer)} +} + +func (bm *batchMetrics) export(ctx context.Context, sendBatchMaxSize int) error { + var req pdata.Metrics + if sendBatchMaxSize > 0 && bm.dataPointCount > sendBatchMaxSize { + req = splitMetrics(sendBatchMaxSize, bm.metricData) + bm.dataPointCount -= sendBatchMaxSize + } else { + req = bm.metricData + bm.metricData = pdata.NewMetrics() + bm.dataPointCount = 0 + } + return bm.nextConsumer.ConsumeMetrics(ctx, req) +} + +func (bm *batchMetrics) itemCount() int { + return bm.dataPointCount +} + +func (bm *batchMetrics) size() int { + return bm.sizer.MetricsSize(bm.metricData) +} + +func (bm *batchMetrics) add(item interface{}) { + md := item.(pdata.Metrics) + + newDataPointCount := md.DataPointCount() + if newDataPointCount == 0 { + return + } + bm.dataPointCount += newDataPointCount + md.ResourceMetrics().MoveAndAppendTo(bm.metricData.ResourceMetrics()) +} + +type batchLogs struct { + nextConsumer consumer.Logs + logData pdata.Logs + logCount int + sizer pdata.LogsSizer +} + +func newBatchLogs(nextConsumer consumer.Logs) *batchLogs { + return &batchLogs{nextConsumer: nextConsumer, logData: pdata.NewLogs(), sizer: otlp.NewProtobufLogsMarshaler().(pdata.LogsSizer)} +} + +func (bl *batchLogs) export(ctx context.Context, sendBatchMaxSize int) error { + var req pdata.Logs + if sendBatchMaxSize > 0 && bl.logCount > sendBatchMaxSize { + req = splitLogs(sendBatchMaxSize, bl.logData) + bl.logCount -= sendBatchMaxSize + } else { + req = bl.logData + bl.logData = pdata.NewLogs() + bl.logCount = 0 + } + return bl.nextConsumer.ConsumeLogs(ctx, req) +} + +func (bl *batchLogs) itemCount() int { + return bl.logCount +} + +func (bl *batchLogs) size() int { + return bl.sizer.LogsSize(bl.logData) +} + +func (bl *batchLogs) add(item interface{}) { + ld := item.(pdata.Logs) + + newLogsCount := ld.LogRecordCount() + if newLogsCount == 0 { + return + } + bl.logCount += newLogsCount + 
ld.ResourceLogs().MoveAndAppendTo(bl.logData.ResourceLogs()) +} diff --git a/internal/otel_collector/processor/batchprocessor/config.go b/internal/otel_collector/processor/batchprocessor/config.go new file mode 100644 index 00000000000..eb25d77c6e3 --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/config.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchprocessor + +import ( + "errors" + "time" + + "go.opentelemetry.io/collector/config" +) + +// Config defines configuration for batch processor. +type Config struct { + config.ProcessorSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + // Timeout sets the time after which a batch will be sent regardless of size. + Timeout time.Duration `mapstructure:"timeout,omitempty"` + + // SendBatchSize is the size of a batch which after hit, will trigger it to be sent. + SendBatchSize uint32 `mapstructure:"send_batch_size,omitempty"` + + // SendBatchMaxSize is the maximum size of a batch. It must be larger than SendBatchSize. + // Larger batches are split into smaller units. + // Default value is 0, that means no maximum size. + SendBatchMaxSize uint32 `mapstructure:"send_batch_max_size,omitempty"` +} + +var _ config.Processor = (*Config)(nil) + +// Validate checks if the processor configuration is valid +func (cfg *Config) Validate() error { + if cfg.SendBatchMaxSize > 0 && cfg.SendBatchMaxSize < cfg.SendBatchSize { + return errors.New("send_batch_max_size must be greater or equal to send_batch_size") + } + return nil +} diff --git a/internal/otel_collector/processor/batchprocessor/factory.go b/internal/otel_collector/processor/batchprocessor/factory.go new file mode 100644 index 00000000000..42cf1d858bf --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/factory.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchprocessor + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" key in configuration. 
+ typeStr = "batch" + + defaultSendBatchSize = uint32(8192) + defaultTimeout = 200 * time.Millisecond +) + +// NewFactory returns a new factory for the Batch processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTracesProcessor), + processorhelper.WithMetrics(createMetricsProcessor), + processorhelper.WithLogs(createLogsProcessor)) +} + +func createDefaultConfig() config.Processor { + return &Config{ + ProcessorSettings: config.NewProcessorSettings(config.NewID(typeStr)), + SendBatchSize: defaultSendBatchSize, + Timeout: defaultTimeout, + } +} + +func createTracesProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Traces, +) (component.TracesProcessor, error) { + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchTracesProcessor(set, nextConsumer, cfg.(*Config), level) +} + +func createMetricsProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Metrics, +) (component.MetricsProcessor, error) { + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchMetricsProcessor(set, nextConsumer, cfg.(*Config), level) +} + +func createLogsProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Logs, +) (component.LogsProcessor, error) { + level := configtelemetry.GetMetricsLevelFlagValue() + return newBatchLogsProcessor(set, nextConsumer, cfg.(*Config), level) +} diff --git a/internal/otel_collector/processor/batchprocessor/metrics.go b/internal/otel_collector/processor/batchprocessor/metrics.go new file mode 100644 index 00000000000..8ae203e915d --- /dev/null +++ b/internal/otel_collector/processor/batchprocessor/metrics.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package batchprocessor
+
+import (
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+
+ "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+ "go.opentelemetry.io/collector/obsreport"
+)
+
+var (
+ processorTagKey = tag.MustNewKey(obsmetrics.ProcessorKey)
+ statBatchSizeTriggerSend = stats.Int64("batch_size_trigger_send", "Number of times the batch was sent due to a size trigger", stats.UnitDimensionless)
+ statTimeoutTriggerSend = stats.Int64("timeout_trigger_send", "Number of times the batch was sent due to a timeout trigger", stats.UnitDimensionless)
+ statBatchSendSize = stats.Int64("batch_send_size", "Number of units in the batch", stats.UnitDimensionless)
+ statBatchSendSizeBytes = stats.Int64("batch_send_size_bytes", "Number of bytes in the batch that was sent", stats.UnitBytes)
+)
+
+// MetricViews returns the metrics views related to batching
+func MetricViews() []*view.View {
+ processorTagKeys := []tag.Key{processorTagKey}
+
+ countBatchSizeTriggerSendView := &view.View{
+ Name: obsreport.BuildProcessorCustomMetricName(typeStr, statBatchSizeTriggerSend.Name()),
+ Measure: statBatchSizeTriggerSend,
+ Description: statBatchSizeTriggerSend.Description(),
+ TagKeys: processorTagKeys,
+ Aggregation: view.Sum(),
+ }
+
+ countTimeoutTriggerSendView := &view.View{
+ Name: obsreport.BuildProcessorCustomMetricName(typeStr, statTimeoutTriggerSend.Name()),
+ Measure: statTimeoutTriggerSend,
+ Description: statTimeoutTriggerSend.Description(),
+ TagKeys: processorTagKeys,
+ Aggregation: view.Sum(),
+ }
+
+ distributionBatchSendSizeView := &view.View{
+ Name: obsreport.BuildProcessorCustomMetricName(typeStr, statBatchSendSize.Name()),
+ Measure: statBatchSendSize,
+ Description: statBatchSendSize.Description(),
+ TagKeys: processorTagKeys,
+ Aggregation: view.Distribution(10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, 100000),
+ }
+
+ distributionBatchSendSizeBytesView := &view.View{
+ Name: obsreport.BuildProcessorCustomMetricName(typeStr, statBatchSendSizeBytes.Name()),
+ Measure: statBatchSendSizeBytes,
+ Description: statBatchSendSizeBytes.Description(),
+ TagKeys: processorTagKeys,
+ Aggregation: view.Distribution(10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000,
+ 100_000, 200_000, 300_000, 400_000, 500_000, 600_000, 700_000, 800_000, 900_000,
+ 1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000),
+ }
+
+ return []*view.View{
+ countBatchSizeTriggerSendView,
+ countTimeoutTriggerSendView,
+ distributionBatchSendSizeView,
+ distributionBatchSendSizeBytesView,
+ }
+}
diff --git a/internal/otel_collector/processor/batchprocessor/splitlogs.go b/internal/otel_collector/processor/batchprocessor/splitlogs.go
new file mode 100644
index 00000000000..2d6637dd1dc
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/splitlogs.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// splitLogs removes log records from the input data and returns new data of the specified size.
+func splitLogs(size int, src pdata.Logs) pdata.Logs {
+ if src.LogRecordCount() <= size {
+ return src
+ }
+ totalCopiedLogs := 0
+ dest := pdata.NewLogs()
+
+ src.ResourceLogs().RemoveIf(func(srcRs pdata.ResourceLogs) bool {
+ // If we are done skip everything else.
+ if totalCopiedLogs == size {
+ return false
+ }
+
+ destRs := dest.ResourceLogs().AppendEmpty()
+ srcRs.Resource().CopyTo(destRs.Resource())
+
+ srcRs.InstrumentationLibraryLogs().RemoveIf(func(srcIlm pdata.InstrumentationLibraryLogs) bool {
+ // If we are done skip everything else.
+ if totalCopiedLogs == size {
+ return false
+ }
+
+ destIlm := destRs.InstrumentationLibraryLogs().AppendEmpty()
+ srcIlm.InstrumentationLibrary().CopyTo(destIlm.InstrumentationLibrary())
+
+ // If possible to move all logs do that.
+ srcLogsLen := srcIlm.Logs().Len()
+ if size >= srcLogsLen+totalCopiedLogs {
+ totalCopiedLogs += srcLogsLen
+ srcIlm.Logs().MoveAndAppendTo(destIlm.Logs())
+ return true
+ }
+
+ srcIlm.Logs().RemoveIf(func(srcLogRecord pdata.LogRecord) bool {
+ // If we are done skip everything else.
+ if totalCopiedLogs == size {
+ return false
+ }
+ srcLogRecord.CopyTo(destIlm.Logs().AppendEmpty())
+ totalCopiedLogs++
+ return true
+ })
+ return false
+ })
+ return srcRs.InstrumentationLibraryLogs().Len() == 0
+ })
+
+ return dest
+}
diff --git a/internal/otel_collector/processor/batchprocessor/splitmetrics.go b/internal/otel_collector/processor/batchprocessor/splitmetrics.go
new file mode 100644
index 00000000000..b168936d63b
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/splitmetrics.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// splitMetrics removes metrics from the input data and returns new data of the specified size.
+func splitMetrics(size int, src pdata.Metrics) pdata.Metrics {
+ dataPoints := src.DataPointCount()
+ if dataPoints <= size {
+ return src
+ }
+ totalCopiedDataPoints := 0
+ dest := pdata.NewMetrics()
+
+ src.ResourceMetrics().RemoveIf(func(srcRs pdata.ResourceMetrics) bool {
+ // If we are done skip everything else.
+ if totalCopiedDataPoints == size {
+ return false
+ }
+
+ destRs := dest.ResourceMetrics().AppendEmpty()
+ srcRs.Resource().CopyTo(destRs.Resource())
+
+ srcRs.InstrumentationLibraryMetrics().RemoveIf(func(srcIlm pdata.InstrumentationLibraryMetrics) bool {
+ // If we are done skip everything else.
+ if totalCopiedDataPoints == size {
+ return false
+ }
+
+ destIlm := destRs.InstrumentationLibraryMetrics().AppendEmpty()
+ srcIlm.InstrumentationLibrary().CopyTo(destIlm.InstrumentationLibrary())
+
+ // If possible to move all metrics do that.
+ srcDataPointCount := metricSliceDataPointCount(srcIlm.Metrics())
+ if size-totalCopiedDataPoints >= srcDataPointCount {
+ totalCopiedDataPoints += srcDataPointCount
+ srcIlm.Metrics().MoveAndAppendTo(destIlm.Metrics())
+ return true
+ }
+
+ srcIlm.Metrics().RemoveIf(func(srcMetric pdata.Metric) bool {
+ // If we are done skip everything else.
+ if totalCopiedDataPoints == size {
+ return false
+ }
+ // If the metric has more data points than free slots, we should split it.
+ copiedDataPoints, remove := splitMetric(srcMetric, destIlm.Metrics().AppendEmpty(), size-totalCopiedDataPoints)
+ totalCopiedDataPoints += copiedDataPoints
+ return remove
+ })
+ return false
+ })
+ return srcRs.InstrumentationLibraryMetrics().Len() == 0
+ })
+
+ return dest
+}
+
+// metricSliceDataPointCount calculates the total number of data points.
+func metricSliceDataPointCount(ms pdata.MetricSlice) (dataPointCount int) {
+ for k := 0; k < ms.Len(); k++ {
+ dataPointCount += metricDataPointCount(ms.At(k))
+ }
+ return
+}
+
+// metricDataPointCount calculates the total number of data points.
+func metricDataPointCount(ms pdata.Metric) (dataPointCount int) {
+ switch ms.DataType() {
+ case pdata.MetricDataTypeGauge:
+ dataPointCount = ms.Gauge().DataPoints().Len()
+ case pdata.MetricDataTypeSum:
+ dataPointCount = ms.Sum().DataPoints().Len()
+ case pdata.MetricDataTypeHistogram:
+ dataPointCount = ms.Histogram().DataPoints().Len()
+ case pdata.MetricDataTypeSummary:
+ dataPointCount = ms.Summary().DataPoints().Len()
+ }
+ return
+}
+
+// splitMetric removes metric points from the input data and moves data of the specified size to destination.
+// It returns the size of the moved data and a boolean indicating whether the metric should be removed from the original slice.
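+// The first size data points of each data-point slice are copied to dest and
+// then removed from ms.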
+func splitMetric(ms, dest pdata.Metric, size int) (int, bool) {
+ if metricDataPointCount(ms) <= size {
+ ms.CopyTo(dest)
+ return metricDataPointCount(ms), true
+ }
+
+ // filterDataPoints returns true for the first size data points, i.e. exactly
+ // the points that are copied to dest below, so RemoveIf drops them from ms.
+ i := 0
+ filterDataPoints := func() bool { i++; return i <= size }
+
+ dest.SetDataType(ms.DataType())
+ dest.SetName(ms.Name())
+ dest.SetDescription(ms.Description())
+ dest.SetUnit(ms.Unit())
+
+ switch ms.DataType() {
+ case pdata.MetricDataTypeGauge:
+ src := ms.Gauge().DataPoints()
+ dst := dest.Gauge().DataPoints()
+ dst.EnsureCapacity(size)
+ for j := 0; j < size; j++ {
+ src.At(j).CopyTo(dst.AppendEmpty())
+ }
+ src.RemoveIf(func(_ pdata.NumberDataPoint) bool {
+ return filterDataPoints()
+ })
+ case pdata.MetricDataTypeSum:
+ src := ms.Sum().DataPoints()
+ dst := dest.Sum().DataPoints()
+ dst.EnsureCapacity(size)
+ for j := 0; j < size; j++ {
+ src.At(j).CopyTo(dst.AppendEmpty())
+ }
+ src.RemoveIf(func(_ pdata.NumberDataPoint) bool {
+ return filterDataPoints()
+ })
+ case pdata.MetricDataTypeHistogram:
+ src := ms.Histogram().DataPoints()
+ dst := dest.Histogram().DataPoints()
+ dst.EnsureCapacity(size)
+ for j := 0; j < size; j++ {
+ src.At(j).CopyTo(dst.AppendEmpty())
+ }
+ src.RemoveIf(func(_ pdata.HistogramDataPoint) bool {
+ return filterDataPoints()
+ })
+ case pdata.MetricDataTypeSummary:
+ src := ms.Summary().DataPoints()
+ dst := dest.Summary().DataPoints()
+ dst.EnsureCapacity(size)
+ for j := 0; j < size; j++ {
+ src.At(j).CopyTo(dst.AppendEmpty())
+ }
+ src.RemoveIf(func(_ pdata.SummaryDataPoint) bool {
+ return filterDataPoints()
+ })
+ }
+ return size, false
+}
diff --git a/internal/otel_collector/processor/batchprocessor/splittraces.go b/internal/otel_collector/processor/batchprocessor/splittraces.go
new file mode 100644
index 00000000000..46ff7f933ac
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/splittraces.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchprocessor
+
+import (
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// splitTraces removes spans from the input trace and returns a new trace of the specified size.
+func splitTraces(size int, src pdata.Traces) pdata.Traces {
+ if src.SpanCount() <= size {
+ return src
+ }
+ totalCopiedSpans := 0
+ dest := pdata.NewTraces()
+
+ src.ResourceSpans().RemoveIf(func(srcRs pdata.ResourceSpans) bool {
+ // If we are done skip everything else.
+ if totalCopiedSpans == size {
+ return false
+ }
+
+ destRs := dest.ResourceSpans().AppendEmpty()
+ srcRs.Resource().CopyTo(destRs.Resource())
+
+ srcRs.InstrumentationLibrarySpans().RemoveIf(func(srcIls pdata.InstrumentationLibrarySpans) bool {
+ // If we are done skip everything else.
+ if totalCopiedSpans == size {
+ return false
+ }
+
+ destIls := destRs.InstrumentationLibrarySpans().AppendEmpty()
+ srcIls.InstrumentationLibrary().CopyTo(destIls.InstrumentationLibrary())
+
+ // If possible to move all spans do that.
+ srcSpansLen := srcIls.Spans().Len()
+ if size-totalCopiedSpans >= srcSpansLen {
+ totalCopiedSpans += srcSpansLen
+ srcIls.Spans().MoveAndAppendTo(destIls.Spans())
+ return true
+ }
+
+ srcIls.Spans().RemoveIf(func(srcSpan pdata.Span) bool {
+ // If we are done skip everything else.
+ if totalCopiedSpans == size {
+ return false
+ }
+ srcSpan.CopyTo(destIls.Spans().AppendEmpty())
+ totalCopiedSpans++
+ return true
+ })
+ return false
+ })
+ return srcRs.InstrumentationLibrarySpans().Len() == 0
+ })
+
+ return dest
+}
diff --git a/internal/otel_collector/processor/batchprocessor/testdata/config.yaml b/internal/otel_collector/processor/batchprocessor/testdata/config.yaml
new file mode 100644
index 00000000000..95ace7436ef
--- /dev/null
+++ b/internal/otel_collector/processor/batchprocessor/testdata/config.yaml
@@ -0,0 +1,19 @@
+receivers:
+ nop:
+
+processors:
+ batch:
+ batch/2:
+ timeout: 10s
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+
+exporters:
+ nop:
+
+service:
+ pipelines:
+ traces:
+ receivers: [nop]
+ processors: [batch/2]
+ exporters: [nop]
diff --git a/internal/otel_collector/processor/memorylimiter/README.md b/internal/otel_collector/processor/memorylimiter/README.md
new file mode 100644
index 00000000000..cc89f5c585f
--- /dev/null
+++ b/internal/otel_collector/processor/memorylimiter/README.md
@@ -0,0 +1,103 @@
+# Memory Limiter Processor
+
+Supported pipeline types: metrics, traces, logs
+
+The memory limiter processor is used to prevent out of memory situations on
+the collector. Given that the amount and type of data the collector processes is
+environment specific, and that the collector's resource utilization also depends
+on the configured processors, it is important to put checks in place regarding
+memory usage.
+
+The memory_limiter processor performs periodic checks of memory usage; if usage
+exceeds the defined limits, it begins dropping data and forcing garbage
+collection to reduce memory consumption.
+
+The memory_limiter uses soft and hard memory limits. The hard limit is always
+above or equal to the soft limit.
+
+When memory usage exceeds the soft limit, the processor starts dropping data and
+returning errors to the preceding component in the pipeline (which should
+normally be a receiver).
+
+When memory usage is above the hard limit, in addition to dropping data the
+processor forcibly performs garbage collection in order to try to free memory.
+
+When memory usage drops below the soft limit, normal operation resumes (data is
+no longer dropped and no forced garbage collection is performed).
+
+The difference between the soft and hard limits is defined via the `spike_limit_mib`
+configuration option. The value of this option should be selected in a way that ensures
+that between the memory check intervals the memory usage cannot increase by more than this
+value (otherwise memory usage may exceed the hard limit, even if temporarily).
+A good starting point for `spike_limit_mib` is 20% of the hard limit. Bigger
+`spike_limit_mib` values may be necessary for spiky traffic or for longer check intervals.
+
+In addition, if the ballast size is specified in [ballastextension](../../extension/ballastextension),
+the same value that is provided via the `ballastextension` will be used by the
+`memory_limiter` when calculating the total allocated memory for the collector.
+The `memory_limiter.ballast_size_mib` config has been deprecated and will be removed soon.
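+
+For example, a collector that pairs the ballast extension with this processor
+might be configured along these lines (a sketch only; it assumes the ballast
+extension is registered under the `memory_ballast` type with a `size_mib`
+setting):
+
+```yaml
+extensions:
+ memory_ballast:
+ size_mib: 683
+
+processors:
+ memory_limiter:
+ check_interval: 1s
+ limit_mib: 2000
+ spike_limit_mib: 400
+```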
+
+Note that while the processor can help mitigate out of memory situations,
+it is not a replacement for properly sizing and configuring the
+collector. Keep in mind that if the soft limit is crossed, the collector will
+return errors to all receive operations until enough memory is freed. This will
+result in dropped data.
+
+It is highly recommended to configure the `ballastextension` as well as the
+`memory_limiter` processor on every collector. The ballast should be configured to
+be 1/3 to 1/2 of the memory allocated to the collector. The memory_limiter
+processor should be the first processor defined in the pipeline (immediately after
+the receivers). This is to ensure that backpressure can be sent to applicable
+receivers and to minimize the likelihood of dropped data when the memory_limiter gets
+triggered.
+
+Please refer to [config.go](./config.go) for the config spec.
+
+The following configuration options **must be changed**:
+- `check_interval` (default = 0s): Time between measurements of memory
+usage. The recommended value is 1 second.
+If the expected traffic to the Collector is very spiky, then decrease the `check_interval`
+or increase `spike_limit_mib` to avoid memory usage going over the hard limit.
+- `limit_mib` (default = 0): Maximum amount of memory, in MiB, targeted to be
+allocated by the process heap. Note that typically the total memory usage of the
+process will be about 50MiB higher than this value. This defines the hard limit.
+- `spike_limit_mib` (default = 20% of `limit_mib`): Maximum spike expected between the
+measurements of memory usage. The value must be less than `limit_mib`. The soft limit
+value will be equal to (limit_mib - spike_limit_mib).
+The recommended value for `spike_limit_mib` is about 20% of `limit_mib`.
+- `limit_percentage` (default = 0): Maximum amount of total memory targeted to be
+allocated by the process heap. This configuration is supported on Linux systems with cgroups
+and is intended to be used in dynamic platforms like Docker.
+This option is used to calculate `memory_limit` from the total available memory.
+For instance, a setting of 75% with a total memory of 1GiB will result in a limit of 750MiB.
+The fixed memory setting (`limit_mib`) takes precedence
+over the percentage configuration.
+- `spike_limit_percentage` (default = 0): Maximum spike expected between the
+measurements of memory usage. The value must be less than `limit_percentage`.
+This option is used to calculate `spike_limit_mib` from the total available memory.
+For instance, a setting of 25% with a total memory of 1GiB will result in a spike limit of 250MiB.
+This option is intended to be used only with `limit_percentage`.
+
+The `ballast_size_mib` configuration has been deprecated and replaced by the
+`ballastextension`.
+- `ballast_size_mib` (default = 0): Must match the value of `ballast_size_mib` in the `ballastextension` config
+
+Examples:
+
+```yaml
+processors:
+ memory_limiter:
+ check_interval: 1s
+ limit_mib: 4000
+ spike_limit_mib: 800
+```
+
+```yaml
+processors:
+ memory_limiter:
+ check_interval: 1s
+ limit_percentage: 50
+ spike_limit_percentage: 30
+```
+
+Refer to [config.yaml](./testdata/config.yaml) for detailed
+examples on using the processor.
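+
+Putting the ordering advice above together, a minimal pipeline sketch (the
+`otlp` receiver and `logging` exporter here are illustrative, not part of this
+processor's configuration) could look like:
+
+```yaml
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [memory_limiter, batch]
+ exporters: [logging]
+```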
diff --git a/internal/otel_collector/processor/memorylimiter/config.go b/internal/otel_collector/processor/memorylimiter/config.go new file mode 100644 index 00000000000..a987094bd46 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/config.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package memorylimiter provides a processor for OpenTelemetry Service pipeline +// that drops data on the pipeline according to the current state of memory +// usage. +package memorylimiter + +import ( + "time" + + "go.opentelemetry.io/collector/config" +) + +// Config defines configuration for memory memoryLimiter processor. +type Config struct { + config.ProcessorSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + + // CheckInterval is the time between measurements of memory usage for the + // purposes of avoiding going over the limits. Defaults to zero, so no + // checks will be performed. + CheckInterval time.Duration `mapstructure:"check_interval"` + + // MemoryLimitMiB is the maximum amount of memory, in MiB, targeted to be + // allocated by the process. + MemoryLimitMiB uint32 `mapstructure:"limit_mib"` + + // MemorySpikeLimitMiB is the maximum, in MiB, spike expected between the + // measurements of memory usage. + MemorySpikeLimitMiB uint32 `mapstructure:"spike_limit_mib"` + + // BallastSizeMiB is the size, in MiB, of the ballast size being used by the + // process. + // Deprecated: use the ballast size configuration in `ballastextension` component instead. + BallastSizeMiB uint32 `mapstructure:"ballast_size_mib"` + + // MemoryLimitPercentage is the maximum amount of memory, in %, targeted to be + // allocated by the process. The fixed memory settings MemoryLimitMiB has a higher precedence. + MemoryLimitPercentage uint32 `mapstructure:"limit_percentage"` + // MemorySpikePercentage is the maximum, in percents against the total memory, + // spike expected between the measurements of memory usage. + MemorySpikePercentage uint32 `mapstructure:"spike_limit_percentage"` +} + +var _ config.Processor = (*Config)(nil) + +// Validate checks if the processor configuration is valid +func (cfg *Config) Validate() error { + return nil +} + +// Name of BallastSizeMiB config option. +const ballastSizeMibKey = "ballast_size_mib" diff --git a/internal/otel_collector/processor/memorylimiter/factory.go b/internal/otel_collector/processor/memorylimiter/factory.go new file mode 100644 index 00000000000..e560f80edac --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/factory.go @@ -0,0 +1,104 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memorylimiter + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" Attribute Key in configuration. + typeStr = "memory_limiter" +) + +var processorCapabilities = consumer.Capabilities{MutatesData: false} + +// NewFactory returns a new factory for the Memory Limiter processor. +func NewFactory() component.ProcessorFactory { + return processorhelper.NewFactory( + typeStr, + createDefaultConfig, + processorhelper.WithTraces(createTracesProcessor), + processorhelper.WithMetrics(createMetricsProcessor), + processorhelper.WithLogs(createLogsProcessor)) +} + +// CreateDefaultConfig creates the default configuration for processor. Notice +// that the default configuration is expected to fail for this processor. +func createDefaultConfig() config.Processor { + return &Config{ + ProcessorSettings: config.NewProcessorSettings(config.NewID(typeStr)), + } +} + +func createTracesProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Traces, +) (component.TracesProcessor, error) { + ml, err := newMemoryLimiter(set.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewTracesProcessor( + cfg, + nextConsumer, + ml.processTraces, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithStart(ml.start), + processorhelper.WithShutdown(ml.shutdown)) +} + +func createMetricsProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Metrics, +) (component.MetricsProcessor, error) { + ml, err := newMemoryLimiter(set.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewMetricsProcessor( + cfg, + nextConsumer, + ml.processMetrics, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithShutdown(ml.shutdown)) +} + +func createLogsProcessor( + _ context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Logs, +) (component.LogsProcessor, error) { + ml, err := newMemoryLimiter(set.Logger, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewLogsProcessor( + cfg, + nextConsumer, + ml.processLogs, + processorhelper.WithCapabilities(processorCapabilities), + processorhelper.WithShutdown(ml.shutdown)) +} diff --git a/internal/otel_collector/processor/memorylimiter/memorylimiter.go b/internal/otel_collector/processor/memorylimiter/memorylimiter.go new file mode 100644 index 00000000000..425fc30a929 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/memorylimiter.go @@ -0,0 +1,338 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memorylimiter + +import ( + "context" + "errors" + "fmt" + "runtime" + "sync/atomic" + "time" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/extension/ballastextension" + "go.opentelemetry.io/collector/internal/iruntime" + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + mibBytes = 1024 * 1024 +) + +var ( + // errForcedDrop will be returned to callers of ConsumeTraceData to indicate + // that data is being dropped due to high memory usage. + errForcedDrop = errors.New("data dropped due to high memory usage") + + // Construction errors + + errCheckIntervalOutOfRange = errors.New( + "checkInterval must be greater than zero") + + errLimitOutOfRange = errors.New( + "memAllocLimit or memoryLimitPercentage must be greater than zero") + + errMemSpikeLimitOutOfRange = errors.New( + "memSpikeLimit must be smaller than memAllocLimit") + + errPercentageLimitOutOfRange = errors.New( + "memoryLimitPercentage and memorySpikePercentage must be greater than zero and less than or equal to hundred", + ) +) + +// make it overridable by tests +var getMemoryFn = iruntime.TotalMemory + +type memoryLimiter struct { + usageChecker memUsageChecker + + memCheckWait time.Duration + ballastSize uint64 + + // forceDrop is used atomically to indicate when data should be dropped. + forceDrop int64 + + ticker *time.Ticker + + lastGCDone time.Time + + // The function to read the mem values is set as a reference to help with + // testing different values. + readMemStatsFn func(m *runtime.MemStats) + + // Fields used for logging. + logger *zap.Logger + configMismatchedLogged bool + + obsrep *obsreport.Processor +} + +// Minimum interval between forced GC when in soft limited mode. We don't want to +// do GCs too frequently since it is a CPU-heavy operation. +const minGCIntervalWhenSoftLimited = 10 * time.Second + +// newMemoryLimiter returns a new memorylimiter processor. 
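+// It returns an error when check_interval is not positive or when neither
+// limit_mib nor limit_percentage is configured.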
+func newMemoryLimiter(logger *zap.Logger, cfg *Config) (*memoryLimiter, error) { + if cfg.CheckInterval <= 0 { + return nil, errCheckIntervalOutOfRange + } + if cfg.MemoryLimitMiB == 0 && cfg.MemoryLimitPercentage == 0 { + return nil, errLimitOutOfRange + } + + usageChecker, err := getMemUsageChecker(cfg, logger) + if err != nil { + return nil, err + } + + logger.Info("Memory limiter configured", + zap.Uint64("limit_mib", usageChecker.memAllocLimit/mibBytes), + zap.Uint64("spike_limit_mib", usageChecker.memSpikeLimit/mibBytes), + zap.Duration("check_interval", cfg.CheckInterval)) + + ml := &memoryLimiter{ + usageChecker: *usageChecker, + memCheckWait: cfg.CheckInterval, + ticker: time.NewTicker(cfg.CheckInterval), + readMemStatsFn: runtime.ReadMemStats, + logger: logger, + obsrep: obsreport.NewProcessor(obsreport.ProcessorSettings{ + Level: configtelemetry.GetMetricsLevelFlagValue(), + ProcessorID: cfg.ID(), + }), + } + + return ml, nil +} + +func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, error) { + memAllocLimit := uint64(cfg.MemoryLimitMiB) * mibBytes + memSpikeLimit := uint64(cfg.MemorySpikeLimitMiB) * mibBytes + if cfg.MemoryLimitMiB != 0 { + return newFixedMemUsageChecker(memAllocLimit, memSpikeLimit) + } + totalMemory, err := getMemoryFn() + if err != nil { + return nil, fmt.Errorf("failed to get total memory, use fixed memory settings (limit_mib): %w", err) + } + logger.Info("Using percentage memory limiter", + zap.Uint64("total_memory_mib", totalMemory/mibBytes), + zap.Uint32("limit_percentage", cfg.MemoryLimitPercentage), + zap.Uint32("spike_limit_percentage", cfg.MemorySpikePercentage)) + return newPercentageMemUsageChecker(totalMemory, uint64(cfg.MemoryLimitPercentage), uint64(cfg.MemorySpikePercentage)) +} + +func (ml *memoryLimiter) start(_ context.Context, host component.Host) error { + extensions := host.GetExtensions() + for _, extension := range extensions { + if ext, ok := extension.(*ballastextension.MemoryBallast); ok { + ml.ballastSize = ext.GetBallastSize() + break + } + } + + ml.startMonitoring() + return nil +} + +func (ml *memoryLimiter) shutdown(context.Context) error { + ml.ticker.Stop() + return nil +} + +func (ml *memoryLimiter) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { + numSpans := td.SpanCount() + if ml.forcingDrop() { + // TODO: actually to be 100% sure that this is "refused" and not "dropped" + // it is necessary to check the pipeline to see if this is directly connected + // to a receiver (ie.: a receiver is on the call stack). For now it + // assumes that the pipeline is properly configured and a receiver is on the + // callstack. + ml.obsrep.TracesRefused(ctx, numSpans) + + return td, errForcedDrop + } + + // Even if the next consumer returns error record the data as accepted by + // this processor. + ml.obsrep.TracesAccepted(ctx, numSpans) + return td, nil +} + +func (ml *memoryLimiter) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { + numDataPoints := md.DataPointCount() + if ml.forcingDrop() { + // TODO: actually to be 100% sure that this is "refused" and not "dropped" + // it is necessary to check the pipeline to see if this is directly connected + // to a receiver (ie.: a receiver is on the call stack). For now it + // assumes that the pipeline is properly configured and a receiver is on the + // callstack. 
+ ml.obsrep.MetricsRefused(ctx, numDataPoints)
+ return md, errForcedDrop
+ }
+
+ // Even if the next consumer returns an error, record the data as accepted by
+ // this processor.
+ ml.obsrep.MetricsAccepted(ctx, numDataPoints)
+ return md, nil
+}
+
+func (ml *memoryLimiter) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) {
+ numRecords := ld.LogRecordCount()
+ if ml.forcingDrop() {
+ // TODO: actually to be 100% sure that this is "refused" and not "dropped"
+ // it is necessary to check the pipeline to see if this is directly connected
+ // to a receiver (ie.: a receiver is on the call stack). For now it
+ // assumes that the pipeline is properly configured and a receiver is on the
+ // callstack.
+ ml.obsrep.LogsRefused(ctx, numRecords)
+
+ return ld, errForcedDrop
+ }
+
+ // Even if the next consumer returns an error, record the data as accepted by
+ // this processor.
+ ml.obsrep.LogsAccepted(ctx, numRecords)
+ return ld, nil
+}
+
+func (ml *memoryLimiter) readMemStats() *runtime.MemStats {
+ ms := &runtime.MemStats{}
+ ml.readMemStatsFn(ms)
+ // If properly configured, ms.Alloc should be at least ml.ballastSize, but
+ // since a misconfiguration is possible check for that here.
+ if ms.Alloc >= ml.ballastSize {
+ ms.Alloc -= ml.ballastSize
+ } else if !ml.configMismatchedLogged {
+ // This indicates misconfiguration. Log it once.
+ ml.configMismatchedLogged = true
+ ml.logger.Warn(ballastSizeMibKey + " in ballast extension is likely incorrectly configured.")
+ }
+
+ return ms
+}
+
+// startMonitoring starts a ticker-driven goroutine that will check memory usage
+// every checkInterval period.
+func (ml *memoryLimiter) startMonitoring() {
+ go func() {
+ for range ml.ticker.C {
+ ml.checkMemLimits()
+ }
+ }()
+}
+
+// forcingDrop reports whether data is currently being dropped to release memory.
+func (ml *memoryLimiter) forcingDrop() bool {
+ return atomic.LoadInt64(&ml.forceDrop) != 0
+}
+
+func (ml *memoryLimiter) setForcingDrop(b bool) {
+ var i int64
+ if b {
+ i = 1
+ }
+ atomic.StoreInt64(&ml.forceDrop, i)
+}
+
+func memstatToZapField(ms *runtime.MemStats) zap.Field {
+ return zap.Uint64("cur_mem_mib", ms.Alloc/mibBytes)
+}
+
+func (ml *memoryLimiter) doGCandReadMemStats() *runtime.MemStats {
+ runtime.GC()
+ ml.lastGCDone = time.Now()
+ ms := ml.readMemStats()
+ ml.logger.Info("Memory usage after GC.", memstatToZapField(ms))
+ return ms
+}
+
+func (ml *memoryLimiter) checkMemLimits() {
+ ms := ml.readMemStats()
+
+ ml.logger.Debug("Currently used memory.", memstatToZapField(ms))
+
+ if ml.usageChecker.aboveHardLimit(ms) {
+ ml.logger.Warn("Memory usage is above hard limit. Forcing a GC.", memstatToZapField(ms))
+ ms = ml.doGCandReadMemStats()
+ }
+
+ // Remember current dropping state.
+ wasForcingDrop := ml.forcingDrop()
+
+ // Check if the memory usage is above the soft limit.
+ mustForceDrop := ml.usageChecker.aboveSoftLimit(ms)
+
+ if wasForcingDrop && !mustForceDrop {
+ // Was previously dropping but enough memory is available now, no need to limit.
+ ml.logger.Info("Memory usage back within limits. Resuming normal operation.", memstatToZapField(ms))
+ }
+
+ if !wasForcingDrop && mustForceDrop {
+ // We are above soft limit, do a GC if it wasn't done recently and see if
+ // it brings memory usage below the soft limit.
+ if time.Since(ml.lastGCDone) > minGCIntervalWhenSoftLimited {
+ ml.logger.Info("Memory usage is above soft limit. Forcing a GC.", memstatToZapField(ms))
+ ms = ml.doGCandReadMemStats()
+ // Check the limit again to see if GC helped.
+ mustForceDrop = ml.usageChecker.aboveSoftLimit(ms) + } + + if mustForceDrop { + ml.logger.Warn("Memory usage is above soft limit. Dropping data.", memstatToZapField(ms)) + } + } + + ml.setForcingDrop(mustForceDrop) +} + +type memUsageChecker struct { + memAllocLimit uint64 + memSpikeLimit uint64 +} + +func (d memUsageChecker) aboveSoftLimit(ms *runtime.MemStats) bool { + return ms.Alloc >= d.memAllocLimit-d.memSpikeLimit +} + +func (d memUsageChecker) aboveHardLimit(ms *runtime.MemStats) bool { + return ms.Alloc >= d.memAllocLimit +} + +func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) (*memUsageChecker, error) { + if memSpikeLimit >= memAllocLimit { + return nil, errMemSpikeLimitOutOfRange + } + if memSpikeLimit == 0 { + // If spike limit is unspecified use 20% of mem limit. + memSpikeLimit = memAllocLimit / 5 + } + return &memUsageChecker{ + memAllocLimit: memAllocLimit, + memSpikeLimit: memSpikeLimit, + }, nil +} + +func newPercentageMemUsageChecker(totalMemory uint64, percentageLimit, percentageSpike uint64) (*memUsageChecker, error) { + if percentageLimit > 100 || percentageLimit <= 0 || percentageSpike > 100 || percentageSpike <= 0 { + return nil, errPercentageLimitOutOfRange + } + return newFixedMemUsageChecker(percentageLimit*totalMemory/100, percentageSpike*totalMemory/100) +} diff --git a/internal/otel_collector/processor/memorylimiter/testdata/config.yaml b/internal/otel_collector/processor/memorylimiter/testdata/config.yaml new file mode 100644 index 00000000000..a5c60267123 --- /dev/null +++ b/internal/otel_collector/processor/memorylimiter/testdata/config.yaml @@ -0,0 +1,36 @@ +receivers: + nop: + +processors: + memory_limiter: + # empty config + + memory_limiter/with-settings: + # check_interval is the time between measurements of memory usage for the + # purposes of avoiding going over the limits. Defaults to zero, so no + # checks will be performed. Values below 1 second are not recommended since + # it can result in unnecessary CPU consumption. + check_interval: 5s + + # Maximum amount of memory, in MiB, targeted to be allocated by the process heap. + # Note that typically the total memory usage of process will be about 50MiB higher + # than this value. + limit_mib: 4000 + + # The maximum, in MiB, spike expected between the measurements of memory usage. + spike_limit_mib: 500 + + # BallastSizeMiB is the size, in MiB, of the ballast size being used by the process. + # This must match the value of mem-ballast-size-mib command line option (if used) + # otherwise the memory limiter will not work correctly. + ballast_size_mib: 2000 + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [nop] + processors: [memory_limiter/with-settings] + exporters: [nop] diff --git a/internal/otel_collector/processor/processorhelper/factory.go b/internal/otel_collector/processor/processorhelper/factory.go new file mode 100644 index 00000000000..6d7794a9fc8 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/factory.go @@ -0,0 +1,132 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" +) + +// FactoryOption apply changes to ProcessorOptions. +type FactoryOption func(o *factory) + +// CreateDefaultConfig is the equivalent of component.ProcessorFactory.CreateDefaultConfig() +type CreateDefaultConfig func() config.Processor + +// CreateTracesProcessor is the equivalent of component.ProcessorFactory.CreateTracesProcessor() +type CreateTracesProcessor func(context.Context, component.ProcessorCreateSettings, config.Processor, consumer.Traces) (component.TracesProcessor, error) + +// CreateMetricsProcessor is the equivalent of component.ProcessorFactory.CreateMetricsProcessor() +type CreateMetricsProcessor func(context.Context, component.ProcessorCreateSettings, config.Processor, consumer.Metrics) (component.MetricsProcessor, error) + +// CreateLogsProcessor is the equivalent of component.ProcessorFactory.CreateLogsProcessor() +type CreateLogsProcessor func(context.Context, component.ProcessorCreateSettings, config.Processor, consumer.Logs) (component.LogsProcessor, error) + +type factory struct { + component.BaseProcessorFactory + cfgType config.Type + createDefaultConfig CreateDefaultConfig + createTracesProcessor CreateTracesProcessor + createMetricsProcessor CreateMetricsProcessor + createLogsProcessor CreateLogsProcessor +} + +// WithTraces overrides the default "error not supported" implementation for CreateTracesProcessor. +func WithTraces(createTracesProcessor CreateTracesProcessor) FactoryOption { + return func(o *factory) { + o.createTracesProcessor = createTracesProcessor + } +} + +// WithMetrics overrides the default "error not supported" implementation for CreateMetricsProcessor. +func WithMetrics(createMetricsProcessor CreateMetricsProcessor) FactoryOption { + return func(o *factory) { + o.createMetricsProcessor = createMetricsProcessor + } +} + +// WithLogs overrides the default "error not supported" implementation for CreateLogsProcessor. +func WithLogs(createLogsProcessor CreateLogsProcessor) FactoryOption { + return func(o *factory) { + o.createLogsProcessor = createLogsProcessor + } +} + +// NewFactory returns a component.ProcessorFactory. +func NewFactory( + cfgType config.Type, + createDefaultConfig CreateDefaultConfig, + options ...FactoryOption) component.ProcessorFactory { + f := &factory{ + cfgType: cfgType, + createDefaultConfig: createDefaultConfig, + } + for _, opt := range options { + opt(f) + } + return f +} + +// Type gets the type of the Processor config created by this factory. +func (f *factory) Type() config.Type { + return f.cfgType +} + +// CreateDefaultConfig creates the default configuration for processor. +func (f *factory) CreateDefaultConfig() config.Processor { + return f.createDefaultConfig() +} + +// CreateTracesProcessor creates a component.TracesProcessor based on this config. +func (f *factory) CreateTracesProcessor( + ctx context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Traces, +) (component.TracesProcessor, error) { + if f.createTracesProcessor == nil { + return f.BaseProcessorFactory.CreateTracesProcessor(ctx, set, cfg, nextConsumer) + } + return f.createTracesProcessor(ctx, set, cfg, nextConsumer) +} + +// CreateMetricsProcessor creates a component.MetricsProcessor based on this config. 
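+// When no metrics creator was registered via WithMetrics it defers to the
+// embedded BaseProcessorFactory, which returns the "error not supported" result.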
+func (f *factory) CreateMetricsProcessor( + ctx context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Metrics, +) (component.MetricsProcessor, error) { + if f.createMetricsProcessor == nil { + return f.BaseProcessorFactory.CreateMetricsProcessor(ctx, set, cfg, nextConsumer) + } + return f.createMetricsProcessor(ctx, set, cfg, nextConsumer) +} + +// CreateLogsProcessor creates a component.LogsProcessor based on this config. +func (f *factory) CreateLogsProcessor( + ctx context.Context, + set component.ProcessorCreateSettings, + cfg config.Processor, + nextConsumer consumer.Logs, +) (component.LogsProcessor, error) { + if f.createLogsProcessor == nil { + return f.BaseProcessorFactory.CreateLogsProcessor(ctx, set, cfg, nextConsumer) + } + return f.createLogsProcessor(ctx, set, cfg, nextConsumer) +} diff --git a/internal/otel_collector/processor/processorhelper/logs.go b/internal/otel_collector/processor/processorhelper/logs.go new file mode 100644 index 00000000000..7a647e32f97 --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/logs.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +// ProcessLogsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. +type ProcessLogsFunc func(context.Context, pdata.Logs) (pdata.Logs, error) + +type logProcessor struct { + component.Component + consumer.Logs +} + +// NewLogsProcessor creates a LogsProcessor that ensure context propagation and the right tags are set. 
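+// Returning ErrSkipProcessingData from logsFunc drops the payload without
+// propagating an error up the pipeline.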
+// TODO: Add observability metrics support +func NewLogsProcessor( + cfg config.Processor, + nextConsumer consumer.Logs, + logsFunc ProcessLogsFunc, + options ...Option, +) (component.LogsProcessor, error) { + if logsFunc == nil { + return nil, errors.New("nil logsFunc") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + eventOptions := spanAttributes(cfg.ID()) + bs := fromOptions(options) + logsConsumer, err := consumerhelper.NewLogs(func(ctx context.Context, ld pdata.Logs) error { + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) + var err error + ld, err = logsFunc(ctx, ld) + span.AddEvent("End processing.", eventOptions) + if err != nil { + if errors.Is(err, ErrSkipProcessingData) { + return nil + } + return err + } + return nextConsumer.ConsumeLogs(ctx, ld) + }, bs.consumerOptions...) + if err != nil { + return nil, err + } + + return &logProcessor{ + Component: componenthelper.New(bs.componentOptions...), + Logs: logsConsumer, + }, nil +} diff --git a/internal/otel_collector/processor/processorhelper/metrics.go b/internal/otel_collector/processor/processorhelper/metrics.go new file mode 100644 index 00000000000..62879c61bae --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/metrics.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +// ProcessMetricsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. +type ProcessMetricsFunc func(context.Context, pdata.Metrics) (pdata.Metrics, error) + +type metricsProcessor struct { + component.Component + consumer.Metrics +} + +// NewMetricsProcessor creates a MetricsProcessor that ensure context propagation and the right tags are set. 
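+// The metricsFunc is wrapped so that it runs between the "Start processing."
+// and "End processing." span events on the context's span.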
+// TODO: Add observability metrics support +func NewMetricsProcessor( + cfg config.Processor, + nextConsumer consumer.Metrics, + metricsFunc ProcessMetricsFunc, + options ...Option, +) (component.MetricsProcessor, error) { + if metricsFunc == nil { + return nil, errors.New("nil metricsFunc") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + eventOptions := spanAttributes(cfg.ID()) + bs := fromOptions(options) + metricsConsumer, err := consumerhelper.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) + var err error + md, err = metricsFunc(ctx, md) + span.AddEvent("End processing.", eventOptions) + if err != nil { + if errors.Is(err, ErrSkipProcessingData) { + return nil + } + return err + } + return nextConsumer.ConsumeMetrics(ctx, md) + }, bs.consumerOptions...) + if err != nil { + return nil, err + } + + return &metricsProcessor{ + Component: componenthelper.New(bs.componentOptions...), + Metrics: metricsConsumer, + }, nil +} diff --git a/internal/otel_collector/processor/processorhelper/processor.go b/internal/otel_collector/processor/processorhelper/processor.go new file mode 100644 index 00000000000..528560b206f --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/processor.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" +) + +// ErrSkipProcessingData is a sentinel value to indicate when traces, metrics, or logs should intentionally be dropped +// from further processing in the pipeline because the data is determined to be irrelevant. A processor can return this error +// to stop further processing without propagating an error back up the pipeline. +var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline") + +// Option applies changes to baseSettings. +type Option func(*baseSettings) + +// WithStart overrides the default Start function for a processor. +// The default Start function does nothing and always returns nil. +func WithStart(start componenthelper.StartFunc) Option { + return func(o *baseSettings) { + o.componentOptions = append(o.componentOptions, componenthelper.WithStart(start)) + } +} + +// WithShutdown overrides the default Shutdown function for a processor. +// The default Shutdown function does nothing and always returns nil. 
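A brief sketch of the lifecycle options above (editorial; the hook signatures follow `componenthelper.StartFunc` and `componenthelper.ShutdownFunc` as used in this file, but the processor itself is hypothetical):

```go
package example

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// newProcessorWithHooks attaches Start/Shutdown hooks to a metrics processor.
func newProcessorWithHooks(cfg config.Processor, next consumer.Metrics, fn processorhelper.ProcessMetricsFunc) (component.MetricsProcessor, error) {
	return processorhelper.NewMetricsProcessor(cfg, next, fn,
		processorhelper.WithStart(func(_ context.Context, _ component.Host) error {
			// Acquire resources (connections, background workers) before data flows.
			return nil
		}),
		processorhelper.WithShutdown(func(_ context.Context) error {
			// Release resources once the pipeline is stopping.
			return nil
		}),
	)
}
```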
+func WithShutdown(shutdown componenthelper.ShutdownFunc) Option { + return func(o *baseSettings) { + o.componentOptions = append(o.componentOptions, componenthelper.WithShutdown(shutdown)) + } +} + +// WithCapabilities overrides the default GetCapabilities function for an processor. +// The default GetCapabilities function returns mutable capabilities. +func WithCapabilities(capabilities consumer.Capabilities) Option { + return func(o *baseSettings) { + o.consumerOptions = append(o.consumerOptions, consumerhelper.WithCapabilities(capabilities)) + } +} + +type baseSettings struct { + componentOptions []componenthelper.Option + consumerOptions []consumerhelper.Option +} + +// fromOptions returns the internal settings starting from the default and applying all options. +func fromOptions(options []Option) *baseSettings { + // Start from the default options: + opts := &baseSettings{ + consumerOptions: []consumerhelper.Option{consumerhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})}, + } + + for _, op := range options { + op(opts) + } + + return opts +} + +func spanAttributes(id config.ComponentID) trace.EventOption { + return trace.WithAttributes(attribute.String(obsmetrics.ProcessorKey, id.String())) +} diff --git a/internal/otel_collector/processor/processorhelper/traces.go b/internal/otel_collector/processor/processorhelper/traces.go new file mode 100644 index 00000000000..8e0bd889abc --- /dev/null +++ b/internal/otel_collector/processor/processorhelper/traces.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/model/pdata" +) + +// ProcessTracesFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. +type ProcessTracesFunc func(context.Context, pdata.Traces) (pdata.Traces, error) + +type tracesProcessor struct { + component.Component + consumer.Traces +} + +// NewTracesProcessor creates a TracesProcessor that ensure context propagation and the right tags are set. 
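One design note worth making concrete: the default capabilities declare `MutatesData: true` (see `fromOptions` above), which forces upstream components to clone data defensively. A processor that only inspects data can opt out, as in this editorial sketch (the inspect-only function is an assumption):

```go
package example

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// newReadOnlyTracesProcessor declares itself read-only so the pipeline can
// skip defensive copies of each batch.
func newReadOnlyTracesProcessor(cfg config.Processor, next consumer.Traces, inspect processorhelper.ProcessTracesFunc) (component.TracesProcessor, error) {
	return processorhelper.NewTracesProcessor(cfg, next, inspect,
		processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
	)
}
```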
+// TODO: Add observability metrics support +func NewTracesProcessor( + cfg config.Processor, + nextConsumer consumer.Traces, + tracesFunc ProcessTracesFunc, + options ...Option, +) (component.TracesProcessor, error) { + if tracesFunc == nil { + return nil, errors.New("nil tracesFunc") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + eventOptions := spanAttributes(cfg.ID()) + bs := fromOptions(options) + traceConsumer, err := consumerhelper.NewTraces(func(ctx context.Context, td pdata.Traces) error { + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) + var err error + td, err = tracesFunc(ctx, td) + span.AddEvent("End processing.", eventOptions) + if err != nil { + if errors.Is(err, ErrSkipProcessingData) { + return nil + } + return err + } + return nextConsumer.ConsumeTraces(ctx, td) + }, bs.consumerOptions...) + + if err != nil { + return nil, err + } + + return &tracesProcessor{ + Component: componenthelper.New(bs.componentOptions...), + Traces: traceConsumer, + }, nil +} diff --git a/internal/otel_collector/proto_patch.sed b/internal/otel_collector/proto_patch.sed new file mode 100644 index 00000000000..e4771945252 --- /dev/null +++ b/internal/otel_collector/proto_patch.sed @@ -0,0 +1,50 @@ +s+github.com/open-telemetry/opentelemetry-proto/gen/go/+go.opentelemetry.io/collector/model/internal/data/protogen/+g + +s+package opentelemetry.proto.\(.*\).v1;+package opentelemetry.proto.\1.v1;\ +\ +import "gogoproto/gogo.proto";+g + +s+bytes trace_id = \(.*\);+bytes trace_id = \1\ + [\ + // Use custom TraceId data type for this field.\ + (gogoproto.nullable) = false,\ + (gogoproto.customtype) = "go.opentelemetry.io/collector/model/internal/data.TraceID"\ + ];+g + +s+bytes \(.*span_id\) = \(.*\);+bytes \1 = \2\ + [\ + // Use custom SpanId data type for this field.\ + (gogoproto.nullable) = false,\ + (gogoproto.customtype) = "go.opentelemetry.io/collector/model/internal/data.SpanID"\ + ];+g + +s+repeated opentelemetry.proto.common.v1.KeyValue \(.*\);+repeated opentelemetry.proto.common.v1.KeyValue \1\ + [ (gogoproto.nullable) = false ];+g + +s+repeated KeyValue \(.*\);+repeated KeyValue \1\ + [ (gogoproto.nullable) = false ];+g + +s+AnyValue \(.*\);+AnyValue \1\ + [ (gogoproto.nullable) = false ];+g + +# this line matches StringKeyValue that are deprecated +s+repeated opentelemetry.proto.common.v1.StringKeyValue \(.*\)\];+repeated opentelemetry.proto.common.v1.StringKeyValue \1\, (gogoproto.nullable) = false ];+g + +# this line matches StringKeyValue that are not deprecated +s+repeated opentelemetry.proto.common.v1.StringKeyValue \([^]]*\);+repeated opentelemetry.proto.common.v1.StringKeyValue \1\ + [ (gogoproto.nullable) = false ];+g + +s+opentelemetry.proto.resource.v1.Resource resource = \(.*\);+opentelemetry.proto.resource.v1.Resource resource = \1\ + [ (gogoproto.nullable) = false ];+g + +s+opentelemetry.proto.common.v1.InstrumentationLibrary instrumentation_library = \(.*\);+opentelemetry.proto.common.v1.InstrumentationLibrary instrumentation_library = \1\ + [ (gogoproto.nullable) = false ];+g + +s+Status \(.*\);+Status \1\ + [ (gogoproto.nullable) = false ];+g + +s+repeated IntExemplar exemplars = \(.*\);+repeated IntExemplar exemplars = \1\ + [ (gogoproto.nullable) = false ];+g + +s+repeated Exemplar exemplars = \(.*\);+repeated Exemplar exemplars = \1\ + [ (gogoproto.nullable) = false ];+g diff --git a/internal/otel_collector/receiver/README.md b/internal/otel_collector/receiver/README.md new file mode 
100644 index 00000000000..5c5555525f6 --- /dev/null +++ b/internal/otel_collector/receiver/README.md @@ -0,0 +1,75 @@ +# General Information + +A receiver is how data gets into the OpenTelemetry Collector. Generally, a +receiver accepts data in a specified format, translates it into the internal +format and passes it to [processors](../processor/README.md) and +[exporters](../exporter/README.md) defined in the applicable +pipelines. + +Available trace receivers (sorted alphabetically): + +- [OTLP Receiver](otlpreceiver/README.md) + +Available metric receivers (sorted alphabetically): + +- [OTLP Receiver](otlpreceiver/README.md) + +Available log receivers (sorted alphabetically): + +- [OTLP Receiver](otlpreceiver/README.md) + +The [contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib) + has more receivers that can be added to custom builds of the collector. + +## Configuring Receivers + +Receivers are configured via YAML under the top-level `receivers` tag. There +must be at least one enabled receiver for a configuration to be considered +valid. + +The following is a sample configuration for the `examplereceiver`. + +```yaml +receivers: + # Receiver 1. + # <receiver type>: + examplereceiver: + # <setting one>: <value one> + endpoint: 1.2.3.4:8080 + # ... + # Receiver 2. + # <receiver type>/<name>: + examplereceiver/settings: + # <setting one>: <value one> + endpoint: 0.0.0.0:9211 +``` + +A receiver instance is referenced by its full name in other parts of the config, +such as in pipelines. A full name consists of the receiver type, '/' and the +name appended to the receiver type in the configuration. All receiver full names +must be unique. + +For the example above: + +- Receiver 1 has full name `examplereceiver`. +- Receiver 2 has full name `examplereceiver/settings`. + +Receivers are enabled upon being added to a pipeline. For example: + +```yaml +service: + pipelines: + # Valid pipelines are: traces, metrics or logs + # Trace pipeline 1. + traces: + receivers: [examplereceiver, examplereceiver/settings] + processors: [] + exporters: [exampleexporter] + # Trace pipeline 2. + traces/another: + receivers: [examplereceiver, examplereceiver/settings] + processors: [] + exporters: [exampleexporter] +``` + +> At least one receiver must be enabled per pipeline to be a valid configuration. diff --git a/internal/otel_collector/receiver/doc.go b/internal/otel_collector/receiver/doc.go new file mode 100644 index 00000000000..f2e788b1241 --- /dev/null +++ b/internal/otel_collector/receiver/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package receiver contains implementations of Receiver components. +// +// A receiver must be added as a default component to be included in the collector. 
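To make "added as a default component" concrete, here is a hedged, editorial sketch of how a collector build registers a receiver factory. `component.MakeReceiverFactoryMap` matches the helper exposed by the collector vintage vendored in this tree; treat the exact wiring as an assumption rather than the project's build code.

```go
package example

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

// components assembles the factories available to a custom collector build.
func components() (component.Factories, error) {
	var err error
	factories := component.Factories{}
	factories.Receivers, err = component.MakeReceiverFactoryMap(
		otlpreceiver.NewFactory(), // registers the "otlp" receiver type
	)
	return factories, err
}
```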
+package receiver diff --git a/internal/otel_collector/receiver/otlpreceiver/README.md b/internal/otel_collector/receiver/otlpreceiver/README.md new file mode 100644 index 00000000000..2f75ea51de3 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/README.md @@ -0,0 +1,68 @@ +# OTLP Receiver + +Receives data via gRPC or HTTP using [OTLP]( +https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md) +format. + +Supported pipeline types: traces, metrics, logs + +:warning: OTLP metrics format is currently marked as "Alpha" and may change in +incompatible ways at any time. + +## Getting Started + +All that is required to enable the OTLP receiver is to include it in the +receiver definitions. A protocol can be disabled by simply not specifying it in +the list of protocols. + +```yaml +receivers: + otlp: + protocols: + grpc: + http: +``` + +The following settings are configurable: + +- `endpoint` (default = 0.0.0.0:4317 for the grpc protocol, 0.0.0.0:4318 for the http protocol): + the host:port on which the receiver is going to receive data. The valid syntax is + described at https://github.com/grpc/grpc/blob/master/doc/naming.md. + +## Advanced Configuration + +Several helper files are leveraged to provide additional capabilities automatically: + +- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) +- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) + +## Writing with HTTP/JSON + +The OTLP receiver can receive trace export calls via HTTP/JSON in addition to +gRPC. The HTTP/JSON endpoint accepts the same data as gRPC; the protocol is recognized +and processed accordingly. Note the format needs to be [protobuf JSON +serialization](https://developers.google.com/protocol-buffers/docs/proto3#json). + +To write traces with HTTP/JSON, `POST` to `[address]/v1/traces`; for metrics, +`POST` to `[address]/v1/metrics`; for logs, `POST` to `[address]/v1/logs`. The default +port is `4318`. + +The HTTP/JSON endpoint can also optionally configure +[CORS](https://fetch.spec.whatwg.org/#cors-protocol), which is enabled by +specifying a list of allowed CORS origins in the `cors_allowed_origins` +and optionally headers in `cors_allowed_headers`: + +```yaml +receivers: + otlp: + protocols: + http: + endpoint: "localhost:4318" + cors_allowed_origins: + - http://test.com + # Origins can have wildcards with *, use * by itself to match any origin. + - https://*.example.com + cors_allowed_headers: + - TestHeader +``` diff --git a/internal/otel_collector/receiver/otlpreceiver/config.go b/internal/otel_collector/receiver/otlpreceiver/config.go new file mode 100644 index 00000000000..9cc17411046 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/config.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "fmt" + + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configparser" +) + +const ( + // Protocol values. + protoGRPC = "grpc" + protoHTTP = "http" + protocolsFieldName = "protocols" +) + +// Protocols is the configuration for the supported protocols. +type Protocols struct { + GRPC *configgrpc.GRPCServerSettings `mapstructure:"grpc"` + HTTP *confighttp.HTTPServerSettings `mapstructure:"http"` +} + +// Config defines configuration for OTLP receiver. +type Config struct { + config.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + // Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON). + Protocols `mapstructure:"protocols"` +} + +var _ config.Receiver = (*Config)(nil) +var _ config.Unmarshallable = (*Config)(nil) + +// Validate checks that the receiver configuration is valid. +func (cfg *Config) Validate() error { + if cfg.GRPC == nil && + cfg.HTTP == nil { + return fmt.Errorf("must specify at least one protocol when using the OTLP receiver") + } + return nil +} + +// Unmarshal a configparser.Parser into the config struct. +func (cfg *Config) Unmarshal(componentParser *configparser.Parser) error { + if componentParser == nil || len(componentParser.AllKeys()) == 0 { + return fmt.Errorf("empty config for OTLP receiver") + } + // First, load the config normally. + err := componentParser.UnmarshalExact(cfg) + if err != nil { + return err + } + + // Next, manually search for protocols in the configparser.Parser; if a protocol is not present, it means it is disabled. + protocols, err := componentParser.Sub(protocolsFieldName) + if err != nil { + return err + } + + if !protocols.IsSet(protoGRPC) { + cfg.GRPC = nil + } + + if !protocols.IsSet(protoHTTP) { + cfg.HTTP = nil + } + + return nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/config.md b/internal/otel_collector/receiver/otlpreceiver/config.md new file mode 100644 index 00000000000..0e30e5e2521 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/config.md @@ -0,0 +1,91 @@ +# "otlp" Receiver Reference + +Config defines configuration for OTLP receiver. + + +### Config + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| protocols |[otlpreceiver-Protocols](#otlpreceiver-Protocols)| | Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON). | + +### otlpreceiver-Protocols + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| grpc |[configgrpc-GRPCServerSettings](#configgrpc-GRPCServerSettings)| | GRPCServerSettings defines common settings for a gRPC server configuration. | +| http |[confighttp-HTTPServerSettings](#confighttp-HTTPServerSettings)| | HTTPServerSettings defines settings for creating an HTTP server. 
| + +### configgrpc-GRPCServerSettings + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| endpoint |string| 0.0.0.0:4317 | Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. | +| transport |string| tcp | Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". | +| tls_settings |[configtls-TLSServerSetting](#configtls-TLSServerSetting)| | Configures the protocol to use TLS. The default value is nil, which will cause the protocol to not use TLS. | +| max_recv_msg_size_mib |uint64| | MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. | +| max_concurrent_streams |uint32| | MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport. It has effect only for streaming RPCs. | +| read_buffer_size |int| 524288 | ReadBufferSize for gRPC server. See grpc.ReadBufferSize (https://godoc.org/google.golang.org/grpc#ReadBufferSize). | +| write_buffer_size |int| | WriteBufferSize for gRPC server. See grpc.WriteBufferSize (https://godoc.org/google.golang.org/grpc#WriteBufferSize). | +| keepalive |[configgrpc-KeepaliveServerConfig](#configgrpc-KeepaliveServerConfig)| | Keepalive anchor for all the settings related to keepalive. | +| auth |[configauth-Authentication](#configauth-Authentication)| | Auth for this receiver | + +### configtls-TLSServerSetting + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| ca_file |string| | Path to the CA cert. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA. (optional) | +| cert_file |string| | Path to the TLS cert to use for TLS required connections. (optional) | +| key_file |string| | Path to the TLS key to use for TLS required connections. (optional) | +| client_ca_file |string| | Path to the TLS cert to use by the server to verify a client certificate. (optional) This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional) | + +### configgrpc-KeepaliveServerConfig + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| server_parameters |[configgrpc-KeepaliveServerParameters](#configgrpc-KeepaliveServerParameters)| | KeepaliveServerParameters allow configuration of the keepalive.ServerParameters. The same default values as keepalive.ServerParameters are applicable and get applied by the server. See https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters for details. | +| enforcement_policy |[configgrpc-KeepaliveEnforcementPolicy](#configgrpc-KeepaliveEnforcementPolicy)| | KeepaliveEnforcementPolicy allow configuration of the keepalive.EnforcementPolicy. The same default values as keepalive.EnforcementPolicy are applicable and get applied by the server. See https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy for details. 
| + +### configgrpc-KeepaliveServerParameters + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| max_connection_idle |[time-Duration](#time-Duration)| | | +| max_connection_age |[time-Duration](#time-Duration)| | | +| max_connection_age_grace |[time-Duration](#time-Duration)| | | +| time |[time-Duration](#time-Duration)| | | +| timeout |[time-Duration](#time-Duration)| | | + +### configgrpc-KeepaliveEnforcementPolicy + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| min_time |[time-Duration](#time-Duration)| | | +| permit_without_stream |bool| | | + +### configauth-Authentication + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| authenticator |string| | AuthenticatorName specifies the name of the extension to use in order to authenticate the incoming data point. | + +### confighttp-HTTPServerSettings + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| endpoint |string| 0.0.0.0:4318 | Endpoint configures the listening address for the server. | +| tls_settings |[configtls-TLSServerSetting](#configtls-TLSServerSetting)| | TLSSetting struct exposes TLS client configuration. | +| cors_allowed_origins |[]string| | CorsOrigins are the allowed CORS origins for HTTP/JSON requests to grpc-gateway adapter for the OTLP receiver. See github.com/rs/cors An empty list means that CORS is not enabled at all. A wildcard (*) can be used to match any origin or one or more characters of an origin. | +| cors_allowed_headers |[]string| | CorsHeaders are the allowed CORS headers for HTTP/JSON requests to grpc-gateway adapter for the OTLP receiver. See github.com/rs/cors CORS needs to be enabled first by providing a non-empty list in CorsOrigins A wildcard (*) can be used to match any header. | + +### configtls-TLSServerSetting + +| Name | Type | Default | Docs | +| ---- | ---- | ------- | ---- | +| ca_file |string| | Path to the CA cert. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA. (optional) | +| cert_file |string| | Path to the TLS cert to use for TLS required connections. (optional) | +| key_file |string| | Path to the TLS key to use for TLS required connections. (optional) | +| client_ca_file |string| | Path to the TLS cert to use by the server to verify a client certificate. (optional) This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional) | + +### time-Duration +An optionally signed sequence of decimal numbers, each with a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`. Valid time units are `ns`, `us`, `ms`, `s`, `m`, `h`. \ No newline at end of file diff --git a/internal/otel_collector/receiver/otlpreceiver/doc.go b/internal/otel_collector/receiver/otlpreceiver/doc.go new file mode 100644 index 00000000000..08660f683e6 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlpreceiver receives data in OTLP format. +package otlpreceiver diff --git a/internal/otel_collector/receiver/otlpreceiver/factory.go b/internal/otel_collector/receiver/otlpreceiver/factory.go new file mode 100644 index 00000000000..e2ee26c1e4e --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/factory.go @@ -0,0 +1,126 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/internal/sharedcomponent" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +const ( + typeStr = "otlp" + + defaultGRPCEndpoint = "0.0.0.0:4317" + defaultHTTPEndpoint = "0.0.0.0:4318" + legacyGRPCEndpoint = "0.0.0.0:55680" + legacyHTTPEndpoint = "0.0.0.0:55681" +) + +// NewFactory creates a new OTLP receiver factory. +func NewFactory() component.ReceiverFactory { + return receiverhelper.NewFactory( + typeStr, + createDefaultConfig, + receiverhelper.WithTraces(createTracesReceiver), + receiverhelper.WithMetrics(createMetricsReceiver), + receiverhelper.WithLogs(createLogReceiver)) +} + +// createDefaultConfig creates the default configuration for receiver. +func createDefaultConfig() config.Receiver { + return &Config{ + ReceiverSettings: config.NewReceiverSettings(config.NewID(typeStr)), + Protocols: Protocols{ + GRPC: &configgrpc.GRPCServerSettings{ + NetAddr: confignet.NetAddr{ + Endpoint: defaultGRPCEndpoint, + Transport: "tcp", + }, + // We almost write 0 bytes, so no need to tune WriteBufferSize. + ReadBufferSize: 512 * 1024, + }, + HTTP: &confighttp.HTTPServerSettings{ + Endpoint: defaultHTTPEndpoint, + }, + }, + } +} + +// CreateTracesReceiver creates a trace receiver based on provided config. +func createTracesReceiver( + _ context.Context, + set component.ReceiverCreateSettings, + cfg config.Receiver, + nextConsumer consumer.Traces, +) (component.TracesReceiver, error) { + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), set.Logger) + }) + + if err := r.Unwrap().(*otlpReceiver).registerTraceConsumer(nextConsumer); err != nil { + return nil, err + } + return r, nil +} + +// CreateMetricsReceiver creates a metrics receiver based on provided config. 
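A hedged aside on how the default config above can be adapted (editorial sketch; the `*Config` assertion relies on `createDefaultConfig` returning that type, as shown above, and the function name is invented):

```go
package example

import (
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

// grpcOnlyConfig derives a gRPC-only OTLP receiver config from the factory
// defaults, dropping the HTTP protocol entirely.
func grpcOnlyConfig() (*otlpreceiver.Config, error) {
	cfg := otlpreceiver.NewFactory().CreateDefaultConfig().(*otlpreceiver.Config)
	cfg.HTTP = nil // keep only gRPC on the default 0.0.0.0:4317 endpoint
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
```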
+func createMetricsReceiver( + _ context.Context, + set component.ReceiverCreateSettings, + cfg config.Receiver, + consumer consumer.Metrics, +) (component.MetricsReceiver, error) { + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), set.Logger) + }) + + if err := r.Unwrap().(*otlpReceiver).registerMetricsConsumer(consumer); err != nil { + return nil, err + } + return r, nil +} + +// CreateLogReceiver creates a log receiver based on provided config. +func createLogReceiver( + _ context.Context, + set component.ReceiverCreateSettings, + cfg config.Receiver, + consumer consumer.Logs, +) (component.LogsReceiver, error) { + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), set.Logger) + }) + + if err := r.Unwrap().(*otlpReceiver).registerLogsConsumer(consumer); err != nil { + return nil, err + } + return r, nil +} + +// This is the map of already created OTLP receivers for particular configurations. +// We maintain this map because the factory is asked for trace and metric receivers separately, +// via CreateTracesReceiver() and CreateMetricsReceiver(), but they must not +// create separate objects; they must use one otlpReceiver object per configuration. +// When the receiver is shut down, it should be removed from this map so the same configuration +// can be recreated successfully. +var receivers = sharedcomponent.NewSharedComponents() diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go new file mode 100644 index 00000000000..a9bfadf9d80 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "context" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" +) + +// Receiver is the type used to handle logs from OpenTelemetry exporters. +type Receiver struct { + nextConsumer consumer.Logs + obsrecv *obsreport.Receiver +} + +// New creates a new Receiver reference. +func New(id config.ComponentID, nextConsumer consumer.Logs) *Receiver { + return &Receiver{ + nextConsumer: nextConsumer, + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), + } +} + +// Export implements the service Export logs func. 
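The shared-receivers map above relies on the `sharedcomponent` helper's get-or-create semantics. A simplified, editorial sketch of that pattern (not the vendored implementation, which also handles removal on shutdown):

```go
package example

import "sync"

// SharedComponents hands out one underlying instance per configuration key,
// so the trace, metrics, and logs create calls share a single component.
type SharedComponents struct {
	mu    sync.Mutex
	comps map[interface{}]interface{}
}

// GetOrAdd returns the existing component for key, or creates and stores one.
func (s *SharedComponents) GetOrAdd(key interface{}, create func() interface{}) interface{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.comps == nil {
		s.comps = map[interface{}]interface{}{}
	}
	if c, ok := s.comps[key]; ok {
		return c
	}
	c := create()
	s.comps[key] = c
	return c
}
```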
+func (r *Receiver) Export(ctx context.Context, ld pdata.Logs) (otlpgrpc.LogsResponse, error) { + numLogRecords := ld.LogRecordCount() + if numLogRecords == 0 { + return otlpgrpc.NewLogsResponse(), nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = r.obsrecv.StartLogsOp(ctx) + err := r.nextConsumer.ConsumeLogs(ctx, ld) + r.obsrecv.EndLogsOp(ctx, dataFormatProtobuf, numLogRecords, err) + + return otlpgrpc.NewLogsResponse(), err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go new file mode 100644 index 00000000000..3b993ac74d2 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "context" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" +) + +// Receiver is the type used to handle metrics from OpenTelemetry exporters. +type Receiver struct { + nextConsumer consumer.Metrics + obsrecv *obsreport.Receiver +} + +// New creates a new Receiver reference. +func New(id config.ComponentID, nextConsumer consumer.Metrics) *Receiver { + return &Receiver{ + nextConsumer: nextConsumer, + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), + } +} + +// Export implements the service Export metrics func. +func (r *Receiver) Export(ctx context.Context, md pdata.Metrics) (otlpgrpc.MetricsResponse, error) { + dataPointCount := md.DataPointCount() + if dataPointCount == 0 { + return otlpgrpc.NewMetricsResponse(), nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = r.obsrecv.StartMetricsOp(ctx) + err := r.nextConsumer.ConsumeMetrics(ctx, md) + r.obsrecv.EndMetricsOp(ctx, dataFormatProtobuf, dataPointCount, err) + + return otlpgrpc.NewMetricsResponse(), err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go new file mode 100644 index 00000000000..f4e330720a9 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/obsreport" +) + +const ( + dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" +) + +// Receiver is the type used to handle spans from OpenTelemetry exporters. +type Receiver struct { + nextConsumer consumer.Traces + obsrecv *obsreport.Receiver +} + +// New creates a new Receiver reference. +func New(id config.ComponentID, nextConsumer consumer.Traces) *Receiver { + return &Receiver{ + nextConsumer: nextConsumer, + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), + } +} + +// Export implements the service Export traces func. +func (r *Receiver) Export(ctx context.Context, td pdata.Traces) (otlpgrpc.TracesResponse, error) { + // We need to ensure that it propagates the receiver name as a tag + numSpans := td.SpanCount() + if numSpans == 0 { + return otlpgrpc.NewTracesResponse(), nil + } + + if c, ok := client.FromGRPC(ctx); ok { + ctx = client.NewContext(ctx, c) + } + + ctx = r.obsrecv.StartTracesOp(ctx) + err := r.nextConsumer.ConsumeTraces(ctx, td) + r.obsrecv.EndTracesOp(ctx, dataFormatProtobuf, numSpans, err) + + return otlpgrpc.NewTracesResponse(), err +} diff --git a/internal/otel_collector/receiver/otlpreceiver/mixin.go b/internal/otel_collector/receiver/otlpreceiver/mixin.go new file mode 100644 index 00000000000..e8602b95b1b --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/mixin.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "context" + + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" +) + +// RegisterTraceReceiver registers the trace receiver with a gRPC server. 
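As a hedged illustration of the mixin documented above (editorial, not part of this diff): an application that already owns a gRPC server can attach OTLP trace ingestion directly. The listener address and the caller-supplied `consumer.Traces` are assumptions.

```go
package example

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

// serveOTLPTraces attaches the OTLP trace service to a caller-owned gRPC
// server and blocks serving it.
func serveOTLPTraces(tc consumer.Traces) {
	srv := grpc.NewServer()
	if err := otlpreceiver.RegisterTraceReceiver(context.Background(), tc, srv); err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", "0.0.0.0:4317")
	if err != nil {
		log.Fatal(err)
	}
	_ = srv.Serve(lis)
}
```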
+func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server) error { + receiver := trace.New(config.NewID("otlp"), consumer) + otlpgrpc.RegisterTracesServer(serverGRPC, receiver) + return nil +} + +// RegisterMetricsReceiver registers the metrics receiver with a gRPC server. +func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server) error { + receiver := metrics.New(config.NewID("otlp"), consumer) + otlpgrpc.RegisterMetricsServer(serverGRPC, receiver) + return nil +} + +// RegisterLogsReceiver registers the logs receiver with a gRPC server. +func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server) error { + receiver := logs.New(config.NewID("otlp"), consumer) + otlpgrpc.RegisterLogsServer(serverGRPC, receiver) + return nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/otlp.go b/internal/otel_collector/receiver/otlpreceiver/otlp.go new file mode 100644 index 00000000000..a43f4381c00 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/otlp.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "context" + "net" + "net/http" + "sync" + + "github.com/gorilla/mux" + "go.uber.org/zap" + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" +) + +const ( + pbContentType = "application/x-protobuf" + jsonContentType = "application/json" +) + +// otlpReceiver is the type that exposes Trace and Metrics reception. +type otlpReceiver struct { + cfg *Config + serverGRPC *grpc.Server + httpMux *mux.Router + serverHTTP *http.Server + + traceReceiver *trace.Receiver + metricsReceiver *metrics.Receiver + logReceiver *logs.Receiver + shutdownWG sync.WaitGroup + + logger *zap.Logger +} + +// newOtlpReceiver just creates the OpenTelemetry receiver services. It is the caller's +// responsibility to invoke the respective Start*Reception methods as well +// as the various Stop*Reception methods to end it. 
+func newOtlpReceiver(cfg *Config, logger *zap.Logger) *otlpReceiver { + r := &otlpReceiver{ + cfg: cfg, + logger: logger, + } + if cfg.HTTP != nil { + r.httpMux = mux.NewRouter() + } + + return r +} + +func (r *otlpReceiver) startGRPCServer(cfg *configgrpc.GRPCServerSettings, host component.Host) error { + r.logger.Info("Starting GRPC server on endpoint " + cfg.NetAddr.Endpoint) + + gln, err := cfg.ToListener() + if err != nil { + return err + } + r.shutdownWG.Add(1) + go func() { + defer r.shutdownWG.Done() + + if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && errGrpc != grpc.ErrServerStopped { + host.ReportFatalError(errGrpc) + } + }() + return nil +} + +func (r *otlpReceiver) startHTTPServer(cfg *confighttp.HTTPServerSettings, host component.Host) error { + r.logger.Info("Starting HTTP server on endpoint " + cfg.Endpoint) + var hln net.Listener + hln, err := r.cfg.HTTP.ToListener() + if err != nil { + return err + } + r.shutdownWG.Add(1) + go func() { + defer r.shutdownWG.Done() + + if errHTTP := r.serverHTTP.Serve(hln); errHTTP != http.ErrServerClosed { + host.ReportFatalError(errHTTP) + } + }() + return nil +} + +func (r *otlpReceiver) startProtocolServers(host component.Host) error { + var err error + if r.cfg.GRPC != nil { + var opts []grpc.ServerOption + opts, err = r.cfg.GRPC.ToServerOption(host.GetExtensions()) + if err != nil { + return err + } + r.serverGRPC = grpc.NewServer(opts...) + + if r.traceReceiver != nil { + otlpgrpc.RegisterTracesServer(r.serverGRPC, r.traceReceiver) + } + + if r.metricsReceiver != nil { + otlpgrpc.RegisterMetricsServer(r.serverGRPC, r.metricsReceiver) + } + + if r.logReceiver != nil { + otlpgrpc.RegisterLogsServer(r.serverGRPC, r.logReceiver) + } + + err = r.startGRPCServer(r.cfg.GRPC, host) + if err != nil { + return err + } + if r.cfg.GRPC.NetAddr.Endpoint == defaultGRPCEndpoint { + r.logger.Info("Setting up a second GRPC listener on legacy endpoint " + legacyGRPCEndpoint) + + // Copy the config. + cfgLegacyGRPC := r.cfg.GRPC + // And use the legacy endpoint. + cfgLegacyGRPC.NetAddr.Endpoint = legacyGRPCEndpoint + err = r.startGRPCServer(cfgLegacyGRPC, host) + if err != nil { + return err + } + } + } + if r.cfg.HTTP != nil { + r.serverHTTP = r.cfg.HTTP.ToServer( + r.httpMux, + confighttp.WithErrorHandler(errorHandler), + ) + err = r.startHTTPServer(r.cfg.HTTP, host) + if err != nil { + return err + } + if r.cfg.HTTP.Endpoint == defaultHTTPEndpoint { + r.logger.Info("Setting up a second HTTP listener on legacy endpoint " + legacyHTTPEndpoint) + + // Copy the config. + cfgLegacyHTTP := r.cfg.HTTP + // And use the legacy endpoint. + cfgLegacyHTTP.Endpoint = legacyHTTPEndpoint + err = r.startHTTPServer(cfgLegacyHTTP, host) + if err != nil { + return err + } + } + } + + return err +} + +// Start runs the trace receiver on the gRPC server. Currently +// it also enables the metrics receiver too. +func (r *otlpReceiver) Start(_ context.Context, host component.Host) error { + return r.startProtocolServers(host) +} + +// Shutdown is a method to turn off receiving. 
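One reading note on `startProtocolServers` above (an editorial observation, not a change to the vendored file): `cfgLegacyGRPC := r.cfg.GRPC` copies a pointer, not the settings struct, so assigning `NetAddr.Endpoint` afterwards also mutates `r.cfg.GRPC`. The endpoint comparison happens before the mutation, so the legacy listener still starts, but the stored config ends up pointing at the legacy endpoint. A value copy would isolate the legacy listener's settings:

```go
// Inside startProtocolServers, a struct copy instead of a pointer copy:
cfgLegacyGRPC := *r.cfg.GRPC                        // copy the settings value
cfgLegacyGRPC.NetAddr.Endpoint = legacyGRPCEndpoint // mutate only the copy
err = r.startGRPCServer(&cfgLegacyGRPC, host)
```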
+func (r *otlpReceiver) Shutdown(ctx context.Context) error { + var err error + + if r.serverHTTP != nil { + err = r.serverHTTP.Shutdown(ctx) + } + + if r.serverGRPC != nil { + r.serverGRPC.GracefulStop() + } + + r.shutdownWG.Wait() + return err +} + +var tracesPbUnmarshaler = otlp.NewProtobufTracesUnmarshaler() +var tracesJSONUnmarshaler = otlp.NewJSONTracesUnmarshaler() + +func (r *otlpReceiver) registerTraceConsumer(tc consumer.Traces) error { + if tc == nil { + return componenterror.ErrNilNextConsumer + } + r.traceReceiver = trace.New(r.cfg.ID(), tc) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/traces", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, pbContentType, r.traceReceiver, tracesPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + // For backwards compatibility see https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + r.httpMux.HandleFunc("/v1/trace", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, pbContentType, r.traceReceiver, tracesPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/traces", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, jsonContentType, r.traceReceiver, tracesJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) + // For backwards compatibility see https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + r.httpMux.HandleFunc("/v1/trace", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, jsonContentType, r.traceReceiver, tracesJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) + } + return nil +} + +var metricsPbUnmarshaler = otlp.NewProtobufMetricsUnmarshaler() +var metricsJSONUnmarshaler = otlp.NewJSONMetricsUnmarshaler() + +func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) error { + if mc == nil { + return componenterror.ErrNilNextConsumer + } + r.metricsReceiver = metrics.New(r.cfg.ID(), mc) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/metrics", func(resp http.ResponseWriter, req *http.Request) { + handleMetrics(resp, req, pbContentType, r.metricsReceiver, metricsPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/metrics", func(resp http.ResponseWriter, req *http.Request) { + handleMetrics(resp, req, jsonContentType, r.metricsReceiver, metricsJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) + } + return nil +} + +var logsPbUnmarshaler = otlp.NewProtobufLogsUnmarshaler() +var logsJSONUnmarshaler = otlp.NewJSONLogsUnmarshaler() + +func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) error { + if lc == nil { + return componenterror.ErrNilNextConsumer + } + r.logReceiver = logs.New(r.cfg.ID(), lc) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/logs", func(w http.ResponseWriter, req *http.Request) { + handleLogs(w, req, pbContentType, r.logReceiver, logsPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/logs", func(w http.ResponseWriter, req *http.Request) { + handleLogs(w, req, jsonContentType, r.logReceiver, logsJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) + } + return nil +} diff --git a/internal/otel_collector/receiver/otlpreceiver/otlphttp.go 
b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go new file mode 100644 index 00000000000..16ad9b25d26 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpreceiver + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" +) + +var jsonMarshaler = &jsonpb.Marshaler{} + +func handleTraces( + resp http.ResponseWriter, + req *http.Request, + contentType string, + tracesReceiver *trace.Receiver, + tracesUnmarshaler pdata.TracesUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } + + td, err := tracesUnmarshaler.UnmarshalTraces(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + + _, err = tracesReceiver.Export(req.Context(), td) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. + writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) +} + +func handleMetrics( + resp http.ResponseWriter, + req *http.Request, + contentType string, + metricsReceiver *metrics.Receiver, + metricsUnmarshaler pdata.MetricsUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } + + md, err := metricsUnmarshaler.UnmarshalMetrics(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + + _, err = metricsReceiver.Export(req.Context(), md) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. + writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) +} + +func handleLogs( + resp http.ResponseWriter, + req *http.Request, + contentType string, + logsReceiver *logs.Receiver, + logsUnmarshaler pdata.LogsUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } + + ld, err := logsUnmarshaler.UnmarshalLogs(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + + _, err = logsReceiver.Export(req.Context(), ld) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. 
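+	// Editorial note (assumption, not upstream text): &types.Empty{} below
+	// stands in for the OTLP export response until otlpgrpc returns a
+	// concrete type; writeResponse marshals it as Protobuf or JSON to match
+	// the request's Content-Type.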
+ writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) +} + +func readAndCloseBody(resp http.ResponseWriter, req *http.Request, contentType string) ([]byte, bool) { + body, err := ioutil.ReadAll(req.Body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return nil, false + } + if err = req.Body.Close(); err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return nil, false + } + return body, true +} + +// writeError encodes the HTTP error inside a rpc.Status message as required by the OTLP protocol. +func writeError(w http.ResponseWriter, contentType string, err error, statusCode int) { + s, ok := status.FromError(err) + if ok { + writeResponse(w, contentType, statusCode, s.Proto()) + } else { + writeErrorMsg(w, contentType, err.Error(), statusCode) + } +} + +// writeErrorMsg encodes the HTTP error message inside a rpc.Status message as required +// by the OTLP protocol. +func writeErrorMsg(w http.ResponseWriter, contentType string, errMsg string, statusCode int) { + var s *status.Status + if statusCode == http.StatusBadRequest { + s = status.New(codes.InvalidArgument, errMsg) + } else { + s = status.New(codes.Unknown, errMsg) + } + + writeResponse(w, contentType, statusCode, s.Proto()) +} + +// errorHandler encodes the HTTP error message inside a rpc.Status message as required +// by the OTLP protocol. +func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) { + writeErrorMsg(w, r.Header.Get("Content-Type"), errMsg, statusCode) +} + +// Pre-computed status with code=Internal to be used in case of a marshaling error. +var fallbackMsg = []byte(`{"code": 13, "message": "failed to marshal error message"}`) + +const fallbackContentType = "application/json" + +func writeResponse(w http.ResponseWriter, contentType string, statusCode int, rsp proto.Message) { + var err error + var msg []byte + if contentType == "application/json" { + buf := new(bytes.Buffer) + err = jsonMarshaler.Marshal(buf, rsp) + msg = buf.Bytes() + } else { + msg, err = proto.Marshal(rsp) + } + + if err != nil { + msg = fallbackMsg + contentType = fallbackContentType + statusCode = http.StatusInternalServerError + } + w.Header().Set("Content-Type", contentType) + w.WriteHeader(statusCode) + // Nothing we can do with the error if we cannot write to the response. 
+ _, _ = w.Write(msg) +} diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml new file mode 100644 index 00000000000..a204b9998ed --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_empty_config.yaml @@ -0,0 +1,15 @@ +receivers: + otlp: + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml new file mode 100644 index 00000000000..cd1ffd31a35 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_no_proto_config.yaml @@ -0,0 +1,16 @@ +receivers: + otlp: + protocols: + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml new file mode 100644 index 00000000000..2191f7bb7ad --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/bad_proto_config.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + thrift: + endpoint: "127.0.0.1:1234" + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml new file mode 100644 index 00000000000..7e9a8ab94e0 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/config.yaml @@ -0,0 +1,104 @@ +receivers: + # The following entry initializes the default OTLP receiver. + # The full name of this receiver is `otlp` and can be referenced in pipelines by 'otlp'. + otlp: + protocols: + grpc: + http: + # The following entry initializes the default OTLP receiver with only gRPC support. + otlp/only_grpc: + protocols: + grpc: + # The following entry initializes the default OTLP receiver with only http support. + otlp/only_http: + protocols: + http: + # The following entry demonstrates configuring the common receiver settings: + # - endpoint + # This configuration is of type 'otlp' and has the name 'customname' with a full name of 'otlp/customname' + # ('<type>/<name>'). To reference this configuration in a pipeline, use the full name `otlp/customname`. + otlp/customname: + protocols: + grpc: + # The receiver will listen on endpoint: "localhost:9090". + endpoint: localhost:9090 + # The following entry configures all of the keep alive settings. These settings are used to configure the receiver. + otlp/keepalive: + protocols: + grpc: + keepalive: + server_parameters: + max_connection_idle: 11s + max_connection_age: 12s + max_connection_age_grace: 13s + time: 30s + timeout: 5s + enforcement_policy: + min_time: 10s + permit_without_stream: true + # The following demonstrates how to set maximum limits on stream, message size and connection idle time. + # Note: This test yaml demonstrates settings grouped by their structure; however, all of the settings can + # be mixed and matched, like adding the maximum connection idle setting in this example. 
+ otlp/msg-size-conc-connect-max-idle: + protocols: + grpc: + max_recv_msg_size_mib: 32 + max_concurrent_streams: 16 + read_buffer_size: 1024 + write_buffer_size: 1024 + keepalive: + server_parameters: + max_connection_idle: 10s + # The following entry demonstrates how to specify TLS credentials for the server. + # Note: These files do not exist. If the receiver is started with this configuration, it will fail. + otlp/tlscredentials: + protocols: + grpc: + tls_settings: + cert_file: test.crt + key_file: test.key + http: + tls_settings: + cert_file: test.crt + key_file: test.key + # The following entry demonstrates how to specify a Unix Domain Socket for the server. + otlp/uds: + protocols: + grpc: + transport: unix + endpoint: /tmp/grpc_otlp.sock + http: + # transport: unix + endpoint: /tmp/http_otlp.sock + # The following entry demonstrates how to configure the OTLP receiver to allow Cross-Origin Resource Sharing (CORS). + # Both fully qualified domain names and the use of wildcards are supported. + otlp/cors: + protocols: + http: + cors_allowed_origins: + - https://*.test.com # Wildcard subdomain. Allows domains like https://www.test.com and https://foo.test.com but not https://wwwtest.com. + - https://test.com # Fully qualified domain name. Allows https://test.com only. + # The following entry demonstrates how to use CORS Header configuration. + otlp/corsheader: + protocols: + http: + cors_allowed_origins: + - https://*.test.com # Wildcard subdomain. Allows domains like https://www.test.com and https://foo.test.com but not https://wwwtest.com. + - https://test.com # Fully qualified domain name. Allows https://test.com only. + cors_allowed_headers: + - ExampleHeader +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [otlp/customname] + processors: [nop] + exporters: [nop] + metrics: + receivers: [otlp] + exporters: [nop] diff --git a/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml b/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml new file mode 100644 index 00000000000..841eac51f55 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/testdata/typo_default_proto_config.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + grpc: + htttp: + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/receiver/receiverhelper/doc.go b/internal/otel_collector/receiver/receiverhelper/doc.go new file mode 100644 index 00000000000..412bb53dd92 --- /dev/null +++ b/internal/otel_collector/receiver/receiverhelper/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package receiverhelper contains utilities for receivers. 
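+//
+// In particular, NewFactory (in factory.go below) cuts down the boilerplate
+// needed to implement component.ReceiverFactory. A sketch of typical use,
+// where createDefaultConfig and createTracesReceiver are illustrative names
+// rather than part of this package:
+//
+//	factory := receiverhelper.NewFactory(
+//		"examplereceiver",
+//		createDefaultConfig,
+//		receiverhelper.WithTraces(createTracesReceiver),
+//	)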
+package receiverhelper
diff --git a/internal/otel_collector/receiver/receiverhelper/factory.go b/internal/otel_collector/receiver/receiverhelper/factory.go
new file mode 100644
index 00000000000..650afcf7d2c
--- /dev/null
+++ b/internal/otel_collector/receiver/receiverhelper/factory.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package receiverhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+)
+
+// FactoryOption applies changes to the receiver factory.
+type FactoryOption func(o *factory)
+
+// WithTraces overrides the default "error not supported" implementation for CreateTracesReceiver.
+func WithTraces(createTracesReceiver CreateTracesReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createTracesReceiver = createTracesReceiver
+	}
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetricsReceiver.
+func WithMetrics(createMetricsReceiver CreateMetricsReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createMetricsReceiver = createMetricsReceiver
+	}
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogsReceiver.
+func WithLogs(createLogsReceiver CreateLogsReceiver) FactoryOption {
+	return func(o *factory) {
+		o.createLogsReceiver = createLogsReceiver
+	}
+}
+
+// CreateDefaultConfig is the equivalent of component.ReceiverFactory.CreateDefaultConfig()
+type CreateDefaultConfig func() config.Receiver
+
+// CreateTracesReceiver is the equivalent of component.ReceiverFactory.CreateTracesReceiver()
+type CreateTracesReceiver func(context.Context, component.ReceiverCreateSettings, config.Receiver, consumer.Traces) (component.TracesReceiver, error)
+
+// CreateMetricsReceiver is the equivalent of component.ReceiverFactory.CreateMetricsReceiver()
+type CreateMetricsReceiver func(context.Context, component.ReceiverCreateSettings, config.Receiver, consumer.Metrics) (component.MetricsReceiver, error)
+
+// CreateLogsReceiver is the equivalent of component.ReceiverFactory.CreateLogsReceiver()
+type CreateLogsReceiver func(context.Context, component.ReceiverCreateSettings, config.Receiver, consumer.Logs) (component.LogsReceiver, error)
+
+type factory struct {
+	cfgType               config.Type
+	createDefaultConfig   CreateDefaultConfig
+	createTracesReceiver  CreateTracesReceiver
+	createMetricsReceiver CreateMetricsReceiver
+	createLogsReceiver    CreateLogsReceiver
+}
+
+// NewFactory returns a component.ReceiverFactory.
+func NewFactory(
+	cfgType config.Type,
+	createDefaultConfig CreateDefaultConfig,
+	options ...FactoryOption) component.ReceiverFactory {
+	f := &factory{
+		cfgType:             cfgType,
+		createDefaultConfig: createDefaultConfig,
+	}
+	for _, opt := range options {
+		opt(f)
+	}
+	return f
+}
+
+// Type gets the type of the Receiver config created by this factory.
+func (f *factory) Type() config.Type {
+	return f.cfgType
+}
+
+// CreateDefaultConfig creates the default configuration for the receiver.
+func (f *factory) CreateDefaultConfig() config.Receiver {
+	return f.createDefaultConfig()
+}
+
+// CreateTracesReceiver creates a component.TracesReceiver based on this config.
+func (f *factory) CreateTracesReceiver(
+	ctx context.Context,
+	set component.ReceiverCreateSettings,
+	cfg config.Receiver,
+	nextConsumer consumer.Traces) (component.TracesReceiver, error) {
+	if f.createTracesReceiver != nil {
+		return f.createTracesReceiver(ctx, set, cfg, nextConsumer)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
+
+// CreateMetricsReceiver creates a component.MetricsReceiver based on this config.
+func (f *factory) CreateMetricsReceiver(
+	ctx context.Context,
+	set component.ReceiverCreateSettings,
+	cfg config.Receiver,
+	nextConsumer consumer.Metrics) (component.MetricsReceiver, error) {
+	if f.createMetricsReceiver != nil {
+		return f.createMetricsReceiver(ctx, set, cfg, nextConsumer)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
+
+// CreateLogsReceiver creates a component.LogsReceiver based on this config.
+func (f *factory) CreateLogsReceiver(
+	ctx context.Context,
+	set component.ReceiverCreateSettings,
+	cfg config.Receiver,
+	nextConsumer consumer.Logs,
+) (component.LogsReceiver, error) {
+	if f.createLogsReceiver != nil {
+		return f.createLogsReceiver(ctx, set, cfg, nextConsumer)
+	}
+	return nil, componenterror.ErrDataTypeIsNotSupported
+}
diff --git a/internal/otel_collector/receiver/scrapererror/doc.go b/internal/otel_collector/receiver/scrapererror/doc.go
new file mode 100644
index 00000000000..acc3c48e365
--- /dev/null
+++ b/internal/otel_collector/receiver/scrapererror/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scrapererror provides custom error types for scrapers.
+package scrapererror
diff --git a/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go b/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go
new file mode 100644
index 00000000000..6487fd46d5e
--- /dev/null
+++ b/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
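+
+// A sketch of how a scraper might use PartialScrapeError (defined below) to
+// report that only part of its data was collected; the wrapped error and the
+// count of 3 failed metrics are illustrative:
+//
+//	return metrics, scrapererror.NewPartialScrapeError(err, 3)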
+
+package scrapererror
+
+import "errors"
+
+// PartialScrapeError is an error that represents
+// that a subset of metrics failed to be scraped.
+type PartialScrapeError struct {
+	error
+	Failed int
+}
+
+// NewPartialScrapeError creates a PartialScrapeError for the failed metrics.
+// Use this error type only when a subset of the data failed to be scraped.
+func NewPartialScrapeError(err error, failed int) PartialScrapeError {
+	return PartialScrapeError{
+		error:  err,
+		Failed: failed,
+	}
+}
+
+// IsPartialScrapeError checks if an error was wrapped with PartialScrapeError.
+func IsPartialScrapeError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	var partialScrapeErr PartialScrapeError
+	return errors.As(err, &partialScrapeErr)
+}
diff --git a/internal/otel_collector/receiver/scrapererror/scrapeerror.go b/internal/otel_collector/receiver/scrapererror/scrapeerror.go
new file mode 100644
index 00000000000..de6e499c1c2
--- /dev/null
+++ b/internal/otel_collector/receiver/scrapererror/scrapeerror.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scrapererror
+
+import (
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+// ScrapeErrors contains multiple PartialScrapeErrors and can also contain generic errors.
+type ScrapeErrors struct {
+	errs              []error
+	failedScrapeCount int
+}
+
+// AddPartial adds a PartialScrapeError with the provided failed count and error.
+func (s *ScrapeErrors) AddPartial(failed int, err error) {
+	s.errs = append(s.errs, NewPartialScrapeError(err, failed))
+	s.failedScrapeCount += failed
+}
+
+// Add adds a regular error.
+func (s *ScrapeErrors) Add(err error) {
+	s.errs = append(s.errs, err)
+}
+
+// Combine converts a slice of errors into one error.
+// It will return a PartialScrapeError if at least one error in the slice is a PartialScrapeError.
+func (s *ScrapeErrors) Combine() error {
+	partialScrapeErr := false
+	for _, err := range s.errs {
+		if IsPartialScrapeError(err) {
+			partialScrapeErr = true
+		}
+	}
+
+	if !partialScrapeErr {
+		return consumererror.Combine(s.errs)
+	}
+
+	return NewPartialScrapeError(consumererror.Combine(s.errs), s.failedScrapeCount)
+}
diff --git a/internal/otel_collector/receiver/scraperhelper/doc.go b/internal/otel_collector/receiver/scraperhelper/doc.go
new file mode 100644
index 00000000000..a8bdb3877f4
--- /dev/null
+++ b/internal/otel_collector/receiver/scraperhelper/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scraperhelper provides utilities for scrapers.
+package scraperhelper
diff --git a/internal/otel_collector/receiver/scraperhelper/scraper.go b/internal/otel_collector/receiver/scraperhelper/scraper.go
new file mode 100644
index 00000000000..d891431ec8d
--- /dev/null
+++ b/internal/otel_collector/receiver/scraperhelper/scraper.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scraperhelper
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenthelper"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/receiver/scrapererror"
+)
+
+// ScrapeMetrics scrapes metrics.
+type ScrapeMetrics func(context.Context) (pdata.MetricSlice, error)
+
+// ScrapeResourceMetrics scrapes resource metrics.
+type ScrapeResourceMetrics func(context.Context) (pdata.ResourceMetricsSlice, error)
+
+type baseSettings struct {
+	componentOptions []componenthelper.Option
+}
+
+// ScraperOption applies changes to internal options.
+type ScraperOption func(*baseSettings)
+
+// Scraper is the base interface for scrapers.
+type Scraper interface {
+	component.Component
+
+	// ID returns the scraper id.
+	ID() config.ComponentID
+	Scrape(context.Context, config.ComponentID) (pdata.Metrics, error)
+}
+
+type baseScraper struct {
+	component.Component
+	id config.ComponentID
+}
+
+func (b baseScraper) ID() config.ComponentID {
+	return b.id
+}
+
+// WithStart sets the function that will be called on startup.
+func WithStart(start componenthelper.StartFunc) ScraperOption {
+	return func(o *baseSettings) {
+		o.componentOptions = append(o.componentOptions, componenthelper.WithStart(start))
+	}
+}
+
+// WithShutdown sets the function that will be called on shutdown.
+func WithShutdown(shutdown componenthelper.ShutdownFunc) ScraperOption {
+	return func(o *baseSettings) {
+		o.componentOptions = append(o.componentOptions, componenthelper.WithShutdown(shutdown))
+	}
+}
+
+type metricsScraper struct {
+	baseScraper
+	ScrapeMetrics
+}
+
+var _ Scraper = (*metricsScraper)(nil)
+
+// NewMetricsScraper creates a Scraper that calls Scrape at the specified
+// collection interval, reports observability information, and passes the
+// scraped metrics to the next consumer.
+func NewMetricsScraper( + name string, + scrape ScrapeMetrics, + options ...ScraperOption, +) Scraper { + set := &baseSettings{} + for _, op := range options { + op(set) + } + + ms := &metricsScraper{ + baseScraper: baseScraper{ + Component: componenthelper.New(set.componentOptions...), + id: config.NewID(config.Type(name)), + }, + ScrapeMetrics: scrape, + } + + return ms +} + +func (ms metricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.Metrics, error) { + ctx = obsreport.ScraperContext(ctx, receiverID, ms.ID()) + scrp := obsreport.NewScraper(obsreport.ScraperSettings{ReceiverID: receiverID, Scraper: ms.ID()}) + ctx = scrp.StartMetricsOp(ctx) + metrics, err := ms.ScrapeMetrics(ctx) + count := 0 + md := pdata.Metrics{} + if err == nil || scrapererror.IsPartialScrapeError(err) { + md = pdata.NewMetrics() + metrics.MoveAndAppendTo(md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics()) + count = md.MetricCount() + } + scrp.EndMetricsOp(ctx, count, err) + return md, err +} + +type resourceMetricsScraper struct { + baseScraper + ScrapeResourceMetrics +} + +var _ Scraper = (*resourceMetricsScraper)(nil) + +// NewResourceMetricsScraper creates a Scraper that calls Scrape at the +// specified collection interval, reports observability information, and +// passes the scraped resource metrics to the next consumer. +func NewResourceMetricsScraper( + id config.ComponentID, + scrape ScrapeResourceMetrics, + options ...ScraperOption, +) Scraper { + set := &baseSettings{} + for _, op := range options { + op(set) + } + + rms := &resourceMetricsScraper{ + baseScraper: baseScraper{ + Component: componenthelper.New(set.componentOptions...), + id: id, + }, + ScrapeResourceMetrics: scrape, + } + + return rms +} + +func (rms resourceMetricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.Metrics, error) { + ctx = obsreport.ScraperContext(ctx, receiverID, rms.ID()) + scrp := obsreport.NewScraper(obsreport.ScraperSettings{ReceiverID: receiverID, Scraper: rms.ID()}) + ctx = scrp.StartMetricsOp(ctx) + resourceMetrics, err := rms.ScrapeResourceMetrics(ctx) + + count := 0 + md := pdata.Metrics{} + if err == nil || scrapererror.IsPartialScrapeError(err) { + md = pdata.NewMetrics() + resourceMetrics.MoveAndAppendTo(md.ResourceMetrics()) + count = md.MetricCount() + } + + scrp.EndMetricsOp(ctx, count, err) + return md, err +} diff --git a/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go new file mode 100644 index 00000000000..a7f994ea114 --- /dev/null +++ b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go @@ -0,0 +1,205 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
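+
+// As a sketch of how the scraper pieces fit together (scrapeOnce, logger and
+// nextConsumer are illustrative names, not part of this package): a scrape
+// function accumulates failures via scrapererror.ScrapeErrors, is wrapped by
+// NewMetricsScraper, and is driven by the controller defined below:
+//
+//	func scrapeOnce(ctx context.Context) (pdata.MetricSlice, error) {
+//		var errs scrapererror.ScrapeErrors
+//		metrics := pdata.NewMetricSlice()
+//		// ... append metrics; record partial failures via errs.AddPartial.
+//		return metrics, errs.Combine()
+//	}
+//
+//	cfg := DefaultScraperControllerSettings("examplescraper")
+//	recv, err := NewScraperControllerReceiver(&cfg, logger, nextConsumer,
+//		AddScraper(NewMetricsScraper("examplescraper", scrapeOnce)))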
+
+package scraperhelper
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/obsreport"
+	"go.opentelemetry.io/collector/receiver/scrapererror"
+)
+
+// ScraperControllerSettings defines common settings for a scraper controller
+// configuration. Scraper controller receivers can embed this struct, instead
+// of config.ReceiverSettings, and extend it with more fields if needed.
+type ScraperControllerSettings struct {
+	config.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
+	CollectionInterval      time.Duration            `mapstructure:"collection_interval"`
+}
+
+// DefaultScraperControllerSettings returns default scraper controller
+// settings with a collection interval of one minute.
+func DefaultScraperControllerSettings(cfgType config.Type) ScraperControllerSettings {
+	return ScraperControllerSettings{
+		ReceiverSettings:   config.NewReceiverSettings(config.NewID(cfgType)),
+		CollectionInterval: time.Minute,
+	}
+}
+
+// ScraperControllerOption applies changes to internal options.
+type ScraperControllerOption func(*controller)
+
+// AddScraper configures the provided scraper to be called at the specified
+// collection interval.
+//
+// Observability information will be reported, and the scraped metrics
+// will be passed to the next consumer.
+func AddScraper(scraper Scraper) ScraperControllerOption {
+	return func(o *controller) {
+		o.scrapers = append(o.scrapers, scraper)
+	}
+}
+
+// WithTickerChannel allows you to override the scraper controller's ticker
+// channel to specify when scrape is called. This is only expected to be
+// used by tests.
+func WithTickerChannel(tickerCh <-chan time.Time) ScraperControllerOption {
+	return func(o *controller) {
+		o.tickerCh = tickerCh
+	}
+}
+
+type controller struct {
+	id                 config.ComponentID
+	logger             *zap.Logger
+	collectionInterval time.Duration
+	nextConsumer       consumer.Metrics
+
+	scrapers []Scraper
+
+	tickerCh <-chan time.Time
+
+	initialized bool
+	done        chan struct{}
+	terminated  chan struct{}
+
+	obsrecv *obsreport.Receiver
+}
+
+// NewScraperControllerReceiver creates a Receiver with the configured options that can control multiple scrapers.
+func NewScraperControllerReceiver(
+	cfg *ScraperControllerSettings,
+	logger *zap.Logger,
+	nextConsumer consumer.Metrics,
+	options ...ScraperControllerOption,
+) (component.Receiver, error) {
+	if nextConsumer == nil {
+		return nil, componenterror.ErrNilNextConsumer
+	}
+
+	if cfg.CollectionInterval <= 0 {
+		return nil, errors.New("collection_interval must be a positive duration")
+	}
+
+	sc := &controller{
+		id:                 cfg.ID(),
+		logger:             logger,
+		collectionInterval: cfg.CollectionInterval,
+		nextConsumer:       nextConsumer,
+		done:               make(chan struct{}),
+		terminated:         make(chan struct{}),
+		obsrecv:            obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: cfg.ID(), Transport: ""}),
+	}
+
+	for _, op := range options {
+		op(sc)
+	}
+
+	return sc, nil
+}
+
+// Start the receiver, invoked during service start.
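+// It starts every registered scraper first and only then kicks off the
+// collection ticker, so no scrape is attempted before all scrapers are ready.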
+func (sc *controller) Start(ctx context.Context, host component.Host) error { + for _, scraper := range sc.scrapers { + if err := scraper.Start(ctx, host); err != nil { + return err + } + } + + sc.initialized = true + sc.startScraping() + return nil +} + +// Shutdown the receiver, invoked during service shutdown. +func (sc *controller) Shutdown(ctx context.Context) error { + sc.stopScraping() + + // wait until scraping ticker has terminated + if sc.initialized { + <-sc.terminated + } + + var errs []error + for _, scraper := range sc.scrapers { + if err := scraper.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + + return consumererror.Combine(errs) +} + +// startScraping initiates a ticker that calls Scrape based on the configured +// collection interval. +func (sc *controller) startScraping() { + go func() { + if sc.tickerCh == nil { + ticker := time.NewTicker(sc.collectionInterval) + defer ticker.Stop() + + sc.tickerCh = ticker.C + } + + for { + select { + case <-sc.tickerCh: + sc.scrapeMetricsAndReport(context.Background()) + case <-sc.done: + sc.terminated <- struct{}{} + return + } + } + }() +} + +// scrapeMetricsAndReport calls the Scrape function for each of the configured +// Scrapers, records observability information, and passes the scraped metrics +// to the next component. +func (sc *controller) scrapeMetricsAndReport(ctx context.Context) { + metrics := pdata.NewMetrics() + + for _, scraper := range sc.scrapers { + md, err := scraper.Scrape(ctx, sc.id) + if err != nil { + sc.logger.Error("Error scraping metrics", zap.Error(err), zap.Stringer("scraper", scraper.ID())) + + if !scrapererror.IsPartialScrapeError(err) { + continue + } + } + md.ResourceMetrics().MoveAndAppendTo(metrics.ResourceMetrics()) + } + + dataPointCount := metrics.DataPointCount() + ctx = sc.obsrecv.StartMetricsOp(ctx) + err := sc.nextConsumer.ConsumeMetrics(ctx, metrics) + sc.obsrecv.EndMetricsOp(ctx, "", dataPointCount, err) +} + +// stopScraping stops the ticker +func (sc *controller) stopScraping() { + close(sc.done) +} diff --git a/internal/otel_collector/service/collector.go b/internal/otel_collector/service/collector.go new file mode 100644 index 00000000000..7ad04497f9d --- /dev/null +++ b/internal/otel_collector/service/collector.go @@ -0,0 +1,395 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package service handles the command-line, configuration, and runs the +// OpenTelemetry Collector. 
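+//
+// A minimal invocation, as a sketch (error handling elided; the BuildInfo
+// values and the use of the default factories are illustrative):
+//
+//	factories, _ := defaultcomponents.Components()
+//	col, _ := service.New(service.CollectorSettings{
+//		BuildInfo: component.BuildInfo{Command: "otelcol", Version: "0.0.1"},
+//		Factories: factories,
+//	})
+//	_ = col.Run()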
+package service
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"os/signal"
+	"runtime"
+	"syscall"
+
+	"github.com/spf13/cobra"
+	"go.opentelemetry.io/contrib/zpages"
+	"go.opentelemetry.io/otel"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configcheck"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/config/configunmarshaler"
+	"go.opentelemetry.io/collector/config/experimental/configsource"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/extension/ballastextension"
+	"go.opentelemetry.io/collector/internal/collector/telemetry"
+	"go.opentelemetry.io/collector/service/internal"
+	"go.opentelemetry.io/collector/service/internal/builder"
+	"go.opentelemetry.io/collector/service/parserprovider"
+)
+
+// State defines the Collector's state.
+type State int
+
+const (
+	Starting State = iota
+	Running
+	Closing
+	Closed
+)
+
+// (Internal note) Collector lifecycle:
+// - New constructs a new Collector.
+// - Run starts the collector and calls (*Collector).execute.
+// - execute calls setupConfigurationComponents to handle configuration.
+//   If the configuration parser fails, the collector's config can be reloaded.
+//   The collector can be shut down if the parser gets a shutdown error.
+// - execute runs runAndWaitForShutdownEvent and waits for a shutdown event.
+//   SIGINT and SIGTERM, errors, and (*Collector).Shutdown can trigger the shutdown events.
+// - Upon shutdown, pipelines are notified, then pipelines and extensions are shut down.
+// - Users can call (*Collector).Shutdown at any time to shut down the collector.
+
+// Collector represents a server providing the OpenTelemetry Collector service.
+type Collector struct {
+	info    component.BuildInfo
+	rootCmd *cobra.Command
+	logger  *zap.Logger
+
+	tracerProvider      trace.TracerProvider
+	zPagesSpanProcessor *zpages.SpanProcessor
+
+	service      *service
+	stateChannel chan State
+
+	factories component.Factories
+
+	parserProvider    parserprovider.ParserProvider
+	configUnmarshaler configunmarshaler.ConfigUnmarshaler
+
+	// shutdownChan is used to terminate the collector.
+	shutdownChan chan struct{}
+
+	// signalsChannel is used to receive termination signals from the OS.
+	signalsChannel chan os.Signal
+
+	allowGracefulShutdown bool
+
+	// asyncErrorChannel is used to signal a fatal error from any component.
+	asyncErrorChannel chan error
+}
+
+// New creates and returns a new instance of Collector.
+func New(set CollectorSettings) (*Collector, error) {
+	if err := configcheck.ValidateConfigFromFactories(set.Factories); err != nil {
+		return nil, err
+	}
+
+	col := &Collector{
+		info:              set.BuildInfo,
+		factories:         set.Factories,
+		stateChannel:      make(chan State, Closed+1),
+		parserProvider:    set.ParserProvider,
+		configUnmarshaler: set.ConfigUnmarshaler,
+		// The setting is expressed as a negation so that existing behavior is
+		// preserved; internally, allowGracefulShutdown is more readable.
+		allowGracefulShutdown: !set.DisableGracefulShutdown,
+	}
+
+	if col.parserProvider == nil {
+		// use default provider.
+		col.parserProvider = parserprovider.Default()
+	}
+
+	if col.configUnmarshaler == nil {
+		// use default provider.
+		col.configUnmarshaler = configunmarshaler.NewDefault()
+	}
+
+	rootCmd := &cobra.Command{
+		Use:     set.BuildInfo.Command,
+		Version: set.BuildInfo.Version,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			var err error
+			if col.logger, err = newLogger(set.LoggingOptions); err != nil {
+				return fmt.Errorf("failed to get logger: %w", err)
+			}
+
+			col.zPagesSpanProcessor = zpages.NewSpanProcessor()
+			col.tracerProvider = sdktrace.NewTracerProvider(
+				sdktrace.WithSampler(internal.AlwaysRecord()),
+				sdktrace.WithSpanProcessor(col.zPagesSpanProcessor))
+
+			// Set the constructed tracer provider as Global, in case any component uses the
+			// global TracerProvider.
+			otel.SetTracerProvider(col.tracerProvider)
+
+			return col.execute(cmd.Context())
+		},
+	}
+
+	// TODO: coalesce this code and expose this information to other components.
+	flagSet := new(flag.FlagSet)
+	addFlagsFns := []func(*flag.FlagSet){
+		configtelemetry.Flags,
+		parserprovider.Flags,
+		telemetry.Flags,
+		builder.Flags,
+		loggerFlags,
+	}
+	for _, addFlags := range addFlagsFns {
+		addFlags(flagSet)
+	}
+	rootCmd.Flags().AddGoFlagSet(flagSet)
+	col.rootCmd = rootCmd
+
+	return col, nil
+}
+
+// Run starts the collector according to the command and configuration
+// given by the user, and waits for it to complete.
+// Consecutive calls to Run are not allowed; Run shouldn't be called
+// once a collector is shut down.
+func (col *Collector) Run() error {
+	// From this point on do not show usage in case of error.
+	col.rootCmd.SilenceUsage = true
+
+	return col.rootCmd.Execute()
+}
+
+// GetStateChannel returns the state channel of the collector server.
+func (col *Collector) GetStateChannel() chan State {
+	return col.stateChannel
+}
+
+// Command returns the Collector's root command.
+func (col *Collector) Command() *cobra.Command {
+	return col.rootCmd
+}
+
+// GetLogger returns the logger used by the Collector.
+// The logger is initialized after collector server start.
+func (col *Collector) GetLogger() *zap.Logger {
+	return col.logger
+}
+
+// Shutdown shuts down the collector server.
+func (col *Collector) Shutdown() {
+	// TODO: Implement a proper shutdown with graceful draining of the pipeline.
+	// See https://github.com/open-telemetry/opentelemetry-collector/issues/483.
+	defer func() {
+		if r := recover(); r != nil {
+			col.logger.Info("shutdownChan already closed")
+		}
+	}()
+	close(col.shutdownChan)
+}
+
+func (col *Collector) setupTelemetry(ballastSizeBytes uint64) error {
+	col.logger.Info("Setting up own telemetry...")
+
+	err := collectorTelemetry.init(col.asyncErrorChannel, ballastSizeBytes, col.logger)
+	if err != nil {
+		return fmt.Errorf("failed to initialize telemetry: %w", err)
+	}
+
+	return nil
+}
+
+// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
+func (col *Collector) runAndWaitForShutdownEvent() {
+	col.logger.Info("Everything is ready. Begin running and processing data.")
+
+	col.signalsChannel = make(chan os.Signal, 1)
+	// Only notify with SIGTERM and SIGINT if graceful shutdown is enabled.
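+	// (When running as a Windows service, stop requests arrive instead by
+	// writing syscall.SIGTERM directly to signalsChannel; see collector_windows.go.)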
+	if col.allowGracefulShutdown {
+		signal.Notify(col.signalsChannel, os.Interrupt, syscall.SIGTERM)
+	}
+
+	col.shutdownChan = make(chan struct{})
+	col.stateChannel <- Running
+	select {
+	case err := <-col.asyncErrorChannel:
+		col.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
+	case s := <-col.signalsChannel:
+		col.logger.Info("Received signal from OS", zap.String("signal", s.String()))
+	case <-col.shutdownChan:
+		col.logger.Info("Received shutdown request")
+	}
+	col.stateChannel <- Closing
+}
+
+// setupConfigurationComponents loads the config and starts the components. If all the steps succeed, it
+// sets col.service to the service that is now running.
+func (col *Collector) setupConfigurationComponents(ctx context.Context) error {
+	col.logger.Info("Loading configuration...")
+
+	cp, err := col.parserProvider.Get()
+	if err != nil {
+		return fmt.Errorf("cannot load configuration parser: %w", err)
+	}
+
+	cfg, err := col.configUnmarshaler.Unmarshal(cp, col.factories)
+	if err != nil {
+		return fmt.Errorf("cannot load configuration: %w", err)
+	}
+
+	if err = cfg.Validate(); err != nil {
+		return fmt.Errorf("invalid configuration: %w", err)
+	}
+
+	col.logger.Info("Applying configuration...")
+
+	service, err := newService(&svcSettings{
+		BuildInfo:           col.info,
+		Factories:           col.factories,
+		Config:              cfg,
+		Logger:              col.logger,
+		TracerProvider:      col.tracerProvider,
+		ZPagesSpanProcessor: col.zPagesSpanProcessor,
+		AsyncErrorChannel:   col.asyncErrorChannel,
+	})
+	if err != nil {
+		return err
+	}
+
+	err = service.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	col.service = service
+
+	// If the provider is watchable, start a goroutine watching for updates.
+	if watchable, ok := col.parserProvider.(parserprovider.Watchable); ok {
+		go func() {
+			err := watchable.WatchForUpdate()
+			switch {
+			// TODO: Move configsource.ErrSessionClosed to the parserprovider package to avoid depending on configsource.
+			case errors.Is(err, configsource.ErrSessionClosed):
+				// This is the shutdown of the whole collector server; nothing to do.
+				col.logger.Info("Config WatchForUpdate closed", zap.Error(err))
+				return
+			default:
+				col.logger.Warn("Config WatchForUpdate exited", zap.Error(err))
+				if err := col.reloadService(context.Background()); err != nil {
+					col.asyncErrorChannel <- err
+				}
+			}
+		}()
+	}
+
+	return nil
+}
+
+func (col *Collector) execute(ctx context.Context) error {
+	col.logger.Info("Starting "+col.info.Command+"...",
+		zap.String("Version", col.info.Version),
+		zap.Int("NumCPU", runtime.NumCPU()),
+	)
+	col.stateChannel <- Starting
+
+	// Warn if the deprecated `mem-ballast-size-mib` flag is still in use.
+	// TODO: remove all `mem-ballast-size-mib` footprints after some baking time.
+	if builder.MemBallastSize() > 0 {
+		col.logger.Warn("`mem-ballast-size-mib` command line option has been deprecated. Please use `ballast extension` instead!")
+	}
+
+	col.asyncErrorChannel = make(chan error)
+
+	err := col.setupConfigurationComponents(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Get ballastSizeBytes if the ballast extension is enabled and set up telemetry.
+	err = col.setupTelemetry(col.getBallastSize())
+	if err != nil {
+		return err
+	}
+
+	// Everything is ready, now run until an event requiring shutdown happens.
+	col.runAndWaitForShutdownEvent()
+
+	// Accumulate errors and proceed with shutting down remaining components.
+	var errs []error
+
+	// Begin shutdown sequence.
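+	// The config provider is closed first, then the running service (pipelines
+	// and extensions) is shut down, and finally the collector's own telemetry;
+	// errors are accumulated rather than aborting the sequence midway.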
+	col.logger.Info("Starting shutdown...")
+
+	if closable, ok := col.parserProvider.(parserprovider.Closeable); ok {
+		if err := closable.Close(ctx); err != nil {
+			errs = append(errs, fmt.Errorf("failed to close config: %w", err))
+		}
+	}
+
+	if col.service != nil {
+		if err := col.service.Shutdown(ctx); err != nil {
+			errs = append(errs, fmt.Errorf("failed to shutdown service: %w", err))
+		}
+	}
+
+	if err := collectorTelemetry.shutdown(); err != nil {
+		errs = append(errs, fmt.Errorf("failed to shutdown collector telemetry: %w", err))
+	}
+
+	col.logger.Info("Shutdown complete.")
+	col.stateChannel <- Closed
+	close(col.stateChannel)
+
+	return consumererror.Combine(errs)
+}
+
+// reloadService shuts down the current col.service and sets up a new one according
+// to the latest configuration. It requires that col.parserProvider and col.factories
+// are properly populated to finish successfully.
+func (col *Collector) reloadService(ctx context.Context) error {
+	if closeable, ok := col.parserProvider.(parserprovider.Closeable); ok {
+		if err := closeable.Close(ctx); err != nil {
+			return fmt.Errorf("failed to close the current config provider: %w", err)
+		}
+	}
+
+	if col.service != nil {
+		retiringService := col.service
+		col.service = nil
+		if err := retiringService.Shutdown(ctx); err != nil {
+			return fmt.Errorf("failed to shutdown the retiring service: %w", err)
+		}
+	}
+
+	if err := col.setupConfigurationComponents(ctx); err != nil {
+		return fmt.Errorf("failed to setup configuration components: %w", err)
+	}
+
+	return nil
+}
+
+func (col *Collector) getBallastSize() uint64 {
+	var ballastSize uint64
+	extensions := col.service.GetExtensions()
+	for _, extension := range extensions {
+		if ext, ok := extension.(*ballastextension.MemoryBallast); ok {
+			ballastSize = ext.GetBallastSize()
+			break
+		}
+	}
+	return ballastSize
+}
diff --git a/internal/otel_collector/service/collector_windows.go b/internal/otel_collector/service/collector_windows.go
new file mode 100644
index 00000000000..814a453ad6a
--- /dev/null
+++ b/internal/otel_collector/service/collector_windows.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package service
+
+import (
+	"fmt"
+	"syscall"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"golang.org/x/sys/windows/svc"
+	"golang.org/x/sys/windows/svc/eventlog"
+)
+
+type WindowsService struct {
+	settings CollectorSettings
+	col      *Collector
+}
+
+func NewWindowsService(set CollectorSettings) *WindowsService {
+	return &WindowsService{settings: set}
+}
+
+// Execute implements https://godoc.org/golang.org/x/sys/windows/svc#Handler
+func (s *WindowsService) Execute(args []string, requests <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
+	// The first argument supplied to service.Execute is the service name.
+	// If this is not provided for some reason, raise a relevant error to the system event log.
+	if len(args) == 0 {
+		return false, 1213 // 1213: ERROR_INVALID_SERVICENAME
+	}
+
+	elog, err := openEventLog(args[0])
+	if err != nil {
+		return false, 1501 // 1501: ERROR_EVENTLOG_CANT_START
+	}
+
+	colErrorChannel := make(chan error, 1)
+
+	changes <- svc.Status{State: svc.StartPending}
+	if err = s.start(elog, colErrorChannel); err != nil {
+		elog.Error(3, fmt.Sprintf("failed to start service: %v", err))
+		return false, 1064 // 1064: ERROR_EXCEPTION_IN_SERVICE
+	}
+	changes <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}
+
+	for req := range requests {
+		switch req.Cmd {
+		case svc.Interrogate:
+			changes <- req.CurrentStatus
+
+		case svc.Stop, svc.Shutdown:
+			changes <- svc.Status{State: svc.StopPending}
+			if err := s.stop(colErrorChannel); err != nil {
+				elog.Error(3, fmt.Sprintf("errors occurred while shutting down the service: %v", err))
+			}
+			changes <- svc.Status{State: svc.Stopped}
+			return false, 0
+
+		default:
+			elog.Error(3, fmt.Sprintf("unexpected service control request #%d", req.Cmd))
+			return false, 1052 // 1052: ERROR_INVALID_SERVICE_CONTROL
+		}
+	}
+
+	return false, 0
+}
+
+func (s *WindowsService) start(elog *eventlog.Log, colErrorChannel chan error) error {
+	var err error
+	s.col, err = newWithWindowsEventLogCore(s.settings, elog)
+	if err != nil {
+		return err
+	}
+
+	// col.Run blocks until receiving a SIGTERM signal, so it needs to be started
+	// asynchronously, but it will exit early if an error occurs on startup
+	go func() { colErrorChannel <- s.col.Run() }()
+
+	// wait until the collector server is in the Running state
+	go func() {
+		for state := range s.col.GetStateChannel() {
+			if state == Running {
+				colErrorChannel <- nil
+				break
+			}
+		}
+	}()
+
+	// wait until the collector server is in the Running state, or an error was returned
+	return <-colErrorChannel
+}
+
+func (s *WindowsService) stop(colErrorChannel chan error) error {
+	// simulate a SIGTERM signal to terminate the collector server
+	s.col.signalsChannel <- syscall.SIGTERM
+	// return the response of col.Run
+	return <-colErrorChannel
+}
+
+func openEventLog(serviceName string) (*eventlog.Log, error) {
+	elog, err := eventlog.Open(serviceName)
+	if err != nil {
+		return nil, fmt.Errorf("service failed to open event log: %w", err)
+	}
+
+	return elog, nil
+}
+
+func newWithWindowsEventLogCore(set CollectorSettings, elog *eventlog.Log) (*Collector, error) {
+	set.LoggingOptions = append(
+		set.LoggingOptions,
+		zap.WrapCore(withWindowsCore(elog)),
+	)
+	return New(set)
+}
+
+var _ zapcore.Core = (*windowsEventLogCore)(nil)
+
+type windowsEventLogCore struct {
+	core    zapcore.Core
+	elog    *eventlog.Log
+	encoder zapcore.Encoder
+}
+
+func (w windowsEventLogCore) Enabled(level zapcore.Level) bool {
+	return w.core.Enabled(level)
+}
+
+func (w windowsEventLogCore) With(fields []zapcore.Field) zapcore.Core {
+	return withWindowsCore(w.elog)(w.core.With(fields))
+}
+
+func (w windowsEventLogCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+	if w.Enabled(ent.Level) {
+		return ce.AddCore(ent, w)
+	}
+	return ce
+}
+
+func (w windowsEventLogCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
+	buf, err := w.encoder.EncodeEntry(ent, fields)
+	if err != nil {
+		w.elog.Warning(2, fmt.Sprintf("failed encoding log entry %v\r\n", err))
+		return err
+	}
+	msg := buf.String()
+	buf.Free()
+
+	switch ent.Level {
+	case zapcore.FatalLevel, zapcore.PanicLevel, zapcore.DPanicLevel:
+		// golang.org/x/sys/windows/svc/eventlog does not support Critical level event logs
+		return w.elog.Error(3, msg)
+	case zapcore.ErrorLevel:
+		return w.elog.Error(3, msg)
+	case zapcore.WarnLevel:
+		return w.elog.Warning(2, msg)
+	case zapcore.InfoLevel:
+		return w.elog.Info(1, msg)
+	}
+	// A debug entry would not get here if debug were disabled, so log it at
+	// info level to avoid dropping it.
+	return w.elog.Info(1, msg)
+}
+
+func (w windowsEventLogCore) Sync() error {
+	return w.core.Sync()
+}
+
+func withWindowsCore(elog *eventlog.Log) func(zapcore.Core) zapcore.Core {
+	return func(core zapcore.Core) zapcore.Core {
+		encoderConfig := zap.NewProductionEncoderConfig()
+		encoderConfig.LineEnding = "\r\n"
+		return windowsEventLogCore{core, elog, zapcore.NewConsoleEncoder(encoderConfig)}
+	}
+}
diff --git a/internal/otel_collector/service/defaultcomponents/defaults.go b/internal/otel_collector/service/defaultcomponents/defaults.go
new file mode 100644
index 00000000000..23dbabfbd49
--- /dev/null
+++ b/internal/otel_collector/service/defaultcomponents/defaults.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package defaultcomponents composes the default set of components used by the otel service.
+package defaultcomponents
+
+import (
+	"go.opentelemetry.io/collector/component"
+)
+
+// Components returns the default set of components used by the
+// OpenTelemetry collector.
+func Components() (component.Factories, error) {
+	return component.Factories{}, nil
+}
diff --git a/internal/otel_collector/service/internal/builder/builder.go b/internal/otel_collector/service/internal/builder/builder.go
new file mode 100644
index 00000000000..a7817851bbc
--- /dev/null
+++ b/internal/otel_collector/service/internal/builder/builder.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"flag"
+	"fmt"
+)
+
+const (
+	// flags
+	memBallastFlag = "mem-ballast-size-mib"
+
+	zapKindKey         = "kind"
+	zapKindReceiver    = "receiver"
+	zapKindProcessor   = "processor"
+	zapKindLogExporter = "exporter"
+	zapKindExtension   = "extension"
+	zapNameKey         = "name"
+)
+
+var (
+	memBallastSize *uint
+)
+
+// Flags adds flags related to basic building of the collector server to the given flagset.
+// Deprecated: this flag is kept to avoid a breaking change. Use the `ballast extension` instead.
+func Flags(flags *flag.FlagSet) {
+	memBallastSize = flags.Uint(memBallastFlag, 0,
+		fmt.Sprintf("Size of the memory (MiB) ballast to set. The ballast is not used when this is not specified. "+
+			"Default: 0"))
+}
+
+// MemBallastSize returns the size of the memory ballast to use, in MiB.
+func MemBallastSize() int {
+	return int(*memBallastSize)
+}
diff --git a/internal/otel_collector/service/internal/builder/doc.go b/internal/otel_collector/service/internal/builder/doc.go
new file mode 100644
index 00000000000..c5bd4259703
--- /dev/null
+++ b/internal/otel_collector/service/internal/builder/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package builder handles the options to build the OpenTelemetry collector
+// pipeline.
+package builder
diff --git a/internal/otel_collector/service/internal/builder/exporters_builder.go b/internal/otel_collector/service/internal/builder/exporters_builder.go
new file mode 100644
index 00000000000..b1542b24fec
--- /dev/null
+++ b/internal/otel_collector/service/internal/builder/exporters_builder.go
@@ -0,0 +1,278 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+// builtExporter is an exporter that is built based on a config. It can hold
+// one exporter component per data type and has a shutdown function.
+type builtExporter struct {
+	logger        *zap.Logger
+	expByDataType map[config.DataType]component.Exporter
+}
+
+// Start the exporter.
+func (bexp *builtExporter) Start(ctx context.Context, host component.Host) error {
+	var errors []error
+	for _, exporter := range bexp.expByDataType {
+		err := exporter.Start(ctx, host)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return consumererror.Combine(errors)
+}
+
+// Shutdown all of the exporter's per-data-type components.
+func (bexp *builtExporter) Shutdown(ctx context.Context) error {
+	var errors []error
+	for _, exporter := range bexp.expByDataType {
+		err := exporter.Shutdown(ctx)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return consumererror.Combine(errors)
+}
+
+func (bexp *builtExporter) getTracesExporter() component.TracesExporter {
+	exp := bexp.expByDataType[config.TracesDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.TracesExporter)
+}
+
+func (bexp *builtExporter) getMetricExporter() component.MetricsExporter {
+	exp := bexp.expByDataType[config.MetricsDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.MetricsExporter)
+}
+
+func (bexp *builtExporter) getLogExporter() component.LogsExporter {
+	exp := bexp.expByDataType[config.LogsDataType]
+	if exp == nil {
+		return nil
+	}
+	return exp.(component.LogsExporter)
+}
+
+// Exporters is a map of exporters created from exporter configs.
+type Exporters map[config.ComponentID]*builtExporter
+
+// StartAll starts all exporters.
+func (exps Exporters) StartAll(ctx context.Context, host component.Host) error {
+	for _, exp := range exps {
+		exp.logger.Info("Exporter is starting...")
+
+		if err := exp.Start(ctx, newHostWrapper(host, exp.logger)); err != nil {
+			return err
+		}
+		exp.logger.Info("Exporter started.")
+	}
+	return nil
+}
+
+// ShutdownAll stops all exporters.
+func (exps Exporters) ShutdownAll(ctx context.Context) error {
+	var errs []error
+	for _, exp := range exps {
+		err := exp.Shutdown(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return consumererror.Combine(errs)
+}
+
+func (exps Exporters) ToMapByDataType() map[config.DataType]map[config.ComponentID]component.Exporter {
+	exportersMap := make(map[config.DataType]map[config.ComponentID]component.Exporter)
+
+	exportersMap[config.TracesDataType] = make(map[config.ComponentID]component.Exporter, len(exps))
+	exportersMap[config.MetricsDataType] = make(map[config.ComponentID]component.Exporter, len(exps))
+	exportersMap[config.LogsDataType] = make(map[config.ComponentID]component.Exporter, len(exps))
+
+	for expID, bexp := range exps {
+		for t, exp := range bexp.expByDataType {
+			exportersMap[t][expID] = exp
+		}
+	}
+
+	return exportersMap
+}
+
+type dataTypeRequirement struct {
+	// Pipeline that requires the data type.
+	requiredBy *config.Pipeline
+}
+
+// Map of data type requirements.
+type dataTypeRequirements map[config.DataType]dataTypeRequirement
+
+// Data type requirements for all exporters.
+type exportersRequiredDataTypes map[config.ComponentID]dataTypeRequirements
+
+// BuildExporters builds Exporters from config.
+func BuildExporters(
+	logger *zap.Logger,
+	tracerProvider trace.TracerProvider,
+	buildInfo component.BuildInfo,
+	cfg *config.Config,
+	factories map[config.Type]component.ExporterFactory,
+) (Exporters, error) {
+	logger = logger.With(zap.String(zapKindKey, zapKindLogExporter))
+
+	// We need to calculate the required input data types for each exporter so
+	// that we know which data types each exporter must be built for.
+	exporterInputDataTypes := calcExportersRequiredDataTypes(cfg)
+
+	exporters := make(Exporters)
+
+	// Build exporters based on the configuration and required input data types.
+ for expID, expCfg := range cfg.Exporters { + set := component.ExporterCreateSettings{ + Logger: logger.With(zap.String(zapNameKey, expID.String())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + + factory, exists := factories[expID.Type()] + if !exists || factory == nil { + return nil, fmt.Errorf("exporter factory not found for type: %s", expID.Type()) + } + + exp, err := buildExporter(context.Background(), factory, set, expCfg, exporterInputDataTypes[expID]) + if err != nil { + return nil, err + } + + exporters[expID] = exp + } + + return exporters, nil +} + +func calcExportersRequiredDataTypes(cfg *config.Config) exportersRequiredDataTypes { + // Go over all pipelines. The data type of the pipeline defines what data type + // each exporter is expected to receive. Collect all required types for each + // exporter. + // + // We also remember the last pipeline that requested the particular data type. + // This is only needed for logging purposes in error cases when we need to + // print that a particular exporter does not support the data type required for + // a particular pipeline. + + result := make(exportersRequiredDataTypes) + + // Iterate over pipelines. + for _, pipeline := range cfg.Service.Pipelines { + // Iterate over all exporters for this pipeline. + for _, expID := range pipeline.Exporters { + // Create the data type requirement for the expCfg if it does not exist. + if _, ok := result[expID]; !ok { + result[expID] = make(dataTypeRequirements) + } + + // Remember that this data type is required for the expCfg and also which + // pipeline the requirement is coming from. + result[expID][pipeline.InputType] = dataTypeRequirement{pipeline} + } + } + return result +} + +func buildExporter( + ctx context.Context, + factory component.ExporterFactory, + set component.ExporterCreateSettings, + cfg config.Exporter, + inputDataTypes dataTypeRequirements, +) (*builtExporter, error) { + exporter := &builtExporter{ + logger: set.Logger, + expByDataType: make(map[config.DataType]component.Exporter, 3), + } + + if inputDataTypes == nil { + set.Logger.Info("Ignoring exporter as it is not used by any pipeline") + return exporter, nil + } + + var err error + var createdExporter component.Exporter + for dataType, requirement := range inputDataTypes { + switch dataType { + case config.TracesDataType: + createdExporter, err = factory.CreateTracesExporter(ctx, set, cfg) + + case config.MetricsDataType: + createdExporter, err = factory.CreateMetricsExporter(ctx, set, cfg) + + case config.LogsDataType: + createdExporter, err = factory.CreateLogsExporter(ctx, set, cfg) + + default: + // Could not create because this exporter does not support this data type. + return nil, exporterTypeMismatchErr(cfg, requirement.requiredBy, dataType) + } + + if err != nil { + if err == componenterror.ErrDataTypeIsNotSupported { + // Could not create because this exporter does not support this data type. + return nil, exporterTypeMismatchErr(cfg, requirement.requiredBy, dataType) + } + return nil, fmt.Errorf("error creating %v exporter: %v", cfg.ID(), err) + } + + // Check if the factory really created the exporter. 
+		if createdExporter == nil {
+			return nil, fmt.Errorf("factory for %v produced a nil exporter", cfg.ID())
+		}
+
+		exporter.expByDataType[dataType] = createdExporter
+	}
+
+	set.Logger.Info("Exporter was built.")
+
+	return exporter, nil
+}
+
+func exporterTypeMismatchErr(
+	config config.Exporter,
+	requiredByPipeline *config.Pipeline,
+	dataType config.DataType,
+) error {
+	return fmt.Errorf(
+		"pipeline %q of data type %q has an exporter %v, which does not support that data type",
+		requiredByPipeline.Name, dataType, config.ID(),
+	)
+}
diff --git a/internal/otel_collector/service/internal/builder/extensions_builder.go b/internal/otel_collector/service/internal/builder/extensions_builder.go
new file mode 100644
index 00000000000..cf610ceee18
--- /dev/null
+++ b/internal/otel_collector/service/internal/builder/extensions_builder.go
@@ -0,0 +1,170 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer/consumererror"
+)
+
+// builtExtension is an extension that is built based on a config. It wraps
+// the created component.Extension together with its logger.
+type builtExtension struct {
+	logger    *zap.Logger
+	extension component.Extension
+}
+
+// Start the extension.
+func (ext *builtExtension) Start(ctx context.Context, host component.Host) error {
+	return ext.extension.Start(ctx, host)
+}
+
+// Shutdown the extension.
+func (ext *builtExtension) Shutdown(ctx context.Context) error {
+	return ext.extension.Shutdown(ctx)
+}
+
+var _ component.Extension = (*builtExtension)(nil)
+
+// Extensions is a map of extensions created from extension configs.
+type Extensions map[config.ComponentID]*builtExtension
+
+// StartAll starts all extensions.
+func (exts Extensions) StartAll(ctx context.Context, host component.Host) error {
+	for _, ext := range exts {
+		ext.logger.Info("Extension is starting...")
+
+		if err := ext.Start(ctx, newHostWrapper(host, ext.logger)); err != nil {
+			return err
+		}
+
+		ext.logger.Info("Extension started.")
+	}
+	return nil
+}
+
+// ShutdownAll stops all extensions.
+func (exts Extensions) ShutdownAll(ctx context.Context) error {
+	var errs []error
+	for _, ext := range exts {
+		err := ext.Shutdown(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return consumererror.Combine(errs)
+}
+
+func (exts Extensions) NotifyPipelineReady() error {
+	for _, ext := range exts {
+		if pw, ok := ext.extension.(component.PipelineWatcher); ok {
+			if err := pw.Ready(); err != nil {
+				ext.logger.Error("Error notifying extension that the pipeline was started.")
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (exts Extensions) NotifyPipelineNotReady() error {
+	// Notify all extensions, collecting errors rather than stopping at the first failure.
+ var errs []error + for _, ext := range exts { + if pw, ok := ext.extension.(component.PipelineWatcher); ok { + if err := pw.NotReady(); err != nil { + ext.logger.Error("Error notifying extension that the pipeline was shutdown.") + errs = append(errs, err) + } + } + } + + return consumererror.Combine(errs) +} + +func (exts Extensions) ToMap() map[config.ComponentID]component.Extension { + result := make(map[config.ComponentID]component.Extension, len(exts)) + for extID, v := range exts { + result[extID] = v.extension + } + return result +} + +// BuildExtensions builds Extensions from config. +func BuildExtensions( + logger *zap.Logger, + tracerProvider trace.TracerProvider, + buildInfo component.BuildInfo, + config *config.Config, + factories map[config.Type]component.ExtensionFactory, +) (Extensions, error) { + extensions := make(Extensions) + for _, extID := range config.Service.Extensions { + extCfg, existsCfg := config.Extensions[extID] + if !existsCfg { + return nil, fmt.Errorf("extension %q is not configured", extID) + } + + factory, existsFactory := factories[extID.Type()] + if !existsFactory { + return nil, fmt.Errorf("extension factory for type %q is not configured", extID.Type()) + } + + set := component.ExtensionCreateSettings{ + Logger: logger.With( + zap.String(zapKindKey, zapKindExtension), + zap.String(zapNameKey, extID.String())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + ext, err := buildExtension(context.Background(), factory, set, extCfg) + if err != nil { + return nil, err + } + + extensions[extID] = ext + } + + return extensions, nil +} + +func buildExtension(ctx context.Context, factory component.ExtensionFactory, creationSet component.ExtensionCreateSettings, cfg config.Extension) (*builtExtension, error) { + ext := &builtExtension{ + logger: creationSet.Logger, + } + + ex, err := factory.CreateExtension(ctx, creationSet, cfg) + if err != nil { + return nil, fmt.Errorf("failed to create extension %v: %w", cfg.ID(), err) + } + + // Check if the factory really created the extension. + if ex == nil { + return nil, fmt.Errorf("factory for %v produced a nil extension", cfg.ID()) + } + + ext.extension = ex + + return ext, nil +} diff --git a/internal/otel_collector/service/internal/builder/host_wrapper.go b/internal/otel_collector/service/internal/builder/host_wrapper.go new file mode 100644 index 00000000000..cea7cdb9b7f --- /dev/null +++ b/internal/otel_collector/service/internal/builder/host_wrapper.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builder + +import ( + "net/http" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +// hostWrapper adds behavior on top of the component.Host being passed when starting the built components. 
+type hostWrapper struct {
+ component.Host
+ *zap.Logger
+}
+
+func newHostWrapper(host component.Host, logger *zap.Logger) component.Host {
+ return &hostWrapper{
+ host,
+ logger,
+ }
+}
+
+func (hw *hostWrapper) ReportFatalError(err error) {
+ // The logger from the built component already identifies the component.
+ hw.Logger.Error("Component fatal error", zap.Error(err))
+ hw.Host.ReportFatalError(err)
+}
+
+// RegisterZPages is used by the zpages extension to register handlers from the service.
+// When the wrapper is passed to the extension, casting it to the zpages interface fails,
+// so for the time being the interface is exposed here as well.
+// TODO: Find a better way to add the service zpages to the extension. This is a temporary fix.
+func (hw *hostWrapper) RegisterZPages(mux *http.ServeMux, pathPrefix string) {
+ if zpagesHost, ok := hw.Host.(interface {
+ RegisterZPages(mux *http.ServeMux, pathPrefix string)
+ }); ok {
+ zpagesHost.RegisterZPages(mux, pathPrefix)
+ }
+}
diff --git a/internal/otel_collector/service/internal/builder/pipelines_builder.go b/internal/otel_collector/service/internal/builder/pipelines_builder.go
new file mode 100644
index 00000000000..c589b69180c
--- /dev/null
+++ b/internal/otel_collector/service/internal/builder/pipelines_builder.go
@@ -0,0 +1,273 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builder
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
+ "go.opentelemetry.io/collector/service/internal/fanoutconsumer"
+)
+
+// builtPipeline is a pipeline that is built based on a config.
+// It can have a trace and/or a metrics consumer (the consumer is either the first
+// processor in the pipeline or the exporter if the pipeline has no processors).
+type builtPipeline struct {
+ logger *zap.Logger
+ firstTC consumer.Traces
+ firstMC consumer.Metrics
+ firstLC consumer.Logs
+
+ // MutatesData is set to true if any processors in the pipeline
+ // can mutate the TraceData or MetricsData input argument.
+ MutatesData bool
+
+ processors []component.Processor
+}
+
+// BuiltPipelines is a map of built pipelines created from pipeline configs.
+type BuiltPipelines map[*config.Pipeline]*builtPipeline
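The extensions builder shown a little earlier in this diff only notifies components that implement component.PipelineWatcher once all pipelines are started or are about to stop. A minimal sketch of an extension that opts in; only the component.Extension and component.PipelineWatcher interfaces come from the collector code in this diff, while the healthState type and its field are hypothetical:

package sketch

import (
	"context"

	"go.opentelemetry.io/collector/component"
)

type healthState struct {
	ready bool
}

// Start and Shutdown satisfy component.Extension (via component.Component).
func (h *healthState) Start(ctx context.Context, host component.Host) error { return nil }
func (h *healthState) Shutdown(ctx context.Context) error                   { return nil }

// Ready and NotReady satisfy component.PipelineWatcher, so the Extensions
// builder will call them from NotifyPipelineReady and NotifyPipelineNotReady.
func (h *healthState) Ready() error    { h.ready = true; return nil }
func (h *healthState) NotReady() error { h.ready = false; return nil }

var (
	_ component.Extension       = (*healthState)(nil)
	_ component.PipelineWatcher = (*healthState)(nil)
)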
+func (bps BuiltPipelines) StartProcessors(ctx context.Context, host component.Host) error {
+ for _, bp := range bps {
+ bp.logger.Info("Pipeline is starting...")
+ hostWrapper := newHostWrapper(host, bp.logger)
+ // Start in reverse order, from the back of the processor pipeline.
+ // This is important so that processors earlier in the pipeline, which
+ // send data to processors later in the pipeline, do not start sending
+ // data to components that are not yet started.
+ for i := len(bp.processors) - 1; i >= 0; i-- {
+ if err := bp.processors[i].Start(ctx, hostWrapper); err != nil {
+ return err
+ }
+ }
+ bp.logger.Info("Pipeline is started.")
+ }
+ return nil
+}
+
+func (bps BuiltPipelines) ShutdownProcessors(ctx context.Context) error {
+ var errs []error
+ for _, bp := range bps {
+ bp.logger.Info("Pipeline is shutting down...")
+ for _, p := range bp.processors {
+ if err := p.Shutdown(ctx); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ bp.logger.Info("Pipeline is shut down.")
+ }
+
+ return consumererror.Combine(errs)
+}
+
+// pipelinesBuilder builds Pipelines from config.
+type pipelinesBuilder struct {
+ logger *zap.Logger
+ tracerProvider trace.TracerProvider
+ buildInfo component.BuildInfo
+ config *config.Config
+ exporters Exporters
+ factories map[config.Type]component.ProcessorFactory
+}
+
+// BuildPipelines builds pipeline processors from config. Requires exporters to be already
+// built via BuildExporters.
+func BuildPipelines(
+ logger *zap.Logger,
+ tracerProvider trace.TracerProvider,
+ buildInfo component.BuildInfo,
+ config *config.Config,
+ exporters Exporters,
+ factories map[config.Type]component.ProcessorFactory,
+) (BuiltPipelines, error) {
+ pb := &pipelinesBuilder{logger, tracerProvider, buildInfo, config, exporters, factories}
+
+ pipelineProcessors := make(BuiltPipelines)
+ for _, pipeline := range pb.config.Service.Pipelines {
+ firstProcessor, err := pb.buildPipeline(context.Background(), pipeline)
+ if err != nil {
+ return nil, err
+ }
+ pipelineProcessors[pipeline] = firstProcessor
+ }
+
+ return pipelineProcessors, nil
+}
+
+// Builds a pipeline of processors. Returns the first processor in the pipeline.
+// The last processor in the pipeline is wired to fan the data out to the exporters
+// that are configured for this pipeline.
+func (pb *pipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *config.Pipeline) (*builtPipeline, error) {
+
+ // Build the pipeline backwards.
+
+ // First create a consumer junction point that fans out the data to all exporters.
+ var tc consumer.Traces
+ var mc consumer.Metrics
+ var lc consumer.Logs
+
+ switch pipelineCfg.InputType {
+ case config.TracesDataType:
+ tc = pb.buildFanoutExportersTracesConsumer(pipelineCfg.Exporters)
+ case config.MetricsDataType:
+ mc = pb.buildFanoutExportersMetricsConsumer(pipelineCfg.Exporters)
+ case config.LogsDataType:
+ lc = pb.buildFanoutExportersLogsConsumer(pipelineCfg.Exporters)
+ }
+
+ mutatesConsumedData := false
+
+ processors := make([]component.Processor, len(pipelineCfg.Processors))
+
+ // Now build the processors backwards, starting from the last one.
+ // The last processor points to the consumer that fans out to the exporters;
+ // each processor then becomes the consumer for the one that precedes it
+ // in the pipeline, and so on.
+ for i := len(pipelineCfg.Processors) - 1; i >= 0; i-- {
+ procID := pipelineCfg.Processors[i]
+
+ procCfg, existsCfg := pb.config.Processors[procID]
+ if !existsCfg {
+ return nil, fmt.Errorf("processor %q is not configured", procID)
+ }
+
+ factory, existsFactory := pb.factories[procID.Type()]
+ if !existsFactory {
+ return nil, fmt.Errorf("processor factory for type %q is not configured", procID.Type())
+ }
+
+ // This processor must point to the next consumer; it then becomes the
+ // next consumer for the previous processor (previous in the pipeline,
+ // which we will build in the next loop iteration).
+ var err error + set := component.ProcessorCreateSettings{ + Logger: pb.logger.With(zap.String(zapKindKey, zapKindProcessor), zap.String(zapNameKey, procID.String())), + TracerProvider: pb.tracerProvider, + BuildInfo: pb.buildInfo, + } + + switch pipelineCfg.InputType { + case config.TracesDataType: + var proc component.TracesProcessor + proc, err = factory.CreateTracesProcessor(ctx, set, procCfg, tc) + if proc != nil { + mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData + } + processors[i] = proc + tc = proc + case config.MetricsDataType: + var proc component.MetricsProcessor + proc, err = factory.CreateMetricsProcessor(ctx, set, procCfg, mc) + if proc != nil { + mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData + } + processors[i] = proc + mc = proc + + case config.LogsDataType: + var proc component.LogsProcessor + proc, err = factory.CreateLogsProcessor(ctx, set, procCfg, lc) + if proc != nil { + mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData + } + processors[i] = proc + lc = proc + + default: + return nil, fmt.Errorf("error creating processor %q in pipeline %q, data type %s is not supported", + procID, pipelineCfg.Name, pipelineCfg.InputType) + } + + if err != nil { + return nil, fmt.Errorf("error creating processor %q in pipeline %q: %v", + procID, pipelineCfg.Name, err) + } + + // Check if the factory really created the processor. + if tc == nil && mc == nil && lc == nil { + return nil, fmt.Errorf("factory for %v produced a nil processor", procID) + } + } + + pipelineLogger := pb.logger.With(zap.String("pipeline_name", pipelineCfg.Name), + zap.String("pipeline_datatype", string(pipelineCfg.InputType))) + pipelineLogger.Info("Pipeline was built.") + + bp := &builtPipeline{ + pipelineLogger, + tc, + mc, + lc, + mutatesConsumedData, + processors, + } + + return bp, nil +} + +// Converts the list of exporter names to a list of corresponding builtExporters. +func (pb *pipelinesBuilder) getBuiltExportersByIDs(exporterIDs []config.ComponentID) []*builtExporter { + var result []*builtExporter + for _, expID := range exporterIDs { + exporter := pb.exporters[expID] + result = append(result, exporter) + } + + return result +} + +func (pb *pipelinesBuilder) buildFanoutExportersTracesConsumer(exporterIDs []config.ComponentID) consumer.Traces { + builtExporters := pb.getBuiltExportersByIDs(exporterIDs) + + var exporters []consumer.Traces + for _, builtExp := range builtExporters { + exporters = append(exporters, builtExp.getTracesExporter()) + } + + // Create a junction point that fans out to all exporters. + return fanoutconsumer.NewTraces(exporters) +} + +func (pb *pipelinesBuilder) buildFanoutExportersMetricsConsumer(exporterIDs []config.ComponentID) consumer.Metrics { + builtExporters := pb.getBuiltExportersByIDs(exporterIDs) + + var exporters []consumer.Metrics + for _, builtExp := range builtExporters { + exporters = append(exporters, builtExp.getMetricExporter()) + } + + // Create a junction point that fans out to all exporters. + return fanoutconsumer.NewMetrics(exporters) +} + +func (pb *pipelinesBuilder) buildFanoutExportersLogsConsumer(exporterIDs []config.ComponentID) consumer.Logs { + builtExporters := pb.getBuiltExportersByIDs(exporterIDs) + + exporters := make([]consumer.Logs, len(builtExporters)) + for i, builtExp := range builtExporters { + exporters[i] = builtExp.getLogExporter() + } + + // Create a junction point that fans out to all exporters. 
+ return fanoutconsumer.NewLogs(exporters) +} diff --git a/internal/otel_collector/service/internal/builder/receivers_builder.go b/internal/otel_collector/service/internal/builder/receivers_builder.go new file mode 100644 index 00000000000..84f7104756e --- /dev/null +++ b/internal/otel_collector/service/internal/builder/receivers_builder.go @@ -0,0 +1,344 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builder + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/service/internal/fanoutconsumer" +) + +var errUnusedReceiver = errors.New("receiver defined but not used by any pipeline") + +// builtReceiver is a receiver that is built based on a config. It can have +// a trace and/or a metrics component. +type builtReceiver struct { + logger *zap.Logger + receiver component.Receiver +} + +// Start starts the receiver. +func (rcv *builtReceiver) Start(ctx context.Context, host component.Host) error { + return rcv.receiver.Start(ctx, host) +} + +// Shutdown stops the receiver. +func (rcv *builtReceiver) Shutdown(ctx context.Context) error { + return rcv.receiver.Shutdown(ctx) +} + +// Receivers is a map of receivers created from receiver configs. +type Receivers map[config.ComponentID]*builtReceiver + +// ShutdownAll stops all receivers. +func (rcvs Receivers) ShutdownAll(ctx context.Context) error { + var errs []error + for _, rcv := range rcvs { + err := rcv.Shutdown(ctx) + if err != nil { + errs = append(errs, err) + } + } + + return consumererror.Combine(errs) +} + +// StartAll starts all receivers. +func (rcvs Receivers) StartAll(ctx context.Context, host component.Host) error { + for _, rcv := range rcvs { + rcv.logger.Info("Receiver is starting...") + + if err := rcv.Start(ctx, newHostWrapper(host, rcv.logger)); err != nil { + return err + } + rcv.logger.Info("Receiver started.") + } + return nil +} + +// receiversBuilder builds receivers from config. +type receiversBuilder struct { + config *config.Config + builtPipelines BuiltPipelines + factories map[config.Type]component.ReceiverFactory +} + +// BuildReceivers builds Receivers from config. 
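The pipelines builder above wires each pipeline back-to-front: the exporter fanout is created first, and each processor is handed the consumer that follows it. A self-contained sketch of the same chaining idea, using hypothetical stage names rather than the collector's API:

package main

import "fmt"

// consumerFn stands in for a pipeline stage's consume call.
type consumerFn func(data string)

// wrap builds a stage that does its own work and then hands the
// (possibly modified) data to the next consumer, mirroring how
// buildPipeline makes each processor point at its successor.
func wrap(name string, next consumerFn) consumerFn {
	return func(data string) {
		next(fmt.Sprintf("%s->%s", data, name))
	}
}

func main() {
	// The exporter fanout is the final consumer; processors are
	// attached in reverse order, last processor first.
	final := func(data string) { fmt.Println("export:", data) }
	stages := []string{"attributes", "batch"}
	next := final
	for i := len(stages) - 1; i >= 0; i-- {
		next = wrap(stages[i], next)
	}
	next("span") // export: span->attributes->batch
}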
+func BuildReceivers( + logger *zap.Logger, + tracerProvider trace.TracerProvider, + buildInfo component.BuildInfo, + cfg *config.Config, + builtPipelines BuiltPipelines, + factories map[config.Type]component.ReceiverFactory, +) (Receivers, error) { + rb := &receiversBuilder{cfg, builtPipelines, factories} + + receivers := make(Receivers) + for recvID, recvCfg := range cfg.Receivers { + set := component.ReceiverCreateSettings{ + Logger: logger.With(zap.String(zapKindKey, zapKindReceiver), zap.String(zapNameKey, recvID.String())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + + rcv, err := rb.buildReceiver(context.Background(), set, recvCfg) + if err != nil { + if err == errUnusedReceiver { + set.Logger.Info("Ignoring receiver as it is not used by any pipeline") + continue + } + return nil, err + } + receivers[recvID] = rcv + } + + return receivers, nil +} + +// hasReceiver returns true if the pipeline is attached to specified receiver. +func hasReceiver(pipeline *config.Pipeline, receiverID config.ComponentID) bool { + for _, id := range pipeline.Receivers { + if id == receiverID { + return true + } + } + return false +} + +type attachedPipelines map[config.DataType][]*builtPipeline + +func (rb *receiversBuilder) findPipelinesToAttach(receiverID config.ComponentID) (attachedPipelines, error) { + // A receiver may be attached to multiple pipelines. Pipelines may consume different + // data types. We need to compile the list of pipelines of each type that must be + // attached to this receiver according to configuration. + + pipelinesToAttach := make(attachedPipelines) + pipelinesToAttach[config.TracesDataType] = make([]*builtPipeline, 0) + pipelinesToAttach[config.MetricsDataType] = make([]*builtPipeline, 0) + + // Iterate over all pipelines. + for _, pipelineCfg := range rb.config.Service.Pipelines { + // Get the first processor of the pipeline. + pipelineProcessor := rb.builtPipelines[pipelineCfg] + if pipelineProcessor == nil { + return nil, fmt.Errorf("cannot find pipeline processor for pipeline %s", + pipelineCfg.Name) + } + + // Is this receiver attached to the pipeline? + if hasReceiver(pipelineCfg, receiverID) { + if _, exists := pipelinesToAttach[pipelineCfg.InputType]; !exists { + pipelinesToAttach[pipelineCfg.InputType] = make([]*builtPipeline, 0) + } + + // Yes, add it to the list of pipelines of corresponding data type. + pipelinesToAttach[pipelineCfg.InputType] = + append(pipelinesToAttach[pipelineCfg.InputType], pipelineProcessor) + } + } + + return pipelinesToAttach, nil +} + +func attachReceiverToPipelines( + ctx context.Context, + set component.ReceiverCreateSettings, + factory component.ReceiverFactory, + dataType config.DataType, + cfg config.Receiver, + rcv *builtReceiver, + builtPipelines []*builtPipeline, +) error { + // There are pipelines of the specified data type that must be attached to + // the receiver. Create the receiver of corresponding data type and make + // sure its output is fanned out to all attached pipelines. 
+ var err error + var createdReceiver component.Receiver + + switch dataType { + case config.TracesDataType: + junction := buildFanoutTraceConsumer(builtPipelines) + createdReceiver, err = factory.CreateTracesReceiver(ctx, set, cfg, junction) + + case config.MetricsDataType: + junction := buildFanoutMetricConsumer(builtPipelines) + createdReceiver, err = factory.CreateMetricsReceiver(ctx, set, cfg, junction) + + case config.LogsDataType: + junction := buildFanoutLogConsumer(builtPipelines) + createdReceiver, err = factory.CreateLogsReceiver(ctx, set, cfg, junction) + + default: + err = componenterror.ErrDataTypeIsNotSupported + } + + if err != nil { + if err == componenterror.ErrDataTypeIsNotSupported { + return fmt.Errorf( + "receiver %v does not support %s but it was used in a %s pipeline", + cfg.ID(), dataType, dataType) + } + return fmt.Errorf("cannot create receiver %v: %w", cfg.ID(), err) + } + + // Check if the factory really created the receiver. + if createdReceiver == nil { + return fmt.Errorf("factory for %v produced a nil receiver", cfg.ID()) + } + + if rcv.receiver != nil { + // The receiver was previously created for this config. This can happen if the + // same receiver type supports more than one data type. In that case we expect + // that CreateTracesReceiver and CreateMetricsReceiver return the same value. + if rcv.receiver != createdReceiver { + return fmt.Errorf( + "factory for %v is implemented incorrectly: "+ + "CreateTracesReceiver and CreateMetricsReceiver must return the same "+ + "receiver pointer when creating receivers of different data types", + cfg.ID(), + ) + } + } + rcv.receiver = createdReceiver + + set.Logger.Info("Receiver was built.", zap.String("datatype", string(dataType))) + + return nil +} + +func (rb *receiversBuilder) buildReceiver(ctx context.Context, set component.ReceiverCreateSettings, cfg config.Receiver) (*builtReceiver, error) { + + // First find pipelines that must be attached to this receiver. + pipelinesToAttach, err := rb.findPipelinesToAttach(cfg.ID()) + if err != nil { + return nil, err + } + + // Prepare to build the receiver. + factory := rb.factories[cfg.ID().Type()] + if factory == nil { + return nil, fmt.Errorf("receiver factory not found for: %v", cfg.ID()) + } + rcv := &builtReceiver{ + logger: set.Logger, + } + + // Now we have list of pipelines broken down by data type. Iterate for each data type. + for dataType, pipelines := range pipelinesToAttach { + if len(pipelines) == 0 { + // No pipelines of this data type are attached to this receiver. + continue + } + + // Attach the corresponding part of the receiver to all pipelines that require + // this data type. + err := attachReceiverToPipelines(ctx, set, factory, dataType, cfg, rcv, pipelines) + if err != nil { + return nil, err + } + } + + if rcv.receiver == nil { + return nil, errUnusedReceiver + } + + return rcv, nil +} + +func buildFanoutTraceConsumer(pipelines []*builtPipeline) consumer.Traces { + // Optimize for the case when there is only one processor, no need to create junction point. + if len(pipelines) == 1 { + return pipelines[0].firstTC + } + + var pipelineConsumers []consumer.Traces + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstTC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesData + } + + // Create a junction point that fans out to all pipelines. 
+ if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. + return fanoutconsumer.NewTracesCloning(pipelineConsumers) + } + return fanoutconsumer.NewTraces(pipelineConsumers) +} + +func buildFanoutMetricConsumer(pipelines []*builtPipeline) consumer.Metrics { + // Optimize for the case when there is only one processor, no need to create junction point. + if len(pipelines) == 1 { + return pipelines[0].firstMC + } + + var pipelineConsumers []consumer.Metrics + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstMC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesData + } + + // Create a junction point that fans out to all pipelines. + if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. + return fanoutconsumer.NewMetricsCloning(pipelineConsumers) + } + return fanoutconsumer.NewMetrics(pipelineConsumers) +} + +func buildFanoutLogConsumer(pipelines []*builtPipeline) consumer.Logs { + // Optimize for the case when there is only one processor, no need to create junction point. + if len(pipelines) == 1 { + return pipelines[0].firstLC + } + + var pipelineConsumers []consumer.Logs + anyPipelineMutatesData := false + for _, pipeline := range pipelines { + pipelineConsumers = append(pipelineConsumers, pipeline.firstLC) + anyPipelineMutatesData = anyPipelineMutatesData || pipeline.MutatesData + } + + // Create a junction point that fans out to all pipelines. + if anyPipelineMutatesData { + // If any pipeline mutates data use a cloning fan out connector + // so that it is safe to modify fanned out data. + // TODO: if there are more than 2 pipelines only clone data for pipelines that + // declare the intent to mutate the data. Pipelines that do not mutate the data + // can consume shared data. 
+ return fanoutconsumer.NewLogsCloning(pipelineConsumers) + } + return fanoutconsumer.NewLogs(pipelineConsumers) +} diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_logs.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_logs.yaml new file mode 100644 index 00000000000..5dcb27600fe --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_logs.yaml @@ -0,0 +1,10 @@ +receivers: + examplereceiver: +exporters: + bf: + +service: + pipelines: + logs: + receivers: [examplereceiver] + exporters: [bf] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_metrics.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_metrics.yaml new file mode 100644 index 00000000000..c46e8b5c40d --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_metrics.yaml @@ -0,0 +1,10 @@ +receivers: + examplereceiver: +exporters: + bf: + +service: + pipelines: + metrics: + receivers: [examplereceiver] + exporters: [bf] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_traces.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_traces.yaml new file mode 100644 index 00000000000..b1b4283e8ee --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_exporter_traces.yaml @@ -0,0 +1,10 @@ +receivers: + examplereceiver: +exporters: + bf: + +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [bf] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_logs.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_logs.yaml new file mode 100644 index 00000000000..bf8ce83017e --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_logs.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + bf: +exporters: + exampleexporter: + +service: + pipelines: + logs: + receivers: [examplereceiver] + processors: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_metrics.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_metrics.yaml new file mode 100644 index 00000000000..38d0cd79a14 --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_metrics.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + bf: +exporters: + exampleexporter: + +service: + pipelines: + metrics: + receivers: [examplereceiver] + processors: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_traces.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_traces.yaml new file mode 100644 index 00000000000..d9c931a2d7b --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_processor_traces.yaml @@ -0,0 +1,13 @@ +receivers: + examplereceiver: +processors: + bf: +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + processors: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_logs.yaml 
b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_logs.yaml new file mode 100644 index 00000000000..e4ac06f49b8 --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_logs.yaml @@ -0,0 +1,10 @@ +receivers: + bf: # this is the bad receiver factory +exporters: + exampleexporter: + +service: + pipelines: + logs: + receivers: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_metrics.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_metrics.yaml new file mode 100644 index 00000000000..e73eb326c00 --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_metrics.yaml @@ -0,0 +1,10 @@ +receivers: + bf: # this is the bad receiver factory +exporters: + exampleexporter: + +service: + pipelines: + metrics: + receivers: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_traces.yaml b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_traces.yaml new file mode 100644 index 00000000000..1359d79578c --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/not_supported_receiver_traces.yaml @@ -0,0 +1,10 @@ +receivers: + bf: # this is the bad receiver factory +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [bf] + exporters: [exampleexporter] diff --git a/internal/otel_collector/service/internal/builder/testdata/pipelines_builder.yaml b/internal/otel_collector/service/internal/builder/testdata/pipelines_builder.yaml new file mode 100644 index 00000000000..ef965adb5fa --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/pipelines_builder.yaml @@ -0,0 +1,40 @@ +receivers: + examplereceiver: + examplereceiver/2: + examplereceiver/3: + examplereceiver/multi: + +processors: + exampleprocessor: + +exporters: + exampleexporter: + exampleexporter/2: + +service: + pipelines: + traces: + receivers: [examplereceiver, examplereceiver/multi] + processors: [exampleprocessor] + exporters: [exampleexporter] + + traces/2: + receivers: [examplereceiver/2, examplereceiver/multi] + processors: [exampleprocessor] + exporters: [exampleexporter, exampleexporter/2] + + metrics: + receivers: [examplereceiver] + exporters: [exampleexporter] + + metrics/2: + receivers: [examplereceiver/3] + exporters: [exampleexporter] + + metrics/3: + receivers: [examplereceiver/3] + exporters: [exampleexporter/2] + + logs: + receivers: [examplereceiver/3] + exporters: [exampleexporter/2] diff --git a/internal/otel_collector/service/internal/builder/testdata/unused_receiver.yaml b/internal/otel_collector/service/internal/builder/testdata/unused_receiver.yaml new file mode 100644 index 00000000000..f8dc448ac28 --- /dev/null +++ b/internal/otel_collector/service/internal/builder/testdata/unused_receiver.yaml @@ -0,0 +1,12 @@ +receivers: + examplereceiver: + examplereceiver/2: +processors: +exporters: + exampleexporter: + +service: + pipelines: + traces: + receivers: [examplereceiver] + exporters: [exampleexporter] \ No newline at end of file diff --git a/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go b/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go new file mode 100644 index 00000000000..ad266c4dc33 --- /dev/null +++ 
b/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fanoutconsumer
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/consumer/consumererror"
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// NewMetricsCloning wraps multiple metrics consumers in a single one and clones the data
+// before fanning out.
+func NewMetricsCloning(mcs []consumer.Metrics) consumer.Metrics {
+ if len(mcs) == 1 {
+ // Don't wrap if no need to do it.
+ return mcs[0]
+ }
+ return metricsCloningConsumer(mcs)
+}
+
+type metricsCloningConsumer []consumer.Metrics
+
+var _ consumer.Metrics = (*metricsCloningConsumer)(nil)
+
+func (mfc metricsCloningConsumer) Capabilities() consumer.Capabilities {
+ return consumer.Capabilities{MutatesData: true}
+}
+
+// ConsumeMetrics exports the pdata.Metrics to all consumers wrapped by the current one.
+func (mfc metricsCloningConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
+ var errs []error
+
+ // Fan out to first len-1 consumers.
+ for i := 0; i < len(mfc)-1; i++ {
+ // Create a clone of data. We need to clone because consumers may modify the data.
+ if err := mfc[i].ConsumeMetrics(ctx, md.Clone()); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(mfc) > 0 {
+ // Give the original data to the last consumer.
+ lastTc := mfc[len(mfc)-1]
+ if err := lastTc.ConsumeMetrics(ctx, md); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return consumererror.Combine(errs)
+}
+
+// NewTracesCloning wraps multiple traces consumers in a single one and clones the data
+// before fanning out.
+func NewTracesCloning(tcs []consumer.Traces) consumer.Traces {
+ if len(tcs) == 1 {
+ // Don't wrap if no need to do it.
+ return tcs[0]
+ }
+ return tracesCloningConsumer(tcs)
+}
+
+type tracesCloningConsumer []consumer.Traces
+
+var _ consumer.Traces = (*tracesCloningConsumer)(nil)
+
+func (tfc tracesCloningConsumer) Capabilities() consumer.Capabilities {
+ return consumer.Capabilities{MutatesData: true}
+}
+
+// ConsumeTraces exports the pdata.Traces to all consumers wrapped by the current one.
+func (tfc tracesCloningConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+ var errs []error
+
+ // Fan out to first len-1 consumers.
+ for i := 0; i < len(tfc)-1; i++ {
+ // Create a clone of data. We need to clone because consumers may modify the data.
+ if err := tfc[i].ConsumeTraces(ctx, td.Clone()); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(tfc) > 0 {
+ // Give the original data to the last consumer.
+ lastTc := tfc[len(tfc)-1]
+ if err := lastTc.ConsumeTraces(ctx, td); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return consumererror.Combine(errs)
+}
+
+// NewLogsCloning wraps multiple log consumers in a single one and clones the data
+// before fanning out.
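The cloning consumers above hand a fresh copy to every consumer except the last, which receives the original; this keeps mutating consumers isolated while avoiding one redundant clone. A self-contained sketch of the pattern on plain slices (hypothetical helper, not the collector's API):

package main

import "fmt"

// fanOut gives every consumer but the last its own copy of the data,
// and hands the original to the last consumer, mirroring the cloning
// fanout logic above.
func fanOut(data []int, consumers []func([]int)) {
	for i := 0; i < len(consumers)-1; i++ {
		clone := append([]int(nil), data...) // each mutating consumer gets its own copy
		consumers[i](clone)
	}
	if len(consumers) > 0 {
		consumers[len(consumers)-1](data) // original goes to the last consumer
	}
}

func main() {
	double := func(d []int) { d[0] *= 2; fmt.Println("got", d) }
	// Both consumers mutate, but each sees [1 2] before its own mutation.
	fanOut([]int{1, 2}, []func([]int){double, double})
}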
+func NewLogsCloning(lcs []consumer.Logs) consumer.Logs { + if len(lcs) == 1 { + // Don't wrap if no need to do it. + return lcs[0] + } + return logsCloningConsumer(lcs) +} + +type logsCloningConsumer []consumer.Logs + +var _ consumer.Logs = (*logsCloningConsumer)(nil) + +func (lfc logsCloningConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: true} +} + +// ConsumeLogs exports the pdata.Logs to all consumers wrapped by the current one. +func (lfc logsCloningConsumer) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + var errs []error + + // Fan out to first len-1 consumers. + for i := 0; i < len(lfc)-1; i++ { + // Create a clone of data. We need to clone because consumers may modify the data. + if err := lfc[i].ConsumeLogs(ctx, ld.Clone()); err != nil { + errs = append(errs, err) + } + } + + if len(lfc) > 0 { + // Give the original data to the last consumer. + lastTc := lfc[len(lfc)-1] + if err := lastTc.ConsumeLogs(ctx, ld); err != nil { + errs = append(errs, err) + } + } + + return consumererror.Combine(errs) +} diff --git a/internal/otel_collector/service/internal/fanoutconsumer/consumer.go b/internal/otel_collector/service/internal/fanoutconsumer/consumer.go new file mode 100644 index 00000000000..77c474298f0 --- /dev/null +++ b/internal/otel_collector/service/internal/fanoutconsumer/consumer.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fanoutconsumer contains implementations of Traces/Metrics/Logs consumers +// that fan out the data to multiple other consumers. +// +// Cloning connectors create clones of data before fanning out, which ensures each +// consumer gets their own copy of data and is free to modify it. +package fanoutconsumer + +import ( + "context" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/model/pdata" +) + +// NewMetrics wraps multiple metrics consumers in a single one. +func NewMetrics(mcs []consumer.Metrics) consumer.Metrics { + if len(mcs) == 1 { + // Don't wrap if no need to do it. + return mcs[0] + } + return metricsConsumer(mcs) +} + +type metricsConsumer []consumer.Metrics + +var _ consumer.Metrics = (*metricsConsumer)(nil) + +func (mfc metricsConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +// ConsumeMetrics exports the pdata.Metrics to all consumers wrapped by the current one. +func (mfc metricsConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { + var errs []error + for _, mc := range mfc { + if err := mc.ConsumeMetrics(ctx, md); err != nil { + errs = append(errs, err) + } + } + return consumererror.Combine(errs) +} + +// NewTraces wraps multiple trace consumers in a single one. +func NewTraces(tcs []consumer.Traces) consumer.Traces { + if len(tcs) == 1 { + // Don't wrap if no need to do it. 
+ return tcs[0] + } + return traceConsumer(tcs) +} + +type traceConsumer []consumer.Traces + +var _ consumer.Traces = (*traceConsumer)(nil) + +func (tfc traceConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +// ConsumeTraces exports the pdata.Traces to all consumers wrapped by the current one. +func (tfc traceConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { + var errs []error + for _, tc := range tfc { + if err := tc.ConsumeTraces(ctx, td); err != nil { + errs = append(errs, err) + } + } + return consumererror.Combine(errs) +} + +// NewLogs wraps multiple log consumers in a single one. +func NewLogs(lcs []consumer.Logs) consumer.Logs { + if len(lcs) == 1 { + // Don't wrap if no need to do it. + return lcs[0] + } + return logsConsumer(lcs) +} + +type logsConsumer []consumer.Logs + +var _ consumer.Logs = (*logsConsumer)(nil) + +func (lfc logsConsumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +// ConsumeLogs exports the pdata.Logs to all consumers wrapped by the current one. +func (lfc logsConsumer) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { + var errs []error + for _, lc := range lfc { + if err := lc.ConsumeLogs(ctx, ld); err != nil { + errs = append(errs, err) + } + } + return consumererror.Combine(errs) +} diff --git a/internal/otel_collector/service/internal/otel_trace_sampler.go b/internal/otel_collector/service/internal/otel_trace_sampler.go new file mode 100644 index 00000000000..15141bee7b4 --- /dev/null +++ b/internal/otel_collector/service/internal/otel_trace_sampler.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +type recordSampler struct{} + +func (r recordSampler) ShouldSample(parameters sdktrace.SamplingParameters) sdktrace.SamplingResult { + return sdktrace.SamplingResult{Decision: sdktrace.RecordOnly} +} + +func (r recordSampler) Description() string { + return "Always record sampler" +} + +func AlwaysRecord() sdktrace.Sampler { + rs := &recordSampler{} + return sdktrace.ParentBased( + rs, + sdktrace.WithRemoteParentSampled(sdktrace.AlwaysSample()), + sdktrace.WithRemoteParentNotSampled(rs), + sdktrace.WithLocalParentSampled(sdktrace.AlwaysSample()), + sdktrace.WithRemoteParentSampled(rs)) +} diff --git a/internal/otel_collector/service/internal/telemetry/process_telemetry.go b/internal/otel_collector/service/internal/telemetry/process_telemetry.go new file mode 100644 index 00000000000..d93bb93a3c3 --- /dev/null +++ b/internal/otel_collector/service/internal/telemetry/process_telemetry.go @@ -0,0 +1,185 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package telemetry + +import ( + "context" + "os" + "runtime" + "time" + + "github.com/shirou/gopsutil/process" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +// ProcessMetricsViews is a struct that contains views related to process metrics (cpu, mem, etc) +type ProcessMetricsViews struct { + prevTimeUnixNano int64 + ballastSizeBytes uint64 + views []*view.View + done chan struct{} + proc *process.Process +} + +var mUptime = stats.Float64( + "process/uptime", + "Uptime of the process", + stats.UnitSeconds) +var viewProcessUptime = &view.View{ + Name: mUptime.Name(), + Description: mUptime.Description(), + Measure: mUptime, + Aggregation: view.Sum(), + TagKeys: nil, +} + +var mRuntimeAllocMem = stats.Int64( + "process/runtime/heap_alloc_bytes", + "Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')", + stats.UnitBytes) +var viewAllocMem = &view.View{ + Name: mRuntimeAllocMem.Name(), + Description: mRuntimeAllocMem.Description(), + Measure: mRuntimeAllocMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRuntimeTotalAllocMem = stats.Int64( + "process/runtime/total_alloc_bytes", + "Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')", + stats.UnitBytes) +var viewTotalAllocMem = &view.View{ + Name: mRuntimeTotalAllocMem.Name(), + Description: mRuntimeTotalAllocMem.Description(), + Measure: mRuntimeTotalAllocMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRuntimeSysMem = stats.Int64( + "process/runtime/total_sys_memory_bytes", + "Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')", + stats.UnitBytes) +var viewSysMem = &view.View{ + Name: mRuntimeSysMem.Name(), + Description: mRuntimeSysMem.Description(), + Measure: mRuntimeSysMem, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mCPUSeconds = stats.Float64( + "process/cpu_seconds", + "Total CPU user and system time in seconds", + stats.UnitSeconds) +var viewCPUSeconds = &view.View{ + Name: mCPUSeconds.Name(), + Description: mCPUSeconds.Description(), + Measure: mCPUSeconds, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +var mRSSMemory = stats.Int64( + "process/memory/rss", + "Total physical memory (resident set size)", + stats.UnitBytes) +var viewRSSMemory = &view.View{ + Name: mRSSMemory.Name(), + Description: mRSSMemory.Description(), + Measure: mRSSMemory, + Aggregation: view.LastValue(), + TagKeys: nil, +} + +// NewProcessMetricsViews creates a new set of ProcessMetrics (mem, cpu) that can be used to measure +// basic information about this process. 
+func NewProcessMetricsViews(ballastSizeBytes uint64) (*ProcessMetricsViews, error) { + pmv := &ProcessMetricsViews{ + prevTimeUnixNano: time.Now().UnixNano(), + views: []*view.View{viewProcessUptime, viewAllocMem, viewTotalAllocMem, viewSysMem, viewCPUSeconds, viewRSSMemory}, + ballastSizeBytes: ballastSizeBytes, + done: make(chan struct{}), + } + + pid := os.Getpid() + + var err error + pmv.proc, err = process.NewProcess(int32(pid)) + if err != nil { + return nil, err + } + + return pmv, nil +} + +// StartCollection starts a ticker'd goroutine that will update the PMV measurements every 5 seconds +func (pmv *ProcessMetricsViews) StartCollection() { + go func() { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + pmv.updateViews() + case <-pmv.done: + return + } + } + }() +} + +// Views returns the views internal to the PMV. +func (pmv *ProcessMetricsViews) Views() []*view.View { + return pmv.views +} + +// StopCollection stops the collection of the process metric information. +func (pmv *ProcessMetricsViews) StopCollection() { + close(pmv.done) +} + +func (pmv *ProcessMetricsViews) updateViews() { + now := time.Now().UnixNano() + stats.Record(context.Background(), mUptime.M(float64(now-pmv.prevTimeUnixNano)/1e9)) + pmv.prevTimeUnixNano = now + + ms := &runtime.MemStats{} + pmv.readMemStats(ms) + stats.Record(context.Background(), mRuntimeAllocMem.M(int64(ms.Alloc))) + stats.Record(context.Background(), mRuntimeTotalAllocMem.M(int64(ms.TotalAlloc))) + stats.Record(context.Background(), mRuntimeSysMem.M(int64(ms.Sys))) + + if pmv.proc != nil { + if times, err := pmv.proc.Times(); err == nil { + stats.Record(context.Background(), mCPUSeconds.M(times.Total())) + } + if mem, err := pmv.proc.MemoryInfo(); err == nil { + stats.Record(context.Background(), mRSSMemory.M(int64(mem.RSS))) + } + } +} + +func (pmv *ProcessMetricsViews) readMemStats(ms *runtime.MemStats) { + runtime.ReadMemStats(ms) + if pmv.ballastSizeBytes > 0 { + ms.Alloc -= pmv.ballastSizeBytes + ms.HeapAlloc -= pmv.ballastSizeBytes + ms.HeapSys -= pmv.ballastSizeBytes + ms.HeapInuse -= pmv.ballastSizeBytes + } +} diff --git a/internal/otel_collector/service/internal/zpages/templates.go b/internal/otel_collector/service/internal/zpages/templates.go new file mode 100644 index 00000000000..364eb5b2078 --- /dev/null +++ b/internal/otel_collector/service/internal/zpages/templates.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
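Worth noting about the process telemetry above: updateViews runs on a five-second ticker paired with a done channel so that StopCollection can end the goroutine promptly. A minimal sketch of that start/stop pattern, with a hypothetical update callback and a shorter interval for demonstration:

package main

import (
	"fmt"
	"time"
)

type collector struct{ done chan struct{} }

// start runs update on every tick until stop is called, the same
// select-on-ticker-or-done shape used by StartCollection above.
func (c *collector) start(interval time.Duration, update func()) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				update()
			case <-c.done:
				return
			}
		}
	}()
}

func (c *collector) stop() { close(c.done) }

func main() {
	c := &collector{done: make(chan struct{})}
	c.start(50*time.Millisecond, func() { fmt.Println("tick") })
	time.Sleep(120 * time.Millisecond)
	c.stop()
}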
+
+package zpages
+
+import (
+ "html/template"
+ "io"
+ "io/ioutil"
+ "log"
+
+ "go.opentelemetry.io/collector/service/internal/zpages/tmplgen"
+)
+
+var (
+ fs = tmplgen.FS(false)
+ templateFunctions = template.FuncMap{
+ "even": even,
+ "getKey": getKey,
+ "getValue": getValue,
+ }
+ componentHeaderTemplate = parseTemplate("component_header")
+ extensionsTableTemplate = parseTemplate("extensions_table")
+ headerTemplate = parseTemplate("header")
+ footerTemplate = parseTemplate("footer")
+ pipelinesTableTemplate = parseTemplate("pipelines_table")
+ propertiesTableTemplate = parseTemplate("properties_table")
+)
+
+func parseTemplate(name string) *template.Template {
+ f, err := fs.Open("/templates/" + name + ".html")
+ if err != nil {
+ log.Panicf("%v: %v", name, err)
+ }
+ defer f.Close()
+ text, err := ioutil.ReadAll(f)
+ if err != nil {
+ log.Panicf("%v: %v", name, err)
+ }
+ return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text)))
+}
+
+// HeaderData contains data for the header template.
+type HeaderData struct {
+ Title string
+}
+
+// WriteHTMLHeader writes the header.
+func WriteHTMLHeader(w io.Writer, hd HeaderData) {
+ if err := headerTemplate.Execute(w, hd); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+// SummaryExtensionsTableData contains data for the extensions summary table template.
+type SummaryExtensionsTableData struct {
+ ComponentEndpoint string
+ Rows []SummaryExtensionsTableRowData
+}
+
+// SummaryExtensionsTableRowData contains data for one row in the extensions summary table template.
+type SummaryExtensionsTableRowData struct {
+ FullName string
+ Enabled bool
+}
+
+// WriteHTMLExtensionsSummaryTable writes the summary table for the extensions.
+// It does not write the header or footer.
+func WriteHTMLExtensionsSummaryTable(w io.Writer, spd SummaryExtensionsTableData) {
+ if err := extensionsTableTemplate.Execute(w, spd); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+// SummaryPipelinesTableData contains data for the pipelines summary table template.
+type SummaryPipelinesTableData struct {
+ ComponentEndpoint string
+ Rows []SummaryPipelinesTableRowData
+}
+
+// SummaryPipelinesTableRowData contains data for one row in the pipelines summary table template.
+type SummaryPipelinesTableRowData struct {
+ FullName string
+ InputType string
+ MutatesData bool
+ Receivers []string
+ Processors []string
+ Exporters []string
+}
+
+// WriteHTMLPipelinesSummaryTable writes the summary table for the pipelines.
+// It does not write the header or footer.
+func WriteHTMLPipelinesSummaryTable(w io.Writer, spd SummaryPipelinesTableData) {
+ if err := pipelinesTableTemplate.Execute(w, spd); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+// ComponentHeaderData contains data for the component header template.
+type ComponentHeaderData struct {
+ Name string
+ ComponentEndpoint string
+ Link bool
+}
+
+// WriteHTMLComponentHeader writes the header for components.
+func WriteHTMLComponentHeader(w io.Writer, chd ComponentHeaderData) {
+ if err := componentHeaderTemplate.Execute(w, chd); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+// PropertiesTableData contains data for the properties table template.
+type PropertiesTableData struct {
+ Name string
+ Properties [][2]string
+}
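parseTemplate above registers the even, getKey, and getValue helpers through a template.FuncMap before parsing, which is what lets the templates below call them. A small self-contained sketch of the same pattern (the template text here is hypothetical, not the zpages markup):

package main

import (
	"html/template"
	"os"
)

func main() {
	// Helpers must be registered via Funcs before Parse so the parser
	// recognizes them, exactly as parseTemplate does above.
	funcs := template.FuncMap{"even": func(x int) bool { return x%2 == 0 }}
	tmpl := template.Must(template.New("rows").Funcs(funcs).Parse(
		`{{range $i, $v := .}}{{if even $i}}[{{$v}}] {{else}}{{$v}} {{end}}{{end}}`))
	_ = tmpl.Execute(os.Stdout, []string{"a", "b", "c"}) // [a] b [c]
}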
+
+// WriteHTMLPropertiesTable writes the HTML for the properties table.
+func WriteHTMLPropertiesTable(w io.Writer, chd PropertiesTableData) {
+ if err := propertiesTableTemplate.Execute(w, chd); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+// WriteHTMLFooter writes the footer.
+func WriteHTMLFooter(w io.Writer) {
+ if err := footerTemplate.Execute(w, nil); err != nil {
+ log.Printf("zpages: executing template: %v", err)
+ }
+}
+
+func even(x int) bool {
+ return x%2 == 0
+}
+
+func getKey(row [2]string) string {
+ return row[0]
+}
+
+func getValue(row [2]string) string {
+ return row[1]
+}
diff --git a/internal/otel_collector/service/internal/zpages/templates/component_header.html b/internal/otel_collector/service/internal/zpages/templates/component_header.html
new file mode 100644
index 00000000000..463a15cc7e6
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/component_header.html
@@ -0,0 +1,7 @@
+{{$a := .ComponentEndpoint}}
+{{$link := .Link}}
+{{- if $link -}}
+{{.Name}}
+{{- else -}}
+{{.Name}}
+{{- end -}}
\ No newline at end of file
diff --git a/internal/otel_collector/service/internal/zpages/templates/extensions_table.html b/internal/otel_collector/service/internal/zpages/templates/extensions_table.html
new file mode 100644
index 00000000000..0b39ee933e1
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/extensions_table.html
@@ -0,0 +1,11 @@
+{{$a := .ComponentEndpoint}}
+{{range $rowindex, $row := .Rows}}
+{{- if even $rowindex}}
+{{else}}
+{{end -}}
+{{.FullName}}
+{{end}}
\ No newline at end of file
diff --git a/internal/otel_collector/service/internal/zpages/templates/footer.html b/internal/otel_collector/service/internal/zpages/templates/footer.html
new file mode 100644
index 00000000000..691287b6e35
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/footer.html
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/internal/otel_collector/service/internal/zpages/templates/header.html b/internal/otel_collector/service/internal/zpages/templates/header.html
new file mode 100644
index 00000000000..c381c0ff6c8
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/header.html
@@ -0,0 +1,11 @@
+{{.Title}}
+{{.Title}}
\ No newline at end of file
diff --git a/internal/otel_collector/service/internal/zpages/templates/pipelines_table.html b/internal/otel_collector/service/internal/zpages/templates/pipelines_table.html
new file mode 100644
index 00000000000..a9461627e89
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/pipelines_table.html
@@ -0,0 +1,45 @@
+FullName | InputType | MutatesData | Receivers | Processors | Exporters
+{{$a := .ComponentEndpoint}}
+{{range $rowindex, $row := .Rows}}
+{{- if even $rowindex}}
+{{else}}
+{{end -}}
+{{$row.FullName}} | {{$row.InputType}} | {{$row.MutatesData}}
+{{range $recindex, $rec := $row.Receivers}}
+{{$rec}}
+{{end}}
+→
+{{range $proindex, $pro := $row.Processors}}
+{{$pro}}
+→
+{{end}}
+{{range $expindex, $exp := $row.Exporters}}
+{{$exp}}
+{{end}}
+{{end}}
\ No newline at end of file
diff --git a/internal/otel_collector/service/internal/zpages/templates/properties_table.html b/internal/otel_collector/service/internal/zpages/templates/properties_table.html
new file mode 100644
index 00000000000..a43841ba770
--- /dev/null
+++ b/internal/otel_collector/service/internal/zpages/templates/properties_table.html
@@ -0,0 +1,14 @@
+{{.Name}}:
+{{ $index := 0 }}
+{{range $index, $element := .Properties}}
+{{- if even $index}}
+{{else}}
+{{end -}}
+{{$element|getKey}}
+{{$element|getValue}}
+{{end}}
\ No newline at end of file diff --git a/internal/otel_collector/service/internal/zpages/tmplgen/gen.go b/internal/otel_collector/service/internal/zpages/tmplgen/gen.go new file mode 100644 index 00000000000..19aaa91c531 --- /dev/null +++ b/internal/otel_collector/service/internal/zpages/tmplgen/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmplgen + +//go:generate esc -pkg tmplgen -o resources.go -modtime "0" ../templates/ +//go:generate addlicense -y "" -c "The OpenTelemetry Authors" resources.go diff --git a/internal/otel_collector/service/internal/zpages/tmplgen/resources.go b/internal/otel_collector/service/internal/zpages/tmplgen/resources.go new file mode 100644 index 00000000000..6e15c8c3320 --- /dev/null +++ b/internal/otel_collector/service/internal/zpages/tmplgen/resources.go @@ -0,0 +1,309 @@ +// Code generated by "esc -pkg tmplgen -o resources.go -modtime 0 ../templates/"; DO NOT EDIT. + +package tmplgen + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "sync" + "time" +) + +type _escLocalFS struct{} + +var _escLocal _escLocalFS + +type _escStaticFS struct{} + +var _escStatic _escStaticFS + +type _escDirectory struct { + fs http.FileSystem + name string +} + +type _escFile struct { + compressed string + size int64 + modtime int64 + local string + isDir bool + + once sync.Once + data []byte + name string +} + +func (_escLocalFS) Open(name string) (http.File, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + return os.Open(f.local) +} + +func (_escStaticFS) prepare(name string) (*_escFile, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + var err error + f.once.Do(func() { + f.name = path.Base(name) + if f.size == 0 { + return + } + var gr *gzip.Reader + b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) + gr, err = gzip.NewReader(b64) + if err != nil { + return + } + f.data, err = ioutil.ReadAll(gr) + }) + if err != nil { + return nil, err + } + return f, nil +} + +func (fs _escStaticFS) Open(name string) (http.File, error) { + f, err := fs.prepare(name) + if err != nil { + return nil, err + } + return f.File() +} + +func (dir _escDirectory) Open(name string) (http.File, error) { + return dir.fs.Open(dir.name + name) +} + +func (f *_escFile) File() (http.File, error) { + type httpFile struct { + *bytes.Reader + *_escFile + } + return &httpFile{ + Reader: bytes.NewReader(f.data), + _escFile: f, + }, nil +} + +func (f *_escFile) Close() error { + return nil +} + +func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { + if !f.isDir { + return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name) + } + + fis, ok := _escDirs[f.local] + if !ok { + return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, 
f.local) + } + limit := count + if count <= 0 || limit > len(fis) { + limit = len(fis) + } + + if len(fis) == 0 && count > 0 { + return nil, io.EOF + } + + return fis[0:limit], nil +} + +func (f *_escFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *_escFile) Name() string { + return f.name +} + +func (f *_escFile) Size() int64 { + return f.size +} + +func (f *_escFile) Mode() os.FileMode { + return 0 +} + +func (f *_escFile) ModTime() time.Time { + return time.Unix(f.modtime, 0) +} + +func (f *_escFile) IsDir() bool { + return f.isDir +} + +func (f *_escFile) Sys() interface{} { + return f +} + +// FS returns a http.Filesystem for the embedded assets. If useLocal is true, +// the filesystem's contents are instead used. +func FS(useLocal bool) http.FileSystem { + if useLocal { + return _escLocal + } + return _escStatic +} + +// Dir returns a http.Filesystem for the embedded assets on a given prefix dir. +// If useLocal is true, the filesystem's contents are instead used. +func Dir(useLocal bool, name string) http.FileSystem { + if useLocal { + return _escDirectory{fs: _escLocal, name: name} + } + return _escDirectory{fs: _escStatic, name: name} +} + +// FSByte returns the named file from the embedded assets. If useLocal is +// true, the filesystem's contents are instead used. +func FSByte(useLocal bool, name string) ([]byte, error) { + if useLocal { + f, err := _escLocal.Open(name) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + _ = f.Close() + return b, err + } + f, err := _escStatic.prepare(name) + if err != nil { + return nil, err + } + return f.data, nil +} + +// FSMustByte is the same as FSByte, but panics if name is not present. +func FSMustByte(useLocal bool, name string) []byte { + b, err := FSByte(useLocal, name) + if err != nil { + panic(err) + } + return b +} + +// FSString is the string version of FSByte. +func FSString(useLocal bool, name string) (string, error) { + b, err := FSByte(useLocal, name) + return string(b), err +} + +// FSMustString is the string version of FSMustByte. 
+func FSMustString(useLocal bool, name string) string { + return string(FSMustByte(useLocal, name)) +} + +var _escData = map[string]*_escFile{ + + "/templates/component_header.html": { + name: "component_header.html", + local: "../templates/component_header.html", + size: 156, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/1SMsQqDMBRFd7/iIq7q5lBiltKt9B8CPklQX6R1e9x/L6ZQ2vXcc65ZE3AZ0V3ztmcV +PW467TnpQVZmzZp0Kfs96VJQizTjw1uyAgAXB+8C4lPmsT4fydqbdY+wCen64F0fB19iWV/yF/54X0en +U3kHAAD//zT+SdCcAAAA +`, + }, + + "/templates/extensions_table.html": { + name: "extensions_table.html", + local: "../templates/extensions_table.html", + size: 353, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/2SQwU7DMBBE7/2KlemRNJwjxxwQHDnwB248DRbOOnK2tGD531HTQIvqk1fzZjU7Wuw2 +gCb5CmjVNiaHVE2j7Tz3DT0osyIiynltqWlp8xSHMTJYntmN0bOUsgDJcg9ap3jw7HC8n7+z5y0epgU7 +oxX5HeETfMGv9NPTkv4i2e6jT3HPrqE7AEui8yaECbdWkzPYUXWlaHFkg++5VR1YkJTRlt4Tdq06HVfK +4zeOAp58ZLYD2pw3L/sQXu2AUpT5N+raGl2Lu0TRtaTfqsCulJWu52bNTwAAAP//sz5qjmEBAAA= +`, + }, + + "/templates/footer.html": { + name: "footer.html", + local: "../templates/footer.html", + size: 15, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/7LRT8pPqbTjstHPKMnNsQMEAAD//wEFevAPAAAA +`, + }, + + "/templates/header.html": { + name: "header.html", + local: "../templates/header.html", + size: 467, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/5TRMU8sIRAH8P4+BY/25eC9szGGxUItLIwW11giO7uMB8wG5rxsLvfdDdnTxNhoBeFP +fpnM3/y5fbzZPj/dicAp2pVph4guj52ELK0J4Hq7EkIIk4Cd8MGVCtzJPQ/rS3mOGDmCPR7Vtl1OJ6OX +lyWNmHeiQOxkDVTY71mgpyxFKDB0UuvD4aBogswQIQGXWSHpwb21Xwo9Sf1d4jlCDQD8wQTmqV5pPVDm +qkaiMYKbsCpPSTfpenAJ49w9OIaCLv6995Sr/AXtqQc1Aqc+tgn/qwv1T6czpzD3ONJ6wrxTCbPy9ROv +vuDEoocBiqjF/5RszGuV1uhFsCujl0bMC/Vz62vzZe1hY98DAAD//7qRGmLTAQAA +`, + }, + + "/templates/pipelines_table.html": { + name: "pipelines_table.html", + local: "../templates/pipelines_table.html", + size: 1930, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/7SVwXLTMBCG7zyFxnRyIjVcU1scoMxwgGE6vIAsbYKmykqzklu3xu/OWLZVp84Fgi8Z +Kfq1/+y3f5QiiMoA8+HJQJlVlhTQ1jshNR527H3G3zDGWBFoWAwbxaQ13gksPzBh9AFLA/vAi4p/qY35 +Lo5Q5BUv8qBObvENVt7dDJ+/55uFdGEgAQNQb/EVXR1+Prk1Pb7VQQTwn0UQK7rcgQT9AORX9PhBVoL3 +dlWT28ZZCotGinyKTdteCbYr2fUne3QWAcMtKmc1hq4bBSTwAOyK7KNGBc27uIx37uyjH2WDdMv0nsED +4It8dj7mNUVayPsD2RrVjr0FgIzPKoHxsLzK2xZQse3spEfVtr3d9RTxroud/h3EqUgK8UVVZjH9pzrj +ILNhkjMypyMBmUYCsh9JNE/pfQUw1hbsF8G+zPrBd93HZ6cdGI2A4gjlAuTmWU65SAKQJ9/fa1QljZYZ +nxRFLvjSvaLXrQCq+TT/M6oNCaKb8/Qc2YmeI5vovfwuV8HnyC7xuckz8ouSs/zOtrMuwYQLGjfhgsYl +XOmFWYUWNG5JC0bLCCsqLgjbfJfexEFX5PEvmP8JAAD//50711CKBwAA +`, + }, + + "/templates/properties_table.html": { + name: "properties_table.html", + local: "../templates/properties_table.html", + size: 420, + modtime: 0, + compressed: ` +H4sIAAAAAAAC/2SRwW7DIBBE7/6KVRr1VMc5u5gfqFT11Ds2U8sqWVuwqRoR/r1yTCpb4YAEO48ZDarV +MR7ezQkp1apqdaHEtA4U5OLQ7NrRW/gyTKYbuK/puNMFEVGMtB/Y4pfqho6UUr71hnvk0Qvt4XACyyw6 +fPhxgpcBIasXoqThi/ADztRqOC8l/j+L6b57P57Z1vQEIEdZnoELeER1jGBL5WqixJJxQ89NBxZ4favg +nvTaQ95wSWnuQlVi9RrUz9yG6XXZr+vDg3TrsTX4NO6M2WLDVOLv3YJtSoWqbl+h/wIAAP//aLmk3KQB +AAA= +`, + }, + + "/templates": { + name: "templates", + local: `../templates/`, + isDir: true, + }, +} + +var _escDirs = map[string][]os.FileInfo{ + + "../templates/": { + _escData["/templates/component_header.html"], + _escData["/templates/extensions_table.html"], + _escData["/templates/footer.html"], + _escData["/templates/header.html"], + _escData["/templates/pipelines_table.html"], + _escData["/templates/properties_table.html"], + }, +} diff --git a/internal/otel_collector/service/logger.go 
b/internal/otel_collector/service/logger.go new file mode 100644 index 00000000000..900ea69d800 --- /dev/null +++ b/internal/otel_collector/service/logger.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "flag" + "fmt" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + logLevelCfg = "log-level" + logProfileCfg = "log-profile" + logFormatCfg = "log-format" +) + +var ( + // Command line pointer to logger level flag configuration. + loggerLevelPtr *zapcore.Level + loggerProfilePtr *string + loggerFormatPtr *string +) + +func loggerFlags(flags *flag.FlagSet) { + defaultLevel := zapcore.InfoLevel + loggerLevelPtr = &defaultLevel + flags.Var(loggerLevelPtr, logLevelCfg, "Output level of logs (DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL)") + + loggerProfilePtr = flags.String(logProfileCfg, "prod", "Logging profile to use (dev, prod)") + + // Note: we use "console" by default for more human-friendly mode of logging (tab delimited, formatted timestamps). + loggerFormatPtr = flags.String(logFormatCfg, "console", "Format of logs to use (json, console)") +} + +func newLogger(options []zap.Option) (*zap.Logger, error) { + var conf zap.Config + + // Use logger profile if set on command line before falling back + // to default based on build type. + switch *loggerProfilePtr { + case "dev": + conf = zap.NewDevelopmentConfig() + case "prod": + conf = zap.NewProductionConfig() + default: + return nil, fmt.Errorf("invalid value %s for %s flag", *loggerProfilePtr, logProfileCfg) + } + + conf.Encoding = *loggerFormatPtr + if conf.Encoding == "console" { + // Human-readable timestamps for console format of logs. + conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + } + + conf.Level.SetLevel(*loggerLevelPtr) + return conf.Build(options...) +} diff --git a/internal/otel_collector/service/parserprovider/default.go b/internal/otel_collector/service/parserprovider/default.go new file mode 100644 index 00000000000..11efd264356 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/default.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +// Default is the default ParserProvider and it creates configuration from a file +// defined by the --config command line flag and overwrites properties from --set +// command line flag (if the flag is present). 
+func Default() ParserProvider { + return NewSetFlag(NewFile()) +} diff --git a/internal/otel_collector/service/parserprovider/file.go b/internal/otel_collector/service/parserprovider/file.go new file mode 100644 index 00000000000..9c0ea40bd30 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/file.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +import ( + "errors" + "fmt" + + "go.opentelemetry.io/collector/config/configparser" +) + +type fileProvider struct{} + +// NewFile returns a new ParserProvider that reads the configuration from a file configured +// via the --config command line flag. +func NewFile() ParserProvider { + return &fileProvider{} +} + +func (fl *fileProvider) Get() (*configparser.Parser, error) { + fileName := getConfigFlag() + if fileName == "" { + return nil, errors.New("config file not specified") + } + + cp, err := configparser.NewParserFromFile(fileName) + if err != nil { + return nil, fmt.Errorf("error loading config file %q: %v", fileName, err) + } + + return cp, nil +} diff --git a/internal/otel_collector/service/parserprovider/flags.go b/internal/otel_collector/service/parserprovider/flags.go new file mode 100644 index 00000000000..dc204c78a14 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/flags.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +import ( + "flag" + "strings" +) + +const ( + configFlagName = "config" + setFlagName = "set" +) + +var ( + configFlag *string + setFlag *stringArrayValue +) + +type stringArrayValue struct { + values []string +} + +func (s *stringArrayValue) Set(val string) error { + s.values = append(s.values, val) + return nil +} + +func (s *stringArrayValue) String() string { + return "[" + strings.Join(s.values, ",") + "]" +} + +// Flags adds flags related to basic configuration's parser loader to the flags. +func Flags(flags *flag.FlagSet) { + configFlag = flags.String(configFlagName, "", "Path to the config file") + setFlag = new(stringArrayValue) + flags.Var(setFlag, setFlagName, + "Set arbitrary component config property. The component has to be defined in the config file and the flag"+ + " has a higher precedence. Array config properties are overridden and maps are joined, note that only a single"+ + " (first) array property can be set e.g. -set=processors.attributes.actions.key=some_key. 
Example --set=processors.batch.timeout=2s") +} + +func getConfigFlag() string { + return *configFlag +} + +func getSetFlag() []string { + return setFlag.values +} diff --git a/internal/otel_collector/service/parserprovider/inmemory.go b/internal/otel_collector/service/parserprovider/inmemory.go new file mode 100644 index 00000000000..80dd4120b66 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/inmemory.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +import ( + "io" + + "go.opentelemetry.io/collector/config/configparser" +) + +type inMemoryProvider struct { + buf io.Reader +} + +// NewInMemory returns a new ParserProvider that reads the configuration from the provided buffer as YAML. +func NewInMemory(buf io.Reader) ParserProvider { + return &inMemoryProvider{buf: buf} +} + +func (inp *inMemoryProvider) Get() (*configparser.Parser, error) { + return configparser.NewParserFromBuffer(inp.buf) +} diff --git a/internal/otel_collector/service/parserprovider/provider.go b/internal/otel_collector/service/parserprovider/provider.go new file mode 100644 index 00000000000..b78342dfc04 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/provider.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +import ( + "context" + + "go.opentelemetry.io/collector/config/configparser" +) + +// ParserProvider is an interface that helps providing configuration's parser. +// Implementations may load the parser from a file, a database or any other source. +type ParserProvider interface { + // Get returns the config.Parser if succeed or error otherwise. + Get() (*configparser.Parser, error) +} + +// Watchable is an extension for ParserProvider that is implemented if the given provider +// supports monitoring of configuration updates. +type Watchable interface { + // WatchForUpdate waits for updates on any of the values retrieved from config sources. + // It blocks until configuration updates are received and can + // return an error if anything fails. WatchForUpdate is used once during the + // first evaluation of the configuration and is not used to watch configuration + // changes continuously. + WatchForUpdate() error +} + +// Closeable is an extension interface for ParserProvider that should be added if they need to be closed. 
+type Closeable interface { + Close(ctx context.Context) error +} diff --git a/internal/otel_collector/service/parserprovider/setflag.go b/internal/otel_collector/service/parserprovider/setflag.go new file mode 100644 index 00000000000..8ce4f09f052 --- /dev/null +++ b/internal/otel_collector/service/parserprovider/setflag.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parserprovider + +import ( + "bytes" + "fmt" + "strings" + + "github.com/knadh/koanf/maps" + "github.com/magiconair/properties" + + "go.opentelemetry.io/collector/config/configparser" +) + +type setFlagProvider struct { + base ParserProvider +} + +// NewSetFlag returns a config.ParserProvider, that wraps a "base" config.ParserProvider, then +// overrides properties from set flag(s) in the loaded Parser. +// +// The implementation reads set flag(s) from the cmd and concatenates them as a "properties" file. +// Then the properties file is read and properties are set to the loaded Parser. +func NewSetFlag(base ParserProvider) ParserProvider { + return &setFlagProvider{ + base: base, + } +} + +func (sfl *setFlagProvider) Get() (*configparser.Parser, error) { + flagProperties := getSetFlag() + if len(flagProperties) == 0 { + return sfl.base.Get() + } + + b := &bytes.Buffer{} + for _, property := range flagProperties { + property = strings.TrimSpace(property) + if _, err := fmt.Fprintf(b, "%s\n", property); err != nil { + return nil, err + } + } + + var props *properties.Properties + var err error + if props, err = properties.Load(b.Bytes(), properties.UTF8); err != nil { + return nil, err + } + + // Create a map manually instead of using props.Map() to allow env var expansion + // as used by original Viper-based configparser.Parser. 
+ parsed := make(map[string]interface{}, props.Len()) + for _, key := range props.Keys() { + value, _ := props.Get(key) + parsed[key] = value + } + prop := maps.Unflatten(parsed, ".") + + var cp *configparser.Parser + if cp, err = sfl.base.Get(); err != nil { + return nil, err + } + return cp, cp.MergeStringMap(prop) +} diff --git a/internal/otel_collector/service/parserprovider/testdata/otelcol-config.yaml b/internal/otel_collector/service/parserprovider/testdata/otelcol-config.yaml new file mode 100644 index 00000000000..0858017c1dc --- /dev/null +++ b/internal/otel_collector/service/parserprovider/testdata/otelcol-config.yaml @@ -0,0 +1,24 @@ +receivers: + otlp: + protocols: + grpc: + +exporters: + otlp: + endpoint: "locahost:55678" + +processors: + batch: + +extensions: + memory_ballast: + size_mib: 512 + zpages: + +service: + extensions: [memory_ballast, zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] diff --git a/internal/otel_collector/service/service.go b/internal/otel_collector/service/service.go new file mode 100644 index 00000000000..de08cc615ff --- /dev/null +++ b/internal/otel_collector/service/service.go @@ -0,0 +1,232 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "context" + "fmt" + + "go.opentelemetry.io/contrib/zpages" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/service/internal/builder" +) + +// service represents the implementation of a component.Host. 
+type service struct { + factories component.Factories + buildInfo component.BuildInfo + config *config.Config + logger *zap.Logger + tracerProvider trace.TracerProvider + zPagesSpanProcessor *zpages.SpanProcessor + asyncErrorChannel chan error + + builtExporters builder.Exporters + builtReceivers builder.Receivers + builtPipelines builder.BuiltPipelines + builtExtensions builder.Extensions +} + +func newService(set *svcSettings) (*service, error) { + srv := &service{ + factories: set.Factories, + buildInfo: set.BuildInfo, + config: set.Config, + logger: set.Logger, + tracerProvider: set.TracerProvider, + zPagesSpanProcessor: set.ZPagesSpanProcessor, + asyncErrorChannel: set.AsyncErrorChannel, + } + + if err := srv.config.Validate(); err != nil { + return nil, fmt.Errorf("invalid configuration: %w", err) + } + + if err := srv.buildExtensions(); err != nil { + return nil, fmt.Errorf("cannot build extensions: %w", err) + } + + if err := srv.buildPipelines(); err != nil { + return nil, fmt.Errorf("cannot build pipelines: %w", err) + } + + return srv, nil +} + +func (srv *service) Start(ctx context.Context) error { + if err := srv.startExtensions(ctx); err != nil { + return fmt.Errorf("cannot setup extensions: %w", err) + } + + if err := srv.startPipelines(ctx); err != nil { + return fmt.Errorf("cannot setup pipelines: %w", err) + } + + return srv.builtExtensions.NotifyPipelineReady() +} + +func (srv *service) Shutdown(ctx context.Context) error { + // Accumulate errors and proceed with shutting down remaining components. + var errs []error + + if err := srv.builtExtensions.NotifyPipelineNotReady(); err != nil { + errs = append(errs, fmt.Errorf("failed to notify that pipeline is not ready: %w", err)) + } + + if err := srv.shutdownPipelines(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown pipelines: %w", err)) + } + + if err := srv.shutdownExtensions(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown extensions: %w", err)) + } + + return consumererror.Combine(errs) +} + +// ReportFatalError is used to report to the host that the receiver encountered +// a fatal error (i.e.: an error that the instance can't recover from) after +// its start function has already returned. 
+func (srv *service) ReportFatalError(err error) { + srv.asyncErrorChannel <- err +} + +func (srv *service) GetFactory(kind component.Kind, componentType config.Type) component.Factory { + switch kind { + case component.KindReceiver: + return srv.factories.Receivers[componentType] + case component.KindProcessor: + return srv.factories.Processors[componentType] + case component.KindExporter: + return srv.factories.Exporters[componentType] + case component.KindExtension: + return srv.factories.Extensions[componentType] + } + return nil +} + +func (srv *service) GetExtensions() map[config.ComponentID]component.Extension { + return srv.builtExtensions.ToMap() +} + +func (srv *service) GetExporters() map[config.DataType]map[config.ComponentID]component.Exporter { + return srv.builtExporters.ToMapByDataType() +} + +func (srv *service) buildExtensions() error { + var err error + srv.builtExtensions, err = builder.BuildExtensions(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.factories.Extensions) + if err != nil { + return fmt.Errorf("cannot build builtExtensions: %w", err) + } + return nil +} + +func (srv *service) startExtensions(ctx context.Context) error { + srv.logger.Info("Starting extensions...") + err := srv.builtExtensions.StartAll(ctx, srv) + if err != nil { + return fmt.Errorf("failed to start extensions: %w", err) + } + return nil +} + +func (srv *service) shutdownExtensions(ctx context.Context) error { + srv.logger.Info("Stopping extensions...") + err := srv.builtExtensions.ShutdownAll(ctx) + if err != nil { + return fmt.Errorf("failed to shutdown extensions: %w", err) + } + return nil +} + +func (srv *service) buildPipelines() error { + // Pipeline is built backwards, starting from exporters, so that we create objects + // which are referenced before objects which reference them. + + // First create exporters. + var err error + srv.builtExporters, err = builder.BuildExporters(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.factories.Exporters) + if err != nil { + return fmt.Errorf("cannot build builtExporters: %w", err) + } + + // Create pipelines and their processors and plug exporters to the + // end of the pipelines. + srv.builtPipelines, err = builder.BuildPipelines(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.builtExporters, srv.factories.Processors) + if err != nil { + return fmt.Errorf("cannot build pipelines: %w", err) + } + + // Create receivers and plug them into the start of the pipelines. 
+ srv.builtReceivers, err = builder.BuildReceivers(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.builtPipelines, srv.factories.Receivers) + if err != nil { + return fmt.Errorf("cannot build receivers: %w", err) + } + + return nil +} + +func (srv *service) startPipelines(ctx context.Context) error { + srv.logger.Info("Starting exporters...") + if err := srv.builtExporters.StartAll(ctx, srv); err != nil { + return fmt.Errorf("cannot start builtExporters: %w", err) + } + + srv.logger.Info("Starting processors...") + if err := srv.builtPipelines.StartProcessors(ctx, srv); err != nil { + return fmt.Errorf("cannot start processors: %w", err) + } + + srv.logger.Info("Starting receivers...") + if err := srv.builtReceivers.StartAll(ctx, srv); err != nil { + return fmt.Errorf("cannot start receivers: %w", err) + } + + return nil +} + +func (srv *service) shutdownPipelines(ctx context.Context) error { + // Shutdown order is the reverse of building: first receivers, then flushing pipelines + // giving senders a chance to send all their data. This may take time, the allowed + // time should be part of configuration. + + var errs []error + + srv.logger.Info("Stopping receivers...") + err := srv.builtReceivers.ShutdownAll(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to stop receivers: %w", err)) + } + + srv.logger.Info("Stopping processors...") + err = srv.builtPipelines.ShutdownProcessors(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown processors: %w", err)) + } + + srv.logger.Info("Stopping exporters...") + err = srv.builtExporters.ShutdownAll(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown exporters: %w", err)) + } + + return consumererror.Combine(errs) +} diff --git a/internal/otel_collector/service/settings.go b/internal/otel_collector/service/settings.go new file mode 100644 index 00000000000..e105b38afe5 --- /dev/null +++ b/internal/otel_collector/service/settings.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "go.opentelemetry.io/contrib/zpages" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/configunmarshaler" + "go.opentelemetry.io/collector/service/parserprovider" +) + +// svcSettings holds configuration for building a new service. +type svcSettings struct { + // Factories component factories. + Factories component.Factories + + // BuildInfo provides collector start information. + BuildInfo component.BuildInfo + + // Config represents the configuration of the service. + Config *config.Config + + // Logger represents the logger used for all the components. + Logger *zap.Logger + + // TracerProvider represents the TracerProvider used for all the components. + TracerProvider trace.TracerProvider + + // ZPagesSpanProcessor represents the SpanProcessor for tracez page. 
+ ZPagesSpanProcessor *zpages.SpanProcessor + + // AsyncErrorChannel is the channel that is used to report fatal errors. + AsyncErrorChannel chan error +} + +// CollectorSettings holds configuration for creating a new Collector. +type CollectorSettings struct { + // Factories component factories. + Factories component.Factories + + // BuildInfo provides collector start information. + BuildInfo component.BuildInfo + + // DisableGracefulShutdown disables the automatic graceful shutdown + // of the collector on SIGINT or SIGTERM. + // Users who want to handle signals themselves can disable this behavior + // and manually handle the signals to shutdown the collector. + DisableGracefulShutdown bool + + // ParserProvider provides the configuration's Parser. + // If it is not provided a default provider is used. The default provider loads the configuration + // from a config file define by the --config command line flag and overrides component's configuration + // properties supplied via --set command line flag. + // If the provider is parserprovider.Watchable, collector + // may reload the configuration upon error. + ParserProvider parserprovider.ParserProvider + + // ConfigUnmarshaler unmarshalls the configuration's Parser into the service configuration. + // If it is not provided a default unmarshaler is used. + ConfigUnmarshaler configunmarshaler.ConfigUnmarshaler + + // LoggingOptions provides a way to change behavior of zap logging. + LoggingOptions []zap.Option +} diff --git a/internal/otel_collector/service/telemetry.go b/internal/otel_collector/service/telemetry.go new file mode 100644 index 00000000000..26214c9137e --- /dev/null +++ b/internal/otel_collector/service/telemetry.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package service + +import ( + "net/http" + "strings" + "unicode" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/google/uuid" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/internal/collector/telemetry" + "go.opentelemetry.io/collector/internal/obsreportconfig" + semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" + "go.opentelemetry.io/collector/processor/batchprocessor" + telemetry2 "go.opentelemetry.io/collector/service/internal/telemetry" +) + +// collectorTelemetry is collector's own telemetry. 
+var collectorTelemetry collectorTelemetryExporter = &colTelemetry{} + +type collectorTelemetryExporter interface { + init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error + shutdown() error +} + +type colTelemetry struct { + views []*view.View + server *http.Server +} + +func (tel *colTelemetry) init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error { + level := configtelemetry.GetMetricsLevelFlagValue() + metricsAddr := telemetry.GetMetricsAddr() + + if level == configtelemetry.LevelNone || metricsAddr == "" { + return nil + } + + processMetricsViews, err := telemetry2.NewProcessMetricsViews(ballastSizeBytes) + if err != nil { + return err + } + + var views []*view.View + obsMetrics := obsreportconfig.Configure(level) + views = append(views, batchprocessor.MetricViews()...) + views = append(views, obsMetrics.Views...) + views = append(views, processMetricsViews.Views()...) + + tel.views = views + if err = view.Register(views...); err != nil { + return err + } + + processMetricsViews.StartCollection() + + // Until we can use a generic metrics exporter, default to Prometheus. + opts := prometheus.Options{ + Namespace: telemetry.GetMetricsPrefix(), + } + + var instanceID string + if telemetry.GetAddInstanceID() { + instanceUUID, _ := uuid.NewRandom() + instanceID = instanceUUID.String() + opts.ConstLabels = map[string]string{ + sanitizePrometheusKey(semconv.AttributeServiceInstanceID): instanceID, + } + } + + pe, err := prometheus.NewExporter(opts) + if err != nil { + return err + } + + view.RegisterExporter(pe) + + logger.Info( + "Serving Prometheus metrics", + zap.String("address", metricsAddr), + zap.Int8("level", int8(level)), // TODO: make it human friendly + zap.String(semconv.AttributeServiceInstanceID, instanceID), + ) + + mux := http.NewServeMux() + mux.Handle("/metrics", pe) + + tel.server = &http.Server{ + Addr: metricsAddr, + Handler: mux, + } + + go func() { + serveErr := tel.server.ListenAndServe() + if serveErr != nil && serveErr != http.ErrServerClosed { + asyncErrorChannel <- serveErr + } + }() + + return nil +} + +func (tel *colTelemetry) shutdown() error { + view.Unregister(tel.views...) 
+ + if tel.server != nil { + return tel.server.Close() + } + + return nil +} + +func sanitizePrometheusKey(str string) string { + runeFilterMap := func(r rune) rune { + if unicode.IsDigit(r) || unicode.IsLetter(r) || r == '_' { + return r + } + return '_' + } + return strings.Map(runeFilterMap, str) +} diff --git a/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml b/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml new file mode 100644 index 00000000000..5cc350383c4 --- /dev/null +++ b/internal/otel_collector/service/testdata/otelcol-config-minimal.yaml @@ -0,0 +1,14 @@ +receivers: + otlp: + protocols: + grpc: + +exporters: + otlp: + endpoint: "locahost:14250" + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] diff --git a/internal/otel_collector/service/testdata/otelcol-config.yaml b/internal/otel_collector/service/testdata/otelcol-config.yaml new file mode 100644 index 00000000000..dfd725639fa --- /dev/null +++ b/internal/otel_collector/service/testdata/otelcol-config.yaml @@ -0,0 +1,22 @@ +receivers: + otlp: + protocols: + grpc: + +exporters: + otlp: + endpoint: "locahost:55678" + +processors: + batch: + +extensions: + zpages: + +service: + extensions: [zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] diff --git a/internal/otel_collector/service/testdata/otelcol-nop.yaml b/internal/otel_collector/service/testdata/otelcol-nop.yaml new file mode 100644 index 00000000000..b4f799a782b --- /dev/null +++ b/internal/otel_collector/service/testdata/otelcol-nop.yaml @@ -0,0 +1,27 @@ +receivers: + nop: + +processors: + nop: + +exporters: + nop: + +extensions: + nop: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] + metrics: + receivers: [nop] + processors: [nop] + exporters: [nop] + logs: + receivers: [nop] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/service/zpages.go b/internal/otel_collector/service/zpages.go new file mode 100644 index 00000000000..928e239d855 --- /dev/null +++ b/internal/otel_collector/service/zpages.go @@ -0,0 +1,157 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package service + +import ( + "net/http" + "path" + "sort" + + otelzpages "go.opentelemetry.io/contrib/zpages" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/internal/version" + "go.opentelemetry.io/collector/service/internal/zpages" +) + +const ( + tracezPath = "tracez" + servicezPath = "servicez" + pipelinezPath = "pipelinez" + extensionzPath = "extensionz" + + zPipelineName = "zpipelinename" + zComponentName = "zcomponentname" + zComponentKind = "zcomponentkind" + zExtensionName = "zextensionname" +) + +func (srv *service) RegisterZPages(mux *http.ServeMux, pathPrefix string) { + mux.Handle(path.Join(pathPrefix, tracezPath), otelzpages.NewTracezHandler(srv.zPagesSpanProcessor)) + mux.HandleFunc(path.Join(pathPrefix, servicezPath), srv.handleServicezRequest) + mux.HandleFunc(path.Join(pathPrefix, pipelinezPath), srv.handlePipelinezRequest) + mux.HandleFunc(path.Join(pathPrefix, extensionzPath), func(w http.ResponseWriter, r *http.Request) { + handleExtensionzRequest(srv, w, r) + }) +} + +func (srv *service) handleServicezRequest(w http.ResponseWriter, r *http.Request) { + r.ParseForm() // nolint:errcheck + w.Header().Set("Content-Type", "text/html; charset=utf-8") + zpages.WriteHTMLHeader(w, zpages.HeaderData{Title: "service"}) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Pipelines", + ComponentEndpoint: pipelinezPath, + Link: true, + }) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Extensions", + ComponentEndpoint: extensionzPath, + Link: true, + }) + zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Build And Runtime", Properties: version.RuntimeVar()}) + zpages.WriteHTMLFooter(w) +} + +func (srv *service) handlePipelinezRequest(w http.ResponseWriter, r *http.Request) { + r.ParseForm() // nolint:errcheck + w.Header().Set("Content-Type", "text/html; charset=utf-8") + pipelineName := r.Form.Get(zPipelineName) + componentName := r.Form.Get(zComponentName) + componentKind := r.Form.Get(zComponentKind) + zpages.WriteHTMLHeader(w, zpages.HeaderData{Title: "Pipelines"}) + zpages.WriteHTMLPipelinesSummaryTable(w, srv.getPipelinesSummaryTableData()) + if pipelineName != "" && componentName != "" && componentKind != "" { + fullName := componentName + if componentKind == "processor" { + fullName = pipelineName + "/" + componentName + } + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: componentKind + ": " + fullName, + }) + // TODO: Add config + status info. + } + zpages.WriteHTMLFooter(w) +} + +func (srv *service) getPipelinesSummaryTableData() zpages.SummaryPipelinesTableData { + data := zpages.SummaryPipelinesTableData{ + ComponentEndpoint: pipelinezPath, + } + + data.Rows = make([]zpages.SummaryPipelinesTableRowData, 0, len(srv.builtPipelines)) + for c, p := range srv.builtPipelines { + // TODO: Change the template to use ID. 
+ var recvs []string + for _, recvID := range c.Receivers { + recvs = append(recvs, recvID.String()) + } + var procs []string + for _, procID := range c.Processors { + procs = append(procs, procID.String()) + } + var exps []string + for _, expID := range c.Exporters { + exps = append(exps, expID.String()) + } + row := zpages.SummaryPipelinesTableRowData{ + FullName: c.Name, + InputType: string(c.InputType), + MutatesData: p.MutatesData, + Receivers: recvs, + Processors: procs, + Exporters: exps, + } + data.Rows = append(data.Rows, row) + } + + sort.Slice(data.Rows, func(i, j int) bool { + return data.Rows[i].FullName < data.Rows[j].FullName + }) + return data +} + +func handleExtensionzRequest(host component.Host, w http.ResponseWriter, r *http.Request) { + r.ParseForm() // nolint:errcheck + w.Header().Set("Content-Type", "text/html; charset=utf-8") + extensionName := r.Form.Get(zExtensionName) + zpages.WriteHTMLHeader(w, zpages.HeaderData{Title: "Extensions"}) + zpages.WriteHTMLExtensionsSummaryTable(w, getExtensionsSummaryTableData(host)) + if extensionName != "" { + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: extensionName, + }) + // TODO: Add config + status info. + } + zpages.WriteHTMLFooter(w) +} + +func getExtensionsSummaryTableData(host component.Host) zpages.SummaryExtensionsTableData { + data := zpages.SummaryExtensionsTableData{ + ComponentEndpoint: extensionzPath, + } + + extensions := host.GetExtensions() + data.Rows = make([]zpages.SummaryExtensionsTableRowData, 0, len(extensions)) + for c := range extensions { + row := zpages.SummaryExtensionsTableRowData{FullName: c.Name()} + data.Rows = append(data.Rows, row) + } + + sort.Slice(data.Rows, func(i, j int) bool { + return data.Rows[i].FullName < data.Rows[j].FullName + }) + return data +} diff --git a/internal/otel_collector/website_docs/_index.md b/internal/otel_collector/website_docs/_index.md new file mode 100644 index 00000000000..bf7dcaaef23 --- /dev/null +++ b/internal/otel_collector/website_docs/_index.md @@ -0,0 +1,29 @@ +--- +title: Collector +weight: 10 +description: >- + Collector logo + Vendor-agnostic way to receive, process and export telemetry data. +cascade: + github_repo: &repo https://github.com/open-telemetry/opentelemetry-collector + github_subdir: website_docs + path_base_for_github_subdir: content/en/docs/collector/ + github_project_repo: *repo +--- + +Otel-Collector diagram with Jaeger, OTLP and Prometheus integration + +The OpenTelemetry Collector offers a vendor-agnostic implementation on how to +receive, process and export telemetry data. It removes the need to run, +operate, and maintain multiple agents/collectors. This works with improved scalability and supports +open-source observability data formats (e.g. Jaeger, Prometheus, Fluent Bit, +etc.) sending to one or more open-source or commercial back-ends. The Collector +is the default location instrumentation libraries export their telemetry data. + +Objectives: + +- *Usability*: Reasonable default configuration, supports popular protocols, runs and collects out of the box. +- *Performance*: Highly stable and performant under varying loads and configurations. +- *Observability*: An exemplar of an observable service. +- *Extensibility*: Customizable without touching the core code. +- *Unification*: Single codebase, deployable as an agent or collector with support for traces, metrics, and logs (future). 
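The `parserprovider` package added earlier in this diff, together with the `CollectorSettings.ParserProvider` hook in `settings.go`, lets an embedder source configuration from somewhere other than the `--config` flag. Below is a minimal sketch of the in-memory path, assuming only the APIs introduced in this diff (`NewInMemory`, `ParserProvider.Get`); the `main` function and the YAML literal are illustrative, not part of the change:

```go
package main

import (
	"log"
	"strings"

	"go.opentelemetry.io/collector/service/parserprovider"
)

// An illustrative minimal pipeline; any YAML accepted by the collector's
// configparser would do here.
const cfg = `
receivers:
  otlp:
    protocols:
      grpc:
exporters:
  logging:
service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [logging]
`

func main() {
	// NewInMemory wraps any io.Reader; Get parses it into a *configparser.Parser.
	// Default(), i.e. NewSetFlag(NewFile()), works the same way but reads the
	// --config file and overlays --set properties on top.
	pp := parserprovider.NewInMemory(strings.NewReader(cfg))
	parser, err := pp.Get()
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}
	_ = parser // in the collector this is handed to a configunmarshaler.ConfigUnmarshaler
}
```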
diff --git a/internal/otel_collector/website_docs/configuration.md b/internal/otel_collector/website_docs/configuration.md new file mode 100644 index 00000000000..d2209bcf29c --- /dev/null +++ b/internal/otel_collector/website_docs/configuration.md @@ -0,0 +1,453 @@ +--- +title: "Configuration" +weight: 20 +--- + +Please be sure to review the following documentation: + +- [Data Collection concepts](../../concepts/data-collection) in order to + understand the repositories applicable to the OpenTelemetry Collector. +- [Security + guidance](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security.md) + +## Basics + +The Collector consists of three components that access telemetry data: + +- +[Receivers](#receivers) +- +[Processors](#processors) +- +[Exporters](#exporters) + +These components once configured must be enabled via pipelines within the +[service](#service) section. + +Secondarily, there are [extensions](#extensions), which provide capabilities +that can be added to the Collector, but which do not require direct access to +telemetry data and are not part of pipelines. They are also enabled within the +[service](#service) section. + +An example configuration would look like: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + otlp: + endpoint: otelcol:4317 + +extensions: + health_check: + pprof: + zpages: + +service: + extensions: [health_check,pprof,zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +Note that the same receiver, processor, exporter and/or pipeline can be defined +more than once. For example: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: + otlp/2: + protocols: + grpc: + endpoint: 0.0.0.0:55690 + +processors: + batch: + batch/test: + +exporters: + otlp: + endpoint: otelcol:4317 + otlp/2: + endpoint: otelcol2:4317 + +extensions: + health_check: + pprof: + zpages: + +service: + extensions: [health_check,pprof,zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + traces/2: + receivers: [otlp/2] + processors: [batch/test] + exporters: [otlp/2] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +## Receivers + + + + +A receiver, which can be push or pull based, is how data gets into the +Collector. Receivers may support one or more [data +sources](../../concepts/data-sources). + + +The `receivers:` section is how receivers are configured. Many receivers come +with default settings so simply specifying the name of the receiver is enough +to configure it (for example, `zipkin:`). If configuration is required or a +user wants to change the default configuration then such configuration must be +defined in this section. Configuration parameters specified for which the +receiver provides a default configuration are overridden. + +> Configuring a receiver does not enable it. Receivers are enabled via +> pipelines within the [service](#service) section. + +One or more receivers must be configured. By default, no receivers +are configured. A basic example of all available receivers is provided below. 
+ +> For detailed receiver configuration, please see the [receiver +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md). + +```yaml +receivers: + # Data sources: logs + fluentforward: + listenAddress: 0.0.0.0:8006 + + # Data sources: metrics + hostmetrics: + scrapers: + cpu: + disk: + filesystem: + load: + memory: + network: + process: + processes: + swap: + + # Data sources: traces + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + + # Data sources: traces + kafka: + protocol_version: 2.0.0 + + # Data sources: traces, metrics + opencensus: + + # Data sources: traces, metrics, logs + otlp: + protocols: + grpc: + http: + + # Data sources: metrics + prometheus: + config: + scrape_configs: + - job_name: "otel-collector" + scrape_interval: 5s + static_configs: + - targets: ["localhost:8888"] + + # Data sources: traces + zipkin: +``` + +## Processors + + + +Processors are run on data between being received and being exported. +Processors are optional though [some are +recommended](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor#recommended-processors). + +The `processors:` section is how processors are configured. Processors may come +with default settings, but many require configuration. Any configuration for a +processor must be done in this section. Configuration parameters specified for +which the processor provides a default configuration are overridden. + +> Configuring a processor does not enable it. Processors are enabled via +> pipelines within the [service](#service) section. + +A basic example of all available processors is provided below. + +> For detailed processor configuration, please see the [processor +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md). + +```yaml +processors: + # Data sources: traces + attributes: + actions: + - key: environment + value: production + action: insert + - key: db.statement + action: delete + - key: email + action: hash + + # Data sources: traces, metrics, logs + batch: + + # Data sources: metrics + filter: + metrics: + include: + match_type: regexp + metric_names: + - prefix/.* + - prefix_.* + + # Data sources: traces, metrics, logs + memory_limiter: + ballast_size_mib: 2000 + check_interval: 5s + limit_mib: 4000 + spike_limit_mib: 500 + + # Data sources: traces + resource: + attributes: + - key: cloud.zone + value: "zone-1" + action: upsert + - key: k8s.cluster.name + from_attribute: k8s-cluster + action: insert + - key: redundant-attribute + action: delete + + # Data sources: traces + probabilistic_sampler: + hash_seed: 22 + sampling_percentage: 15 + + # Data sources: traces + span: + name: + to_attributes: + rules: + - ^\/api\/v1\/document\/(?P.*)\/update$ + from_attributes: ["db.svc", "operation"] + separator: "::" +``` + +## Exporters + + + +An exporter, which can be push or pull based, is how you send data to one or +more backends/destinations. Exporters may support one or more [data +sources](../../concepts/data-sources). + + +The `exporters:` section is how exporters are configured. Exporters may come +with default settings, but many require configuration to specify at least the +destination and security settings. Any configuration for an exporter must be +done in this section. Configuration parameters specified for which the exporter +provides a default configuration are overridden. + +> Configuring an exporter does not enable it. 
Exporters are enabled via +> pipelines within the [service](#service) section. + +One or more exporters must be configured. By default, no exporters +are configured. A basic example of all available exporters is provided below. + +> For detailed exporter configuration, please see the [exporter +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md). + +```yaml +exporters: + # Data sources: traces, metrics, logs + file: + path: ./filename.json + + # Data sources: traces + jaeger: + endpoint: "http://jaeger-all-in-one:14250" + insecure: true + + # Data sources: traces + kafka: + protocol_version: 2.0.0 + + # Data sources: traces, metrics, logs + logging: + loglevel: debug + + # Data sources: traces, metrics + opencensus: + endpoint: "otelcol2:55678" + + # Data sources: traces, metrics, logs + otlp: + endpoint: otelcol2:4317 + insecure: true + + # Data sources: traces, metrics + otlphttp: + endpoint: https://example.com:4318/v1/traces + + # Data sources: metrics + prometheus: + endpoint: "prometheus:8889" + namespace: "default" + + # Data sources: metrics + prometheusremotewrite: + endpoint: "http://some.url:9411/api/prom/push" + + # Data sources: traces + zipkin: + endpoint: "http://localhost:9411/api/v2/spans" +``` + +## Extensions + +Extensions are available primarily for tasks that do not involve processing telemetry +data. Examples of extensions include health monitoring, service discovery, and +data forwarding. Extensions are optional. + +The `extensions:` section is how extensions are configured. Many extensions +come with default settings so simply specifying the name of the extension is +enough to configure it (for example, `health_check:`). If configuration is +required or a user wants to change the default configuration then such +configuration must be defined in this section. Configuration parameters +specified for which the extension provides a default configuration are +overridden. + +> Configuring an extension does not enable it. Extensions are enabled within +> the [service](#service) section. + +By default, no extensions are configured. A basic example of all available +extensions is provided below. + +> For detailed extension configuration, please see the [extension +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md). + +```yaml +extensions: + health_check: + pprof: + zpages: +``` + +## Service + +The service section is used to configure what components are enabled in the +Collector based on the configuration found in the receivers, processors, +exporters, and extensions sections. If a component is configured, but not +defined within the service section then it is not enabled. The service section +consists of two sub-sections: + +- extensions +- pipelines + +Extensions consist of a list of all extensions to enable. For example: + +```yaml + service: + extensions: [health_check, pprof, zpages] +``` + +Pipelines can be of the following types: + +- traces: collects and processes trace data. +- metrics: collects and processes metric data. +- logs: collects and processes log data. + +A pipeline consists of a set of receivers, processors and exporters. Each +receiver/processor/exporter must be defined in the configuration outside of the +service section to be included in a pipeline. + +*Note:* Each receiver/processor/exporter can be used in more than one pipeline. 
+For processor(s) referenced in multiple pipelines, each pipeline will get a +separate instance of that processor(s). This is in contrast to +receiver(s)/exporter(s) referenced in multiple pipelines, where only one +instance of a receiver/exporter is used for all pipelines. Also note that the +order of processors dictates the order in which data is processed. + +The following is an example pipeline configuration: + +```yaml +service: + pipelines: + metrics: + receivers: [opencensus, prometheus] + exporters: [opencensus, prometheus] + traces: + receivers: [opencensus, jaeger] + processors: [batch] + exporters: [opencensus, zipkin] +``` + +## Other Information + +### Configuration Environment Variables + +The use and expansion of environment variables is supported in the Collector +configuration. For example: + +```yaml +processors: + attributes/example: + actions: + - key: "${DB_KEY}" + action: "${OPERATION}" +``` + +### Proxy Support + +Exporters that leverage the net/http package (all do today) respect the +following proxy environment variables: + +- HTTP_PROXY +- HTTPS_PROXY +- NO_PROXY + +If set at Collector start time then exporters, regardless of protocol, will or +will not proxy traffic as defined by these environment variables. diff --git a/internal/otel_collector/website_docs/getting-started.md b/internal/otel_collector/website_docs/getting-started.md new file mode 100644 index 00000000000..20d5b65c9e5 --- /dev/null +++ b/internal/otel_collector/website_docs/getting-started.md @@ -0,0 +1,188 @@ +--- +title: "Getting Started" +weight: 1 +--- + +Please be sure to review the [Data Collection +documentation](../../concepts/data-collection) in order to understand the +deployment models, components, and repositories applicable to the OpenTelemetry +Collector. + +## Deployment + +The OpenTelemetry Collector consists of a single binary and two primary deployment methods: + +- **Agent:** A Collector instance running with the application or on the same + host as the application (e.g. binary, sidecar, or daemonset). +- **Gateway:** One or more Collector instances running as a standalone service + (e.g. container or deployment) typically per cluster, datacenter or region. + +### Agent + +It is recommended to deploy the Agent on every host within an environment. In +doing so, the Agent is capable of receiving telemetry data (push and pull +based) as well as enhancing telemetry data with metadata such as custom tags or +infrastructure information. In addition, the Agent can offload responsibilities +that client instrumentation would otherwise need to handle including batching, +retry, encryption, compression and more. OpenTelemetry instrumentation +libraries by default export their data assuming a locally running Collector is +available. + +### Gateway + +Additionally, a Gateway cluster can be deployed in every cluster, datacenter, +or region. A Gateway cluster runs as a standalone service and can offer +advanced capabilities over the Agent including tail-based sampling. In +addition, a Gateway cluster can limit the number of egress points required to +send data as well as consolidate API token management. Each Collector instance +in a Gateway cluster operates independently so it is easy to scale the +architecture based on performance needs with a simple load balancer. If a +gateway cluster is deployed, it usually receives data from Agents deployed +within an environment. 
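Before the install walk-throughs below, a rough sketch of what "a locally running Collector" means from the application side: an instrumented service pointing its OTLP exporter at the Agent on the same host. The otel-go SDK package paths and options here are assumptions about the upstream SDK of this era, not part of this diff:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Assumes an Agent-mode Collector listening on the default OTLP/gRPC
	// port on the same host, per the deployment model described above.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("creating OTLP exporter: %v", err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// Emit one demo span; the batch span processor flushes it to the Agent.
	_, span := tp.Tracer("demo").Start(ctx, "hello")
	span.End()
}
```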
+
+## Getting Started
+
+### Demo
+
+Deploys a load generator, agent, and gateway, as well as Jaeger, Zipkin, and
+Prometheus back-ends. More information can be found in the demo
+[README.md](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/examples/demo).
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector-contrib.git; \
+    cd opentelemetry-collector-contrib/examples/demo; \
+    docker-compose up -d
+```
+
+### Docker
+
+Every release of the Collector is published to Docker Hub and comes with a
+default configuration file.
+
+```bash
+$ docker run otel/opentelemetry-collector
+```
+
+In addition, you can use the local example provided. This example starts a
+Docker container of the
+[core](https://github.com/open-telemetry/opentelemetry-collector) version of
+the Collector with all receivers enabled and exports all the data it receives
+locally to a file. Data is sent to the container and the container scrapes its
+own Prometheus metrics.
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector.git; \
+    cd opentelemetry-collector/examples; \
+    go build main.go; ./main & pid1="$!"; \
+    docker run --rm -p 13133:13133 -p 14250:14250 -p 14268:14268 \
+      -p 55678-55679:55678-55679 -p 4317:4317 -p 8888:8888 -p 9411:9411 \
+      -v "${PWD}/otel-local-config.yaml":/otel-local-config.yaml \
+      --name otelcol otel/opentelemetry-collector \
+      --config otel-local-config.yaml; \
+    kill $pid1; docker stop otelcol
+```
+
+### Kubernetes
+
+Deploys an agent as a daemonset and a single gateway instance.
+
+```bash
+$ kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-collector/main/examples/k8s/otel-config.yaml
+```
+
+The example above is meant to serve as a starting point, to be extended and
+customized before actual production usage.
+
+The [OpenTelemetry
+Operator](https://github.com/open-telemetry/opentelemetry-operator) can also be
+used to provision and maintain an OpenTelemetry Collector instance, with
+features such as automatic upgrade handling, `Service` configuration based on
+the OpenTelemetry configuration, and automatic sidecar injection into
+deployments, among others.
+
+### Nomad
+
+Reference job files to deploy the Collector as an agent, gateway, and in the
+full demo can be found at
+[https://github.com/hashicorp/nomad-open-telemetry-getting-started](https://github.com/hashicorp/nomad-open-telemetry-getting-started).
+
+### Linux Packaging
+
+Every Collector release includes DEB and RPM packaging for Linux amd64/arm64
+systems. The packaging includes a default configuration that can be found at
+`/etc/otel-collector/config.yaml` post-installation.
+
+> Please note that systemd is required for automatic service configuration.
+
+To get started on Debian systems, run the following, replacing `v0.20.0` with
+the version of the Collector you wish to run and `amd64` with the appropriate
+architecture.
+
+```bash
+$ sudo apt-get update
+$ sudo apt-get -y install wget
+$ wget https://github.com/open-telemetry/opentelemetry-collector/releases/download/v0.20.0/otel-collector_0.20.0_amd64.deb
+$ sudo dpkg -i otel-collector_0.20.0_amd64.deb
+```
+
+To get started on Red Hat systems, run the following, replacing `v0.20.0` with
+the version of the Collector you wish to run and `x86_64` with the appropriate
+architecture.
+
+```bash
+$ sudo yum update
+$ sudo yum -y install wget
+$ wget https://github.com/open-telemetry/opentelemetry-collector/releases/download/v0.20.0/otel-collector_0.20.0-1_x86_64.rpm
+$ sudo rpm -ivh otel-collector_0.20.0-1_x86_64.rpm
+```
+
+By default, the `otel-collector` systemd service will be started with the
+`--config=/etc/otel-collector/config.yaml` option after installation. To
+customize these options, modify the `OTELCOL_OPTIONS` variable in the
+`/etc/otel-collector/otel-collector.conf` systemd environment file with the
+appropriate command-line options (run `/usr/bin/otelcol --help` to see all
+available options). Additional environment variables can also be passed to the
+`otel-collector` service by adding them to this file.
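+
+As an illustration, an `otel-collector.conf` that points the service at a
+custom configuration file might look like the following (the custom path is a
+placeholder):
+
+```bash
+# /etc/otel-collector/otel-collector.conf
+# Command-line options passed to the otel-collector service.
+OTELCOL_OPTIONS="--config=/etc/otel-collector/custom-config.yaml"
+```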
+
+If either the Collector configuration file or
+`/etc/otel-collector/otel-collector.conf` is modified, restart the
+`otel-collector` service to apply the changes by running:
+
+```bash
+$ sudo systemctl restart otel-collector
+```
+
+To check the output from the `otel-collector` service, run:
+
+```bash
+$ sudo journalctl -u otel-collector
+```
+
+### Windows Packaging
+
+Every Collector release includes EXE and MSI packaging for Windows amd64
+systems. The MSI packaging includes a default configuration that can be found
+at `\Program Files\OpenTelemetry Collector\config.yaml`.
+
+> Please note the Collector service is not automatically started.
+
+The easiest way to get started is to double-click the MSI package and follow
+the wizard. Silent installation is also available.
+
+### Local
+
+Builds the latest version of the Collector based on the local operating system,
+runs the binary with all receivers enabled, and exports all the data it
+receives locally to a file. Data is sent to the Collector and the Collector
+scrapes its own Prometheus metrics.
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector-contrib.git; \
+    cd opentelemetry-collector-contrib/examples/demo; \
+    go build client/main.go; ./client/main & pid1="$!"; \
+    go build server/main.go; ./server/main & pid2="$!";
+
+$ git clone git@github.com:open-telemetry/opentelemetry-collector.git; \
+    cd opentelemetry-collector; make install-tools; make otelcol; \
+    ./bin/otelcol_$(go env GOOS)_$(go env GOARCH) --config ./examples/local/otel-config.yaml; kill $pid1; kill $pid2
+```
diff --git a/kibana/connecting_client.go b/kibana/connecting_client.go
index 3558f58b6b3..b7d59ed12c0 100644
--- a/kibana/connecting_client.go
+++ b/kibana/connecting_client.go
@@ -19,6 +19,7 @@ package kibana
 import (
 	"context"
+	"encoding/base64"
 	"errors"
 	"io"
 	"net/http"
@@ -26,6 +27,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/elastic/apm-server/beater/config"
+
 	"go.elastic.co/apm"
 	"go.elastic.co/apm/module/apmhttp"
 
@@ -58,12 +61,12 @@ type Client interface {
 type ConnectingClient struct {
 	m      sync.RWMutex
 	client *kibana.Client
-	cfg    *kibana.ClientConfig
+	cfg    *config.KibanaConfig
 }
 
 // NewConnectingClient returns instance of ConnectingClient and starts a background routine trying to connect
 // to configured Kibana instance, using JitterBackoff for establishing connection.
-func NewConnectingClient(cfg *kibana.ClientConfig) Client { +func NewConnectingClient(cfg *config.KibanaConfig) Client { c := &ConnectingClient{cfg: cfg} go func() { log := logp.NewLogger(logs.Kibana) @@ -124,7 +127,7 @@ func (c *ConnectingClient) SupportsVersion(ctx context.Context, v *common.Versio if !retry || upToDate { return upToDate, nil } - client, err := kibana.NewClientWithConfig(c.cfg) + client, err := kibana.NewClientWithConfig(c.clientConfig(), "apm-server") if err != nil { log.Errorf("failed to obtain connection to Kibana: %s", err.Error()) return upToDate, err @@ -145,11 +148,23 @@ func (c *ConnectingClient) connect() error { if c.client != nil { return nil } - client, err := kibana.NewClientWithConfig(c.cfg) + client, err := kibana.NewClientWithConfig(c.clientConfig(), "apm-server") if err != nil { return err } + if c.cfg.APIKey != "" { + client.Headers["Authorization"] = []string{"ApiKey " + base64.StdEncoding.EncodeToString([]byte(c.cfg.APIKey))} + client.Username = "" + client.Password = "" + } client.HTTP = apmhttp.WrapClient(client.HTTP) c.client = client return nil } + +func (c *ConnectingClient) clientConfig() *kibana.ClientConfig { + if c != nil && c.cfg != nil { + return &c.cfg.ClientConfig + } + return nil +} diff --git a/kibana/connecting_client_test.go b/kibana/connecting_client_test.go index fdd3de86d91..56e7264d2c9 100644 --- a/kibana/connecting_client_test.go +++ b/kibana/connecting_client_test.go @@ -21,15 +21,16 @@ import ( "context" "io/ioutil" "net/http" + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/kibana" - - "github.com/elastic/apm-server/convert" ) func TestNewConnectingClientFrom(t *testing.T) { @@ -39,6 +40,28 @@ func TestNewConnectingClientFrom(t *testing.T) { assert.Equal(t, mockCfg, c.(*ConnectingClient).cfg) } +func TestNewConnectingClientWithAPIKey(t *testing.T) { + cfg := &config.KibanaConfig{ + Enabled: true, + APIKey: "foo-id:bar-apikey", + ClientConfig: kibana.ClientConfig{ + Host: "localhost:5601", + Username: "elastic", + Password: "secret", + IgnoreVersion: true, + }, + } + conn := &ConnectingClient{cfg: cfg} + require.NotNil(t, conn) + err := conn.connect() + require.NoError(t, err) + client := conn.client + require.NotNil(t, client) + assert.Equal(t, "", client.Username) + assert.Equal(t, "", client.Password) + assert.Equal(t, "ApiKey Zm9vLWlkOmJhci1hcGlrZXk=", client.Headers.Get("Authorization")) +} + func TestConnectingClient_Send(t *testing.T) { t.Run("Send", func(t *testing.T) { c := mockClient() @@ -102,10 +125,13 @@ type rt struct { } var ( - mockCfg = &kibana.ClientConfig{ - Host: "non-existing", + mockCfg = &config.KibanaConfig{ + Enabled: true, + ClientConfig: kibana.ClientConfig{ + Host: "non-existing", + }, } - mockBody = ioutil.NopCloser(convert.ToReader(`{"response": "ok"}`)) + mockBody = ioutil.NopCloser(strings.NewReader(`{"response": "ok"}`)) mockStatus = http.StatusOK mockVersion = *common.MustNewVersion("7.3.0") ) diff --git a/kibana/kibanatest/kibana.go b/kibana/kibanatest/kibana.go new file mode 100644 index 00000000000..445f8a3504f --- /dev/null +++ b/kibana/kibanatest/kibana.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package kibanatest + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" + + "github.com/elastic/apm-server/kibana" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// MockKibanaClient implements the kibana.Client interface for testing purposes +type MockKibanaClient struct { + code int + body map[string]interface{} + v common.Version + connected bool +} + +// Send returns a mock http.Response based on parameters used to init the MockKibanaClient instance +func (c *MockKibanaClient) Send( + _ context.Context, + method, extraPath string, params url.Values, + headers http.Header, + body io.Reader, +) (*http.Response, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(c.body); err != nil { + panic(err) + } + resp := http.Response{StatusCode: c.code, Body: ioutil.NopCloser(&buf)} + if resp.StatusCode == http.StatusBadGateway { + return nil, errors.New("testerror") + } + return &resp, nil +} + +// GetVersion returns a mock version based on parameters used to init the MockKibanaClient instance +func (c *MockKibanaClient) GetVersion(context.Context) (common.Version, error) { + return c.v, nil +} + +// SupportsVersion returns whether or not mock client is compatible with given version +func (c *MockKibanaClient) SupportsVersion(_ context.Context, v *common.Version, _ bool) (bool, error) { + if !c.connected { + return false, errors.New("unable to retrieve connection to Kibana") + } + return v.LessThanOrEqual(true, &c.v), nil +} + +// MockKibana provides a fake connection for unit tests +func MockKibana(respCode int, respBody map[string]interface{}, v common.Version, connected bool) kibana.Client { + return &MockKibanaClient{code: respCode, body: respBody, v: v, connected: connected} +} diff --git a/kibana/send_config.go b/kibana/send_config.go new file mode 100644 index 00000000000..094d8682cda --- /dev/null +++ b/kibana/send_config.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package kibana + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/go-ucfg" +) + +const kibanaConfigUploadPath = "/api/apm/fleet/apm_server_schema" + +// SendConfig marshals and uploads the provided config to kibana using the +// provided ConnectingClient. It retries until its context has been canceled or +// the upload succeeds. +func SendConfig(ctx context.Context, client Client, conf *ucfg.Config) error { + // configuration options are already flattened (dotted) + // any credentials for ES and Kibana are removed + flat, err := flattenAndClean(conf) + if err != nil { + return err + } + + b, err := json.Marshal(format(flat)) + if err != nil { + return err + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + resp, err := client.Send(ctx, http.MethodPost, kibanaConfigUploadPath, nil, nil, bytes.NewReader(b)) + if err != nil { + if errors.Is(err, errNotConnected) { + // Not connected to kibana, wait and try again. + time.Sleep(15 * time.Second) + continue + } + + // Are there other kinds of recoverable errors? + return err + } + // TODO: What sort of response will we get? + if resp.StatusCode > http.StatusOK { + return fmt.Errorf("bad response %s", resp.Status) + } + + return nil + } +} + +func format(m map[string]interface{}) map[string]interface{} { + return map[string]interface{}{"schema": m} +} + +func flattenAndClean(conf *ucfg.Config) (map[string]interface{}, error) { + m := common.MapStr{} + if err := conf.Unpack(m); err != nil { + return nil, err + } + flat := m.Flatten() + out := make(common.MapStr, len(flat)) + for k, v := range flat { + // remove if elasticsearch is NOT in the front position? + // *.elasticsearch.* according to axw + if strings.Contains(k, "elasticsearch") { + continue + } + if strings.Contains(k, "kibana") { + continue + } + if strings.HasPrefix(k, "instrumentation") { + continue + } + if strings.HasPrefix(k, "logging.") { + switch k[8:] { + case "level", "selectors", "metrics.enabled", "metrics.period": + default: + continue + } + } + if strings.HasPrefix(k, "path") { + continue + } + if k == "gc_percent" || k == "name" || k == "xpack.monitoring.enabled" { + continue + } + if k == "apm-server.host" { + v = "0.0.0.0:8200" + } + if k == "apm-server.rum.rate_limit" { + k = "apm-server.rum.event_rate.limit" + } + if strings.HasPrefix(k, "apm-server.ssl.") { + // Following ssl related settings need to be synced: + // apm-server.ssl.enabled + // apm-server.ssl.certificate + // apm-server.ssl.key + switch k[15:] { + case "enabled", "certificate", "key": + default: + continue + } + } + out[k] = v + } + return out, nil +} diff --git a/kibana/send_config_test.go b/kibana/send_config_test.go new file mode 100644 index 00000000000..c35bb0ec761 --- /dev/null +++ b/kibana/send_config_test.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package kibana + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/go-ucfg" +) + +func TestFlattenAndFormat(t *testing.T) { + tlsFieldsCount, loggingFieldCount := 0, 0 + cc, err := common.NewConfigWithYAML([]byte(serverYAML), "apm-server.yml") + c := ucfg.Config(*cc) + require.NoError(t, err) + + flat, err := flattenAndClean(&c) + assert.NoError(t, err) + + flat = format(flat) + assert.Contains(t, flat, "schema") + + flat = flat["schema"].(map[string]interface{}) + for k, v := range flat { + assert.NotContains(t, k, "elasticsearch") + assert.NotContains(t, k, "kibana") + assert.NotContains(t, k, "instrumentation") + assert.NotContains(t, k, "path.") + assert.NotEqual(t, k, "name") + assert.NotContains(t, k, "gc_percent") + assert.NotContains(t, k, "xpack.monitoring.enabled") + if strings.HasPrefix(k, "logging.") { + switch k { + case "logging.level", "logging.selectors", "logging.metrics.enabled", "logging.metrics.period": + loggingFieldCount++ + default: + assert.Fail(t, fmt.Sprintf("should not be present: %s", k)) + } + } + if k == "apm-server.host" { + assert.Equal(t, "0.0.0.0:8200", v) + } + if strings.HasPrefix(k, "apm-server.ssl.") { + switch k[15:] { + case "enabled", "certificate", "key": + tlsFieldsCount++ + default: + assert.Fail(t, fmt.Sprintf("should not be present: %s", k)) + } + } + } + assert.Equal(t, 3, tlsFieldsCount) + assert.Equal(t, 4, loggingFieldCount) + assert.Contains(t, flat, "apm-server.rum.event_rate.limit") + assert.Contains(t, flat, "apm-server.rum.event_rate.lru_size") + assert.NotContains(t, flat, "apm-server.rum.rate_limit") +} + +var serverYAML = `apm-server: + kibana: + enabled: true + api_key: abc123 + host: "localhost:8200" + auth: + api_key: + enabled: true + limit: 100 + max_header_size: 1048576 + idle_timeout: 45s + read_timeout: 30s + write_timeout: 30s + shutdown_timeout: 5s + ssl: + enabled: true + key: 'my-key' + certificate: 'my-cert' + key_passphrase: 'pass-phrase' + verify_mode: 'strict' + name: 'test-name' + rum: + enabled: false + event_rate: + lru_size: 1000 + rate_limit: 300 +gc_percent: 70 +logging: + level: 'debug' + selectors: ['intake'] + metrics: + enabled: true + period: 10s + files.name: "apm.log" + json: true +path.config: "/app/config" +path.data: "/app/data" +path: + home: "/app/" +xpack.monitoring.enabled: true +output.elasticsearch: + hosts: ["localhost:9200"] + enabled: true + compression_level: 0 + protocol: "https" + username: "elastic" + password: "changeme" + worker: 1 +instrumentation: + enabled: false + environment: "" + hosts: + - http://remote-apm-server:8200 +` diff --git a/log/ratelimit.go b/log/ratelimit.go new file mode 100644 index 00000000000..99dde3b6df7 --- /dev/null +++ b/log/ratelimit.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package logs + +import ( + "math" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +// WithRateLimit returns a logp.LogOption which rate limits messages +// with approximately the given frequency. +func WithRateLimit(interval time.Duration) logp.LogOption { + return zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewSamplerWithOptions(in, interval, 1, math.MaxInt32) + }) +} diff --git a/log/ratelimit_test.go b/log/ratelimit_test.go new file mode 100644 index 00000000000..64092ed4bdf --- /dev/null +++ b/log/ratelimit_test.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package logs_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + logs "github.com/elastic/apm-server/log" + "github.com/elastic/beats/v7/libbeat/logp" +) + +func TestWithRateLimit(t *testing.T) { + core, observed := observer.New(zapcore.DebugLevel) + logger := logp.NewLogger("bo", zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewTee(in, core) + })) + + const interval = 100 * time.Millisecond + limitedLogger := logger.WithOptions(logs.WithRateLimit(interval)) + + // Log twice in quick succession; the 2nd call will be ignored due to rate-limiting. + limitedLogger.Info("hello") + limitedLogger.Info("hello") + assert.Equal(t, 1, observed.Len()) + + // Sleep until the configured interval has elapsed, which should allow another + // record to be logged. + time.Sleep(interval) + limitedLogger.Info("hello") + assert.Equal(t, 2, observed.Len()) +} diff --git a/magefile.go b/magefile.go index 4b7d1212c27..6bd56894a09 100644 --- a/magefile.go +++ b/magefile.go @@ -55,7 +55,7 @@ func init() { }) mage.BeatDescription = "Elastic APM Server" - mage.BeatURL = "https://www.elastic.co/products/apm" + mage.BeatURL = "https://www.elastic.co/apm" mage.BeatIndexPrefix = "apm" mage.XPackDir = "x-pack" mage.BeatUser = "apm-server" @@ -160,7 +160,7 @@ func filterPackages(types string) { // Use SNAPSHOT=true to build snapshots. 
// Use PLATFORMS to control the target platforms. eg linux/amd64 // Use TYPES to control the target types. eg docker -func Package() { +func Package() error { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() @@ -175,44 +175,15 @@ func Package() { mg.Deps(Update, prepareIngestPackaging) mg.Deps(CrossBuild, CrossBuildXPack, CrossBuildGoDaemon) } - mg.SerialDeps(mage.Package, TestPackages) + return mage.Package() } -// TestPackages tests the generated packages (i.e. file modes, owners, groups). -func TestPackages() error { - return mage.TestPackages() -} - -// TestPackagesInstall integration tests the generated packages -func TestPackagesInstall() error { - // make the test script available to containers first - copy := &mage.CopyTask{ - Source: "tests/packaging/test.sh", - Dest: mage.MustExpand("{{.PWD}}/build/distributions/test.sh"), - Mode: 0755, - } - if err := copy.Execute(); err != nil { - return err - } - defer sh.Rm(copy.Dest) - - goTest := sh.OutCmd("go", "test") - var args []string - if mg.Verbose() { - args = append(args, "-v") - } - args = append(args, mage.MustExpand("tests/packaging/package_test.go")) - args = append(args, "-timeout", "20m") - args = append(args, "-files", mage.MustExpand("{{.PWD}}/build/distributions/*")) - args = append(args, "-tags=package") - - if out, err := goTest(args...); err != nil { - if mg.Verbose() { - fmt.Println(out) - } +func Version() error { + v, err := mage.BeatQualifiedVersion() + if err != nil { return err } - + fmt.Print(v) return nil } diff --git a/main.go b/main.go index 6b539245033..e81afade953 100644 --- a/main.go +++ b/main.go @@ -17,8 +17,8 @@ package main -//go:generate go run script/inline_schemas/inline_schemas.go //go:generate go run model/modeldecoder/generator/cmd/main.go +//go:generate bash script/vendor_otel.sh import ( "os" @@ -27,7 +27,7 @@ import ( "github.com/elastic/apm-server/cmd" ) -var rootCmd = cmd.NewRootCommand(beater.NewCreator(beater.CreatorParams{})) +var rootCmd = cmd.NewRootCommand(beater.NewCreator(beater.CreatorParams{}), cmd.DefaultSettings()) func main() { if err := rootCmd.Execute(); err != nil { diff --git a/model/agent.go b/model/agent.go new file mode 100644 index 00000000000..3fbe6fffb83 --- /dev/null +++ b/model/agent.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Agent describes an Elastic APM agent. 
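+// The exported fields map to ECS agent.* fields: Name to agent.name, Version
+// to agent.version, and EphemeralID to agent.ephemeral_id; empty values are
+// omitted from the resulting document.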
+type Agent struct { + Name string + Version string + EphemeralID string +} + +func (a *Agent) fields() common.MapStr { + var agent mapStr + agent.maybeSetString("name", a.Name) + agent.maybeSetString("version", a.Version) + agent.maybeSetString("ephemeral_id", a.EphemeralID) + return common.MapStr(agent) +} diff --git a/model/agent_test.go b/model/agent_test.go new file mode 100644 index 00000000000..5637798f14d --- /dev/null +++ b/model/agent_test.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" +) + +const ( + agentName, agentVersion = "elastic-node", "1.0.0" +) + +func TestAgentFields(t *testing.T) { + tests := []struct { + Agent Agent + Fields common.MapStr + }{ + { + Agent: Agent{}, + Fields: nil, + }, + { + Agent: Agent{ + Name: agentName, + Version: agentVersion, + }, + Fields: common.MapStr{ + "name": "elastic-node", + "version": "1.0.0", + }, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Fields, test.Agent.fields()) + } +} diff --git a/model/apmevent.go b/model/apmevent.go new file mode 100644 index 00000000000..cf492979bd4 --- /dev/null +++ b/model/apmevent.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "context" + "time" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" +) + +// APMEvent holds the details of an APM event. +// +// Exactly one of the event fields should be non-nil. +type APMEvent struct { + // DataStream optionally holds data stream identifiers. + // + // This will have the zero value when APM Server is run + // in standalone mode. 
+ DataStream DataStream + + ECSVersion string + Event Event + Agent Agent + Observer Observer + Container Container + Kubernetes Kubernetes + Service Service + Process Process + Host Host + User User + UserAgent UserAgent + Client Client + Source Source + Destination Destination + Cloud Cloud + Network Network + Session Session + URL URL + Processor Processor + Trace Trace + Parent Parent + Child Child + HTTP HTTP + + // Timestamp holds the event timestamp. + // + // See https://www.elastic.co/guide/en/ecs/current/ecs-base.html + Timestamp time.Time + + // Labels holds labels to apply to the event. + // + // See https://www.elastic.co/guide/en/ecs/current/ecs-base.html + Labels common.MapStr + + // Message holds the message for log events. + // + // See https://www.elastic.co/guide/en/ecs/current/ecs-base.html + Message string + + Transaction *Transaction + Span *Span + Metricset *Metricset + Error *Error + ProfileSample *ProfileSample +} + +// BeatEvent converts e to a beat.Event. +func (e *APMEvent) BeatEvent(ctx context.Context) beat.Event { + event := beat.Event{ + Timestamp: e.Timestamp, + Fields: make(common.MapStr), + } + if e.Transaction != nil { + e.Transaction.setFields((*mapStr)(&event.Fields), e) + } + if e.Span != nil { + e.Span.setFields((*mapStr)(&event.Fields), e) + } + if e.Metricset != nil { + e.Metricset.setFields((*mapStr)(&event.Fields)) + } + if e.Error != nil { + e.Error.setFields((*mapStr)(&event.Fields)) + } + if e.ProfileSample != nil { + e.ProfileSample.setFields((*mapStr)(&event.Fields)) + } + + // Set high resolution timestamp. + // + // TODO(axw) change @timestamp to use date_nanos, and remove this field. + if !e.Timestamp.IsZero() { + switch e.Processor { + case TransactionProcessor, SpanProcessor, ErrorProcessor: + event.Fields["timestamp"] = common.MapStr{"us": int(e.Timestamp.UnixNano() / 1000)} + } + } + + // Set top-level field sets. + fields := (*mapStr)(&event.Fields) + event.Timestamp = e.Timestamp + e.DataStream.setFields(fields) + if e.ECSVersion != "" { + fields.set("ecs", common.MapStr{"version": e.ECSVersion}) + } + fields.maybeSetMapStr("service", e.Service.Fields()) + fields.maybeSetMapStr("agent", e.Agent.fields()) + fields.maybeSetMapStr("observer", e.Observer.Fields()) + fields.maybeSetMapStr("host", e.Host.fields()) + fields.maybeSetMapStr("process", e.Process.fields()) + fields.maybeSetMapStr("user", e.User.fields()) + fields.maybeSetMapStr("client", e.Client.fields()) + fields.maybeSetMapStr("source", e.Source.fields()) + fields.maybeSetMapStr("destination", e.Destination.fields()) + fields.maybeSetMapStr("user_agent", e.UserAgent.fields()) + fields.maybeSetMapStr("container", e.Container.fields()) + fields.maybeSetMapStr("kubernetes", e.Kubernetes.fields()) + fields.maybeSetMapStr("cloud", e.Cloud.fields()) + fields.maybeSetMapStr("network", e.Network.fields()) + fields.maybeSetMapStr("labels", sanitizeLabels(e.Labels)) + fields.maybeSetMapStr("event", e.Event.fields()) + fields.maybeSetMapStr("url", e.URL.fields()) + fields.maybeSetMapStr("session", e.Session.fields()) + fields.maybeSetMapStr("parent", e.Parent.fields()) + fields.maybeSetMapStr("child", e.Child.fields()) + fields.maybeSetMapStr("processor", e.Processor.fields()) + fields.maybeSetMapStr("trace", e.Trace.fields()) + fields.maybeSetString("message", e.Message) + fields.maybeSetMapStr("http", e.HTTP.fields()) + if e.Processor == SpanProcessor { + // Deprecated: copy url.original and http.* to span.http.* for backwards compatibility. 
+ // + // TODO(axw) remove this in 8.0: https://github.com/elastic/apm-server/issues/5995 + var spanHTTPFields mapStr + spanHTTPFields.maybeSetString("version", e.HTTP.Version) + if e.HTTP.Request != nil { + spanHTTPFields.maybeSetString("method", e.HTTP.Request.Method) + } + if e.HTTP.Response != nil { + spanHTTPFields.maybeSetMapStr("response", e.HTTP.Response.fields()) + } + if len(spanHTTPFields) != 0 || e.URL.Original != "" { + spanFieldsMap, ok := event.Fields["span"].(common.MapStr) + if !ok { + spanFieldsMap = make(common.MapStr) + event.Fields["span"] = spanFieldsMap + } + spanFields := mapStr(spanFieldsMap) + spanFields.maybeSetMapStr("http", common.MapStr(spanHTTPFields)) + spanFields.maybeSetString("http.url.original", e.URL.Original) + } + } + return event +} diff --git a/model/apmevent_test.go b/model/apmevent_test.go new file mode 100644 index 00000000000..d3c981e74a1 --- /dev/null +++ b/model/apmevent_test.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestAPMEventFields(t *testing.T) { + pid := 1234 + host := "host" + hostname := "hostname" + containerID := "container-123" + serviceName, serviceNodeName := "myservice", "serviceABC" + uid := "12321" + mail := "user@email.com" + agentName := "elastic-node" + outcome := "success" + destinationAddress := "1.2.3.4" + destinationPort := 1234 + traceID := "trace_id" + parentID := "parent_id" + childID := []string{"child_1", "child_2"} + httpRequestMethod := "post" + httpRequestBody := "hello world" + + for _, test := range []struct { + input APMEvent + output common.MapStr + }{{ + input: APMEvent{ + ECSVersion: "1.0.0", + Agent: Agent{ + Name: agentName, + Version: agentVersion, + }, + Observer: Observer{Type: "apm-server"}, + Container: Container{ID: containerID}, + Service: Service{ + Name: serviceName, + Node: ServiceNode{Name: serviceNodeName}, + }, + Host: Host{ + Hostname: hostname, + Name: host, + }, + Client: Client{Domain: "client.domain"}, + Source: Source{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + Destination: Destination{Address: destinationAddress, Port: destinationPort}, + Process: Process{Pid: pid}, + User: User{ID: uid, Email: mail}, + Event: Event{Outcome: outcome}, + Session: Session{ID: "session_id"}, + URL: URL{Original: "url"}, + Labels: common.MapStr{"a": "b", "c": 123}, + Message: "bottle", + Transaction: &Transaction{}, + Timestamp: time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, time.FixedZone("+0100", 3600)), + Processor: Processor{Name: "processor_name", Event: "processor_event"}, + Trace: Trace{ID: traceID}, + Parent: Parent{ID: parentID}, + Child: 
Child{ID: childID}, + HTTP: HTTP{ + Request: &HTTPRequest{ + Method: httpRequestMethod, + Body: httpRequestBody, + }, + }, + }, + output: common.MapStr{ + // common fields + "ecs": common.MapStr{"version": "1.0.0"}, + "agent": common.MapStr{"version": "1.0.0", "name": "elastic-node"}, + "observer": common.MapStr{"type": "apm-server"}, + "container": common.MapStr{"id": containerID}, + "host": common.MapStr{"hostname": hostname, "name": host}, + "process": common.MapStr{"pid": pid}, + "service": common.MapStr{ + "name": "myservice", + "node": common.MapStr{"name": serviceNodeName}, + }, + "user": common.MapStr{"id": "12321", "email": "user@email.com"}, + "client": common.MapStr{"domain": "client.domain"}, + "source": common.MapStr{"ip": "127.0.0.1", "port": 1234}, + "destination": common.MapStr{ + "address": destinationAddress, + "ip": destinationAddress, + "port": destinationPort, + }, + "event": common.MapStr{"outcome": outcome}, + "session": common.MapStr{"id": "session_id"}, + "url": common.MapStr{"original": "url"}, + "labels": common.MapStr{ + "a": "b", + "c": 123, + }, + "message": "bottle", + "trace": common.MapStr{ + "id": traceID, + }, + "processor": common.MapStr{ + "name": "processor_name", + "event": "processor_event", + }, + "parent": common.MapStr{ + "id": parentID, + }, + "child": common.MapStr{ + "id": childID, + }, + "http": common.MapStr{ + "request": common.MapStr{ + "method": "post", + "body.original": httpRequestBody, + }, + }, + }, + }, { + input: APMEvent{ + Processor: TransactionProcessor, + Timestamp: time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, time.FixedZone("+0100", 3600)), + }, + output: common.MapStr{ + "processor": common.MapStr{"name": "transaction", "event": "transaction"}, + // timestamp.us is added for transactions, spans, and errors. + "timestamp": common.MapStr{"us": 1546525024908596}, + }, + }} { + event := test.input.BeatEvent(context.Background()) + assert.Equal(t, test.output, event.Fields) + } +} diff --git a/model/batch.go b/model/batch.go index 721f2dc9e14..1f8b2583a0c 100644 --- a/model/batch.go +++ b/model/batch.go @@ -18,44 +18,37 @@ package model import ( - "github.com/elastic/apm-server/transform" + "context" + + "github.com/elastic/beats/v7/libbeat/beat" ) -type Batch struct { - Transactions []*Transaction - Spans []*Span - Metricsets []*Metricset - Errors []*Error +// BatchProcessor can be used to process a batch of events, giving the +// opportunity to update, add or remove events. +type BatchProcessor interface { + // ProcessBatch is called with a batch of events for processing. + // + // Processing may involve anything, e.g. modifying, adding, removing, + // aggregating, or publishing events. + ProcessBatch(context.Context, *Batch) error } -// Reset resets the batch to be empty, but it retains the underlying storage. -func (b *Batch) Reset() { - b.Transactions = b.Transactions[:0] - b.Spans = b.Spans[:0] - b.Metricsets = b.Metricsets[:0] - b.Errors = b.Errors[:0] -} +// ProcessBatchFunc is a function type that implements BatchProcessor. 
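+// It allows an ordinary function to satisfy the BatchProcessor interface.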
+type ProcessBatchFunc func(context.Context, *Batch) error -func (b *Batch) Len() int { - if b == nil { - return 0 - } - return len(b.Transactions) + len(b.Spans) + len(b.Metricsets) + len(b.Errors) +// ProcessBatch calls f(ctx, b) +func (f ProcessBatchFunc) ProcessBatch(ctx context.Context, b *Batch) error { + return f(ctx, b) } -func (b *Batch) Transformables() []transform.Transformable { - transformables := make([]transform.Transformable, 0, b.Len()) - for _, tx := range b.Transactions { - transformables = append(transformables, tx) - } - for _, span := range b.Spans { - transformables = append(transformables, span) - } - for _, metricset := range b.Metricsets { - transformables = append(transformables, metricset) - } - for _, err := range b.Errors { - transformables = append(transformables, err) +// Batch is a collection of APM events. +type Batch []APMEvent + +// Transform transforms all events in the batch, in sequence. +func (b *Batch) Transform(ctx context.Context) []beat.Event { + out := make([]beat.Event, len(*b)) + for i, event := range *b { + out[i] = event.BeatEvent(ctx) } - return transformables + return out } diff --git a/model/child.go b/model/child.go new file mode 100644 index 00000000000..109c856495e --- /dev/null +++ b/model/child.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Child holds information about the children of a trace event. +type Child struct { + // ID holds IDs of child events. + ID []string +} + +func (c *Child) fields() common.MapStr { + var fields mapStr + if len(c.ID) > 0 { + fields.set("id", c.ID) + } + return common.MapStr(fields) +} diff --git a/model/client.go b/model/client.go index 440f4dcc90f..35a014efec7 100644 --- a/model/client.go +++ b/model/client.go @@ -25,17 +25,24 @@ import ( // Client holds information about the client of a request. type Client struct { + // Domain holds the client's domain (FQDN). + Domain string + // IP holds the client's IP address. IP net.IP - // TODO(axw) add client.geo fields, when we have - // GeoIP lookup implemented in the server. + // Port holds the client's IP port. 
+ Port int } func (c *Client) fields() common.MapStr { var fields mapStr + fields.maybeSetString("domain", c.Domain) if c.IP != nil { fields.set("ip", c.IP.String()) } + if c.Port > 0 { + fields.set("port", c.Port) + } return common.MapStr(fields) } diff --git a/model/client_test.go b/model/client_test.go index 8b9ee1e6838..7f6aa2e9d0e 100644 --- a/model/client_test.go +++ b/model/client_test.go @@ -28,15 +28,23 @@ import ( func TestClientFields(t *testing.T) { for name, tc := range map[string]struct { - ip string - out common.MapStr + domain string + ip net.IP + port int + out common.MapStr }{ - "Empty": {ip: "", out: nil}, - "IPv4": {ip: "192.0.0.1", out: common.MapStr{"ip": "192.0.0.1"}}, - "IPv6": {ip: "2001:db8::68", out: common.MapStr{"ip": "2001:db8::68"}}, + "Empty": {out: nil}, + "IPv4": {ip: net.ParseIP("192.0.0.1"), out: common.MapStr{"ip": "192.0.0.1"}}, + "IPv6": {ip: net.ParseIP("2001:db8::68"), out: common.MapStr{"ip": "2001:db8::68"}}, + "Port": {port: 123, out: common.MapStr{"port": 123}}, + "Domain": {domain: "testing.invalid", out: common.MapStr{"domain": "testing.invalid"}}, } { t.Run(name, func(t *testing.T) { - c := Client{IP: net.ParseIP(tc.ip)} + c := Client{ + Domain: tc.domain, + IP: tc.ip, + Port: tc.port, + } assert.Equal(t, tc.out, c.fields()) }) } diff --git a/model/cloud.go b/model/cloud.go index dc53bd183be..9ce7b0c4491 100644 --- a/model/cloud.go +++ b/model/cloud.go @@ -34,12 +34,13 @@ type Cloud struct { ProjectName string Provider string Region string + ServiceName string } func (c *Cloud) fields() common.MapStr { var fields mapStr - var account, instance, machine, project mapStr + var account, instance, machine, project, service mapStr account.maybeSetString("id", c.AccountID) account.maybeSetString("name", c.AccountName) instance.maybeSetString("id", c.InstanceID) @@ -47,12 +48,14 @@ func (c *Cloud) fields() common.MapStr { machine.maybeSetString("type", c.MachineType) project.maybeSetString("id", c.ProjectID) project.maybeSetString("name", c.ProjectName) + service.maybeSetString("name", c.ServiceName) fields.maybeSetMapStr("account", common.MapStr(account)) fields.maybeSetString("availability_zone", c.AvailabilityZone) fields.maybeSetMapStr("instance", common.MapStr(instance)) fields.maybeSetMapStr("machine", common.MapStr(machine)) fields.maybeSetMapStr("project", common.MapStr(project)) + fields.maybeSetMapStr("service", common.MapStr(service)) fields.maybeSetString("provider", c.Provider) fields.maybeSetString("region", c.Region) return common.MapStr(fields) diff --git a/model/container.go b/model/container.go index c6e17f3488f..ee1c86e53ec 100644 --- a/model/container.go +++ b/model/container.go @@ -22,14 +22,22 @@ import ( ) type Container struct { - ID string + ID string + Name string + Runtime string + ImageName string + ImageTag string } -func (k *Container) fields() common.MapStr { - if k == nil { - return nil - } +func (c *Container) fields() common.MapStr { var container mapStr - container.maybeSetString("id", k.ID) + container.maybeSetString("name", c.Name) + container.maybeSetString("id", c.ID) + container.maybeSetString("runtime", c.Runtime) + + var image mapStr + image.maybeSetString("name", c.ImageName) + image.maybeSetString("tag", c.ImageTag) + container.maybeSetMapStr("image", common.MapStr(image)) return common.MapStr(container) } diff --git a/model/container_test.go b/model/container_test.go index 97d288bd1f8..88c95504ba0 100644 --- a/model/container_test.go +++ b/model/container_test.go @@ -40,6 +40,22 @@ func 
TestContainerTransform(t *testing.T) { Container: Container{ID: id}, Output: common.MapStr{"id": id}, }, + { + Container: Container{Name: "container_name"}, + Output: common.MapStr{"name": "container_name"}, + }, + { + Container: Container{Runtime: "container_runtime"}, + Output: common.MapStr{"runtime": "container_runtime"}, + }, + { + Container: Container{ImageName: "image_name"}, + Output: common.MapStr{"image": common.MapStr{"name": "image_name"}}, + }, + { + Container: Container{ImageTag: "image_tag"}, + Output: common.MapStr{"image": common.MapStr{"tag": "image_tag"}}, + }, } for _, test := range tests { diff --git a/model/context.go b/model/context.go index 3e08c9236e2..b61f8185e95 100644 --- a/model/context.go +++ b/model/context.go @@ -18,274 +18,18 @@ package model import ( - "net/http" - "net/url" - "strconv" - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/utility" ) -// Context holds all information sent under key context -type Context struct { - Http *Http - URL *URL - Labels *Labels - Page *Page - Custom *Custom - Message *Message - Experimental interface{} -} - -// Http bundles information related to an http request and its response -type Http struct { - Version *string - Request *Req - Response *Resp -} - -// URL describes an URL and its components -type URL struct { - Original *string - Scheme *string - Full *string - Domain *string - Port *int - Path *string - Query *string - Fragment *string -} - -func ParseURL(original, hostname string) *URL { - original = truncate(original) - url, err := url.Parse(original) - if err != nil { - return &URL{Original: &original} - } - if url.Scheme == "" { - url.Scheme = "http" - } - if url.Host == "" { - url.Host = hostname - } - full := truncate(url.String()) - out := &URL{ - Original: &original, - Scheme: &url.Scheme, - Full: &full, - } - if path := truncate(url.Path); path != "" { - out.Path = &path - } - if query := truncate(url.RawQuery); query != "" { - out.Query = &query - } - if fragment := url.Fragment; fragment != "" { - out.Fragment = &fragment - } - if host := truncate(url.Hostname()); host != "" { - out.Domain = &host - } - if port := truncate(url.Port()); port != "" { - if intv, err := strconv.Atoi(port); err == nil { - out.Port = &intv - } - } - return out -} - -// truncate returns s truncated at n runes, and the number of runes in the resulting string (<= n). -func truncate(s string) string { - var j int - for i := range s { - if j == 1024 { - return s[:i] - } - j++ - } - return s -} - -// Page consists of URL and referer -type Page struct { - URL *URL - Referer *string -} - -// Labels holds user defined information nested under key tags -// -// TODO(axw) either get rid of this type, or use it consistently -// in all model types (looking at you, Metadata). 
-type Labels common.MapStr - -// Custom holds user defined information nested under key custom -type Custom common.MapStr - -// Req bundles information related to an http request -type Req struct { - Method string - Body interface{} - Headers http.Header - Env interface{} - Socket *Socket - Cookies interface{} -} - -// Socket indicates whether an http request was encrypted and the initializers remote address -type Socket struct { - RemoteAddress *string - Encrypted *bool -} - -// Resp bundles information related to an http requests response -type Resp struct { - Finished *bool - HeadersSent *bool - MinimalResp -} - -type MinimalResp struct { - StatusCode *int - Headers http.Header - TransferSize *float64 - EncodedBodySize *float64 - DecodedBodySize *float64 -} - -// Fields returns common.MapStr holding transformed data for attribute url. -func (url *URL) Fields() common.MapStr { - if url == nil { +// customFields transforms in, returning a copy with sanitized keys, +// suitable for storing as "custom" in transaction and error documents. +func customFields(in common.MapStr) common.MapStr { + if len(in) == 0 { return nil } - fields := common.MapStr{} - utility.Set(fields, "full", url.Full) - utility.Set(fields, "fragment", url.Fragment) - utility.Set(fields, "domain", url.Domain) - utility.Set(fields, "path", url.Path) - utility.Set(fields, "port", url.Port) - utility.Set(fields, "original", url.Original) - utility.Set(fields, "scheme", url.Scheme) - utility.Set(fields, "query", url.Query) - return fields -} - -// Fields returns common.MapStr holding transformed data for attribute http. -func (h *Http) Fields() common.MapStr { - if h == nil { - return nil + out := make(common.MapStr, len(in)) + for k, v := range in { + out[sanitizeLabelKey(k)] = v } - - fields := common.MapStr{} - utility.Set(fields, "version", h.Version) - utility.Set(fields, "request", h.Request.fields()) - utility.Set(fields, "response", h.Response.fields()) - return fields -} - -// UserAgent parses User Agent information from attribute http. -func (h *Http) UserAgent() string { - if h == nil || h.Request == nil { - return "" - } - dec := utility.ManualDecoder{} - return dec.UserAgentHeader(h.Request.Headers) -} - -// Fields returns common.MapStr holding transformed data for attribute page. -func (page *Page) Fields() common.MapStr { - if page == nil { - return nil - } - var fields = common.MapStr{} - // Remove in 8.0 - if page.URL != nil { - utility.Set(fields, "url", page.URL.Original) - } - utility.Set(fields, "referer", page.Referer) - return fields -} - -// Fields returns common.MapStr holding transformed data for attribute label. -func (labels *Labels) Fields() common.MapStr { - if labels == nil { - return nil - } - return common.MapStr(*labels) -} - -// Fields returns common.MapStr holding transformed data for attribute custom. -func (custom *Custom) Fields() common.MapStr { - if custom == nil { - return nil - } - // We use utility.Set to normalise decoded JSON types, - // e.g. json.Number is converted to a float64 if possible. 
- m := make(common.MapStr) - for k, v := range *custom { - utility.Set(m, k, v) - } - return m -} - -func (req *Req) fields() common.MapStr { - if req == nil { - return nil - } - fields := common.MapStr{} - utility.Set(fields, "headers", headerToFields(req.Headers)) - utility.Set(fields, "socket", req.Socket.fields()) - utility.Set(fields, "env", req.Env) - utility.DeepUpdate(fields, "body.original", req.Body) - utility.Set(fields, "method", req.Method) - utility.Set(fields, "cookies", req.Cookies) - - return fields -} - -func (resp *Resp) fields() common.MapStr { - if resp == nil { - return nil - } - fields := resp.MinimalResp.Fields() - if fields == nil { - fields = common.MapStr{} - } - utility.Set(fields, "headers_sent", resp.HeadersSent) - utility.Set(fields, "finished", resp.Finished) - return fields -} - -func (m *MinimalResp) Fields() common.MapStr { - if m == nil { - return nil - } - fields := common.MapStr{} - utility.Set(fields, "headers", headerToFields(m.Headers)) - utility.Set(fields, "status_code", m.StatusCode) - utility.Set(fields, "transfer_size", m.TransferSize) - utility.Set(fields, "encoded_body_size", m.EncodedBodySize) - utility.Set(fields, "decoded_body_size", m.DecodedBodySize) - return fields -} - -func headerToFields(h http.Header) common.MapStr { - if len(h) == 0 { - return nil - } - m := common.MapStr{} - for k, v := range h { - m.Put(k, v) - } - return m -} - -func (s *Socket) fields() common.MapStr { - if s == nil { - return nil - } - fields := common.MapStr{} - utility.Set(fields, "encrypted", s.Encrypted) - utility.Set(fields, "remote_address", s.RemoteAddress) - return fields + return out } diff --git a/model/datastream.go b/model/datastream.go new file mode 100644 index 00000000000..e94aec05e5b --- /dev/null +++ b/model/datastream.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import "github.com/elastic/apm-server/datastreams" + +// DataStream identifies the data stream to which an event will be written. +type DataStream struct { + // Type holds the data_stream.type identifier. + Type string + + // Dataset holds the data_stream.dataset identifier. + Dataset string + + // Namespace holds the data_stream.namespace identifier. + Namespace string +} + +func (d *DataStream) setFields(fields *mapStr) { + fields.maybeSetString(datastreams.TypeField, d.Type) + fields.maybeSetString(datastreams.DatasetField, d.Dataset) + fields.maybeSetString(datastreams.NamespaceField, d.Namespace) +} diff --git a/model/destination.go b/model/destination.go new file mode 100644 index 00000000000..1fdd87edde2 --- /dev/null +++ b/model/destination.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "net" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// Destination holds information about the destination of a request. +type Destination struct { + Address string + Port int +} + +func (d *Destination) fields() common.MapStr { + var fields mapStr + if fields.maybeSetString("address", d.Address) { + // Copy destination.address to destination.ip if it's a valid IP. + // + // TODO(axw) move this to a "convert" ingest processor once we + // have a high enough minimum supported Elasticsearch version. + if ip := net.ParseIP(d.Address); ip != nil { + fields.set("ip", d.Address) + } + } + if d.Port > 0 { + fields.set("port", d.Port) + } + return common.MapStr(fields) +} diff --git a/model/error.go b/model/error.go index e9d85fc63b3..5353959aaca 100644 --- a/model/error.go +++ b/model/error.go @@ -18,348 +18,104 @@ package model import ( - "context" - "crypto/md5" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "strconv" - "time" - - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/monitoring" - - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" ) var ( - errorMetrics = monitoring.Default.NewRegistry("apm-server.processor.error") - errorTransformations = monitoring.NewInt(errorMetrics, "transformations") - errorStacktraceCounter = monitoring.NewInt(errorMetrics, "stacktraces") - errorFrameCounter = monitoring.NewInt(errorMetrics, "frames") - errorProcessorEntry = common.MapStr{"name": errorProcessorName, "event": errorDocType} + // ErrorProcessor is the Processor value that should be assigned to error events. + ErrorProcessor = Processor{Name: "error", Event: "error"} ) const ( - errorProcessorName = "error" - errorDocType = "error" + ErrorsDataset = "apm.error" ) type Error struct { - ID *string - TransactionID string - TraceID string - ParentID string - - Timestamp time.Time - Metadata Metadata + ID string - Culprit *string - Labels *Labels - Page *Page - HTTP *Http - URL *URL - Custom *Custom + GroupingKey string + Culprit string + Custom common.MapStr Exception *Exception Log *Log - - TransactionSampled *bool - TransactionType *string - - // RUM records whether or not this is a RUM error, - // and should have its stack frames sourcemapped. 
- RUM bool - - Experimental interface{} - data common.MapStr } type Exception struct { - Message *string - Module *string - Code interface{} + Message string + Module string + Code string Attributes interface{} Stacktrace Stacktrace - Type *string + Type string Handled *bool Cause []Exception - Parent *int } type Log struct { Message string - Level *string - ParamMessage *string - LoggerName *string + Level string + ParamMessage string + LoggerName string Stacktrace Stacktrace } -func (e *Error) Transform(ctx context.Context, cfg *transform.Config) []beat.Event { - errorTransformations.Inc() - +func (e *Error) setFields(fields *mapStr) { + var errorFields mapStr + errorFields.maybeSetString("id", e.ID) if e.Exception != nil { - addStacktraceCounter(e.Exception.Stacktrace) - } - if e.Log != nil { - addStacktraceCounter(e.Log.Stacktrace) - } - - fields := common.MapStr{ - "error": e.fields(ctx, cfg), - "processor": errorProcessorEntry, - } - - // first set the generic metadata (order is relevant) - e.Metadata.Set(fields) - utility.Set(fields, "source", fields["client"]) - // then add event specific information - // merges with metadata labels, overrides conflicting keys - utility.DeepUpdate(fields, "labels", e.Labels.Fields()) - utility.Set(fields, "http", e.HTTP.Fields()) - urlFields := e.URL.Fields() - if urlFields != nil { - utility.Set(fields, "url", e.URL.Fields()) - } - if e.Page != nil { - utility.DeepUpdate(fields, "http.request.referrer", e.Page.Referer) - if urlFields == nil { - utility.Set(fields, "url", e.Page.URL.Fields()) - } - } - utility.Set(fields, "experimental", e.Experimental) - - // sampled and type is nil if an error happens outside a transaction or an (old) agent is not sending sampled info - // agents must send semantically correct data - if e.TransactionSampled != nil || e.TransactionType != nil || e.TransactionID != "" { - transaction := common.MapStr{} - if e.TransactionID != "" { - transaction["id"] = e.TransactionID - } - utility.Set(transaction, "type", e.TransactionType) - utility.Set(transaction, "sampled", e.TransactionSampled) - utility.Set(fields, "transaction", transaction) - } - - utility.AddID(fields, "parent", e.ParentID) - utility.AddID(fields, "trace", e.TraceID) - utility.Set(fields, "timestamp", utility.TimeAsMicros(e.Timestamp)) - - return []beat.Event{ - { - Fields: fields, - Timestamp: e.Timestamp, - }, + exceptionFields := e.Exception.appendFields(nil, 0) + errorFields.set("exception", exceptionFields) } + errorFields.maybeSetMapStr("log", e.logFields()) + errorFields.maybeSetString("culprit", e.Culprit) + errorFields.maybeSetMapStr("custom", customFields(e.Custom)) + errorFields.maybeSetString("grouping_key", e.GroupingKey) + fields.set("error", common.MapStr(errorFields)) } -func (e *Error) fields(ctx context.Context, cfg *transform.Config) common.MapStr { - e.data = common.MapStr{} - e.add("id", e.ID) - e.add("page", e.Page.Fields()) - - exceptionChain := flattenExceptionTree(e.Exception) - e.addException(ctx, cfg, exceptionChain) - e.addLog(ctx, cfg) - - e.updateCulprit(cfg) - e.add("culprit", e.Culprit) - e.add("custom", e.Custom.Fields()) - - e.add("grouping_key", e.calcGroupingKey(exceptionChain)) - - return e.data -} - -func (e *Error) updateCulprit(cfg *transform.Config) { - if cfg.RUM.SourcemapStore == nil { - return - } - var fr *StacktraceFrame - if e.Log != nil { - fr = findSmappedNonLibraryFrame(e.Log.Stacktrace) - } - if fr == nil && e.Exception != nil { - fr = findSmappedNonLibraryFrame(e.Exception.Stacktrace) - } - if fr == nil 
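Note that the reworked Error carries Culprit and GroupingKey as plain, precomputed values; nothing in this file hashes or source-maps anymore (that logic presumably moves to the decoding and processing layers). A sketch of the resulting sparseness, again in package model since setFields is unexported:

package model

import "fmt"

func errorFieldsExample() {
	e := Error{
		ID:          "0123456789abcdef",
		GroupingKey: "d41d8cd98f00b204e9800998ecf8427e", // supplied by the caller, no longer computed here
		Culprit:     "main.handler",
	}
	var fields mapStr
	e.setFields(&fields)
	// Only error.id, error.grouping_key and error.culprit are emitted;
	// zero-valued fields are skipped by the maybeSet* helpers.
	fmt.Println(fields["error"])
}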
{ - return - } - var culprit string - if fr.Filename != nil { - culprit = *fr.Filename - } else if fr.Classname != nil { - culprit = *fr.Classname - } - if fr.Function != nil { - culprit += fmt.Sprintf(" in %v", *fr.Function) - } - e.Culprit = &culprit -} - -func findSmappedNonLibraryFrame(frames []*StacktraceFrame) *StacktraceFrame { - for _, fr := range frames { - if fr.IsSourcemapApplied() && !fr.IsLibraryFrame() { - return fr - } - } - return nil -} - -func (e *Error) addException(ctx context.Context, cfg *transform.Config, chain []Exception) { - var result []common.MapStr - for _, exception := range chain { - ex := common.MapStr{} - utility.Set(ex, "message", exception.Message) - utility.Set(ex, "module", exception.Module) - utility.Set(ex, "attributes", exception.Attributes) - utility.Set(ex, "type", exception.Type) - utility.Set(ex, "handled", exception.Handled) - utility.Set(ex, "parent", exception.Parent) - - switch code := exception.Code.(type) { - case int: - utility.Set(ex, "code", strconv.Itoa(code)) - case float64: - utility.Set(ex, "code", fmt.Sprintf("%.0f", code)) - case string: - utility.Set(ex, "code", code) - case json.Number: - utility.Set(ex, "code", code.String()) - } - - st := exception.Stacktrace.transform(ctx, cfg, e.RUM, &e.Metadata.Service) - utility.Set(ex, "stacktrace", st) - - result = append(result, ex) - } - - e.add("exception", result) -} - -func (e *Error) addLog(ctx context.Context, cfg *transform.Config) { +func (e *Error) logFields() common.MapStr { if e.Log == nil { - return - } - log := common.MapStr{} - utility.Set(log, "message", e.Log.Message) - utility.Set(log, "param_message", e.Log.ParamMessage) - utility.Set(log, "logger_name", e.Log.LoggerName) - utility.Set(log, "level", e.Log.Level) - st := e.Log.Stacktrace.transform(ctx, cfg, e.RUM, &e.Metadata.Service) - utility.Set(log, "stacktrace", st) - - e.add("log", log) -} - -type groupingKey struct { - hash hash.Hash - empty bool -} - -func newGroupingKey() *groupingKey { - return &groupingKey{ - hash: md5.New(), - empty: true, - } -} - -func (k *groupingKey) add(s *string) bool { - if s == nil { - return false - } - io.WriteString(k.hash, *s) - k.empty = false - return true -} - -func (k *groupingKey) addEither(str ...*string) { - for _, s := range str { - if ok := k.add(s); ok { - break - } - } -} - -func (k *groupingKey) String() string { - return hex.EncodeToString(k.hash.Sum(nil)) -} - -// calcGroupingKey computes a value for deduplicating errors - events with -// same grouping key can be collapsed together. -func (e *Error) calcGroupingKey(chain []Exception) string { - k := newGroupingKey() - var stacktrace Stacktrace - - for _, ex := range chain { - k.add(ex.Type) - stacktrace = append(stacktrace, ex.Stacktrace...) 
- } - - if e.Log != nil { - k.add(e.Log.ParamMessage) - if len(stacktrace) == 0 { - stacktrace = e.Log.Stacktrace - } - } - - for _, fr := range stacktrace { - if fr.ExcludeFromGrouping { - continue + return nil + } + var log mapStr + log.maybeSetString("message", e.Log.Message) + log.maybeSetString("param_message", e.Log.ParamMessage) + log.maybeSetString("logger_name", e.Log.LoggerName) + log.maybeSetString("level", e.Log.Level) + if st := e.Log.Stacktrace.transform(); len(st) > 0 { + log.set("stacktrace", st) + } + return common.MapStr(log) +} + +func (e *Exception) appendFields(out []common.MapStr, parentOffset int) []common.MapStr { + offset := len(out) + var fields mapStr + fields.maybeSetString("message", e.Message) + fields.maybeSetString("module", e.Module) + fields.maybeSetString("type", e.Type) + fields.maybeSetString("code", e.Code) + fields.maybeSetBool("handled", e.Handled) + if offset > parentOffset+1 { + // The parent of an exception in the resulting slice is at the offset + // indicated by the `parent` field (0 index based), or the preceding + // exception in the slice if the `parent` field is not set. + fields.set("parent", parentOffset) + } + if e.Attributes != nil { + fields.set("attributes", e.Attributes) + } + if n := len(e.Stacktrace); n > 0 { + frames := make([]common.MapStr, n) + for i, frame := range e.Stacktrace { + frames[i] = frame.transform() } - k.addEither(fr.Module, fr.Filename, fr.Classname) - k.add(fr.Function) + fields.set("stacktrace", frames) } - if k.empty { - for _, ex := range chain { - k.add(ex.Message) - } - } - if k.empty && e.Log != nil { - k.add(&e.Log.Message) - } - - return k.String() -} - -func (e *Error) add(key string, val interface{}) { - utility.Set(e.data, key, val) -} - -func addStacktraceCounter(st Stacktrace) { - if frames := len(st); frames > 0 { - errorStacktraceCounter.Inc() - errorFrameCounter.Add(int64(frames)) - } -} - -// flattenExceptionTree recursively traverses the causes of an exception to return a slice of exceptions. -// Tree traversal is Depth First. -// The parent of a exception in the resulting slice is at the position indicated by the `parent` property -// (0 index based), or the preceding exception if `parent` is nil. -// The resulting exceptions always have `nil` cause. -func flattenExceptionTree(exception *Exception) []Exception { - var recur func(Exception, int) []Exception - - recur = func(e Exception, posId int) []Exception { - causes := e.Cause - e.Cause = nil - result := []Exception{e} - for idx, cause := range causes { - if idx > 0 { - cause.Parent = &posId - } - result = append(result, recur(cause, posId+len(result))...) - } - return result - } - - if exception == nil { - return []Exception{} + out = append(out, common.MapStr(fields)) + for _, cause := range e.Cause { + out = cause.appendFields(out, offset) } - return recur(*exception, 0) + return out } diff --git a/model/error/_meta/fields.yml b/model/error/_meta/fields.yml index 13d432715e0..06492564536 100644 --- a/model/error/_meta/fields.yml +++ b/model/error/_meta/fields.yml @@ -2,6 +2,851 @@ title: APM Error description: Error-specific data for APM fields: + - name: processor.name + type: keyword + description: Processor name. + overwrite: true + + - name: processor.event + type: keyword + description: Processor event. + overwrite: true + + - name: timestamp + type: group + fields: + - name: us + type: long + count: 1 + description: > + Timestamp of the event in microseconds since Unix epoch. 
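Before the field definitions continue, it helps to see the depth-first flattening contract of Exception.appendFields (above, in model/error.go) in action: each cause is appended after its parent, and a "parent" index is only recorded when the parent is not simply the preceding element. This is the same behaviour the rewritten TestHandleExceptionTree below asserts; a minimal sketch:

package model

import "fmt"

func exceptionFlattenExample() {
	root := &Exception{
		Type: "type0",
		Cause: []Exception{
			{Type: "type1"},
			{Type: "type2", Cause: []Exception{{Type: "type3"}}},
		},
	}
	for i, ex := range root.appendFields(nil, 0) {
		fmt.Println(i, ex["type"], ex["parent"])
	}
	// 0 type0 <nil>  (root)
	// 1 type1 <nil>  (parent is the preceding element, 0, so it is implicit)
	// 2 type2 0      (explicit: its parent, element 0, is not the preceding element)
	// 3 type3 <nil>  (parent is the preceding element, 2, so it is implicit)
}

Omitting the implicit parent keeps the documents smaller while still preserving the cause tree, which is exactly what the updated test fixture checks.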
+ overwrite: true + + - name: message + type: text + description: The original error message. + overwrite: true + + - name: url + type: group + description: > + A complete URL, with scheme, host and path. + dynamic: false + fields: + + - name: scheme + type: keyword + description: > + The protocol of the request, e.g. "https:". + overwrite: true + + - name: full + type: keyword + description: > + The full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top. + overwrite: true + + - name: domain + type: keyword + description: > + The hostname of the request, e.g. "example.com". + overwrite: true + + - name: port + type: long + description: > + The port of the request, e.g. 443. + overwrite: true + + - name: path + type: keyword + description: > + The path of the request, e.g. "/search". + overwrite: true + + - name: query + type: keyword + description: > + The query string of the request, e.g. "q=elasticsearch". + overwrite: true + + - name: fragment + type: keyword + description: > + A fragment specifying a location in a web page, e.g. "top". + overwrite: true + + - name: http + type: group + dynamic: false + fields: + + - name: version + type: keyword + description: > + The HTTP version of the request leading to this event. + overwrite: true + + - name: request + type: group + fields: + + - name: method + type: keyword + description: > + The HTTP method of the request leading to this event. + overwrite: true + + - name: headers + type: object + enabled: false + description: > + The canonical headers of the monitored HTTP request. + overwrite: true + + - name: referrer + type: keyword + ignore_above: 1024 + description: Referrer for this HTTP request. + overwrite: true + + - name: response + type: group + fields: + + - name: status_code + type: long + description: > + The status code of the HTTP response. + overwrite: true + + - name: finished + type: boolean + description: > + Used by the Node agent to indicate when in the response life cycle an error has occurred. + overwrite: true + + - name: headers + type: object + enabled: false + description: > + The canonical headers of the monitored HTTP response. + overwrite: true + + - name: labels + type: object + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 + dynamic: true + overwrite: true + description: > + A flat mapping of user-defined labels with string, boolean or number values. + + - name: service + type: group + dynamic: false + description: > + Service fields. + fields: + - name: name + type: keyword + description: > + Immutable name of the service emitting this event. + overwrite: true + + - name: version + type: keyword + description: > + Version of the service emitting this event. + overwrite: true + + - name: environment + type: keyword + description: > + Service environment. + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Unique meaningful name of the service node. + overwrite: true + + - name: language + type: group + fields: + + - name: name + type: keyword + description: > + Name of the programming language used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the programming language used. + overwrite: true + + - name: runtime + type: group + fields: + + - name: name + type: keyword + description: > + Name of the runtime used.
+ overwrite: true + + - name: version + type: keyword + description: > + Version of the runtime used. + overwrite: true + + - name: framework + type: group + fields: + + - name: name + type: keyword + description: > + Name of the framework used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the framework used. + overwrite: true + + - name: transaction + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The transaction ID. + overwrite: true + - name: sampled + type: boolean + description: > + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + overwrite: true + - name: type + type: keyword + description: > + Keyword of specific relevance in the service's domain (e.g. 'request', 'backgroundjob', etc.) + overwrite: true + - name: name + type: keyword + multi_fields: + - name: text + type: text + description: > + Generic designation of a transaction in the scope of a single service (e.g. 'GET /users/:id'). + overwrite: true + + - name: trace + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the trace to which the event belongs. + overwrite: true + + - name: parent + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the parent event. + overwrite: true + + - name: agent + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + Name of the agent used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the agent used. + overwrite: true + + - name: ephemeral_id + type: keyword + description: > + The Ephemeral ID identifies a running process. + overwrite: true + + - name: container + type: group + dynamic: false + title: Container + description: > + Container fields are used for meta information about the specific container + that is the source of information. These fields help correlate data from + containers on any runtime. + fields: + + - name: id + type: keyword + description: > + Unique container id. + overwrite: true + + - name: kubernetes + type: group + dynamic: false + title: Kubernetes + description: > + Kubernetes metadata reported by agents + fields: + + - name: namespace + type: keyword + description: > + Kubernetes namespace + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Kubernetes node name + overwrite: true + + - name: pod + type: group + fields: + + - name: name + type: keyword + description: > + Kubernetes pod name + overwrite: true + + - name: uid + type: keyword + description: > + Kubernetes Pod UID + overwrite: true + + - name: network + type: group + dynamic: false + description: > + Optional network fields + fields: + + - name: connection + type: group + description: > + Network connection details + fields: + + - name: type + type: keyword + description: > + Network connection type, e.g. "wifi", "cell" + overwrite: true + + - name: subtype + type: keyword + description: > + Detailed network connection sub-type, e.g. "LTE", "CDMA" + overwrite: true + + - name: carrier + type: group + description: > + Network operator + fields: + + - name: name + type: keyword + overwrite: true + description: > + Carrier name, e.g. Vodafone, T-Mobile, etc.
+ + - name: mcc + type: keyword + overwrite: true + description: > + Mobile country code + + - name: mnc + type: keyword + overwrite: true + description: > + Mobile network code + + - name: icc + type: keyword + overwrite: true + description: > + ISO country code, e.g. US + + - name: host + type: group + dynamic: false + description: > + Optional host fields. + fields: + + - name: architecture + type: keyword + description: > + The architecture of the host the event was recorded on. + overwrite: true + + - name: hostname + type: keyword + description: > + The hostname of the host the event was recorded on. + overwrite: true + + - name: name + type: keyword + description: > + Name of the host the event was recorded on. + It can contain the same information as host.hostname or a name specified by the user. + overwrite: true + + - name: ip + type: ip + description: > + IP of the host that records the event. + overwrite: true + + - name: os + title: Operating System + group: 2 + description: > + The OS fields contain information about the operating system. + type: group + fields: + - name: platform + type: keyword + description: > + The platform of the host the event was recorded on. + overwrite: true + + - name: process + type: group + dynamic: false + description: > + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: > + Process arguments. + May be filtered to protect sensitive information. + overwrite: true + + - name: pid + type: long + description: > + Numeric process ID of the service process. + overwrite: true + + - name: ppid + type: long + description: > + Numeric ID of the service's parent process. + overwrite: true + + - name: title + type: keyword + description: > + Service process title. + overwrite: true + + - name: observer + type: group + dynamic: false + fields: + + - name: listening + type: keyword + overwrite: true + description: > + Address the server is listening on. + + - name: hostname + type: keyword + overwrite: true + description: > + Hostname of the APM Server. + + - name: version + type: keyword + overwrite: true + description: > + APM Server version. + + - name: version_major + type: byte + overwrite: true + description: > + Major version number of the observer + + - name: type + type: keyword + overwrite: true + description: > + The type will be set to `apm-server`. + + - name: id + type: keyword + overwrite: true + description: > + Unique identifier of the APM Server. + + - name: ephemeral_id + type: keyword + overwrite: true + description: > + Ephemeral identifier of the APM Server. + + - name: user + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + The username of the logged in user. + overwrite: true + + - name: domain + type: keyword + description: > + Domain of the logged in user. + overwrite: true + + - name: id + type: keyword + description: > + Identifier of the logged in user. + overwrite: true + + - name: email + type: keyword + description: > + Email of the logged in user. + overwrite: true + + - name: client + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Client domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the client of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address.
+ overwrite: true + + - name: port + type: long + description: > + Port of the client. + overwrite: true + + - name: source + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Source domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the source of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the source. + overwrite: true + + - name: destination + title: Destination + group: 2 + description: 'Destination fields describe details about the destination of a packet/event. + + Destination fields are usually populated in conjunction with source fields.' + type: group + fields: + - name: address + level: extended + type: keyword + ignore_above: 1024 + description: 'Some event destination addresses are defined ambiguously. The + event will sometimes list an IP, a domain or a unix socket. You should always + store the raw address in the `.address` field. + Then it should be duplicated to `.ip` or `.domain`, depending on which one + it is.' + overwrite: true + + - name: ip + level: core + type: ip + description: 'IP address of the destination. + Can be one of multiple IPv4 or IPv6 addresses.' + overwrite: true + + - name: port + level: core + type: long + format: string + description: Port of the destination. + overwrite: true + + - name: user_agent + dynamic: false + title: User agent + description: > + The user_agent fields normally come from a browser request. They often + show up in web service logs coming from the parsed user agent string. + type: group + overwrite: true + fields: + + - name: original + type: keyword + description: > + Unparsed version of the user_agent. + example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" + overwrite: true + + multi_fields: + - name: text + type: text + description: > + Software agent acting on behalf of a user, e.g. a web browser / OS combination. + overwrite: true + + - name: name + type: keyword + overwrite: true + example: Safari + description: > + Name of the user agent. + + - name: version + type: keyword + overwrite: true + description: > + Version of the user agent. + example: 12.0 + + - name: device + type: group + overwrite: true + title: Device + description: > + Information concerning the device. + fields: + + - name: name + type: keyword + overwrite: true + example: iPhone + description: > + Name of the device. + + - name: os + type: group + overwrite: true + title: Operating System + description: > + The OS fields contain information about the operating system. + fields: + + - name: platform + type: keyword + overwrite: true + description: > + Operating system platform (such as centos, ubuntu, windows). + example: darwin + + - name: name + type: keyword + overwrite: true + example: "Mac OS X" + description: > + Operating system name, without the version. + + - name: full + type: keyword + overwrite: true + example: "Mac OS Mojave" + description: > + Operating system name, including the version or code name. + + - name: family + type: keyword + overwrite: true + example: "debian" + description: > + OS family (such as redhat, debian, freebsd, windows).
+ + - name: version + type: keyword + overwrite: true + example: "10.14.1" + description: > + Operating system version as a raw string. + + - name: kernel + type: keyword + overwrite: true + example: "4.4.0-112-generic" + description: > + Operating system kernel version as a raw string. + + - name: cloud + title: Cloud + group: 2 + type: group + description: > + Cloud metadata reported by agents + fields: + - name: account + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account name + overwrite: true + - name: availability_zone + level: extended + type: keyword + ignore_above: 1024 + description: Cloud availability zone name + example: us-east1-a + overwrite: true + - name: instance + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine name + overwrite: true + - name: machine + type: group + dynamic: false + fields: + - name: type + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine type + example: t2.medium + overwrite: true + - name: project + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project name + overwrite: true + - name: provider + level: extended + type: keyword + ignore_above: 1024 + description: Cloud provider name + example: gcp + overwrite: true + - name: region + level: extended + type: keyword + ignore_above: 1024 + description: Cloud region name + example: us-east1 + overwrite: true + - name: service + type: group + dynamic: false + fields: + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: > + Cloud service name, intended to distinguish services running on + different platforms within a provider. + overwrite: true + - name: error type: group description: > @@ -24,7 +869,13 @@ - name: grouping_key type: keyword description: > - GroupingKey of the logged error for use in grouping. + Hash of select properties of the logged error for grouping purposes. + + - name: grouping_name + type: keyword + description: > + Name to associate with an error group. + Errors belonging to the same group (same grouping_key) may have differing values for grouping_name. Consumers may choose one arbitrarily. - name: exception type: group @@ -48,6 +899,7 @@ - name: type type: keyword count: 4 + description: The type of the original error, e.g. the Java exception class name. - name: handled type: boolean diff --git a/model/error/generated/schema/error.go b/model/error/generated/schema/error.go deleted file mode 100644 index 83e0c037d7b..00000000000 --- a/model/error/generated/schema/error.go +++ /dev/null @@ -1,676 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const ModelSchema = `{ - "$id": "docs/spec/errors/error.json", - "type": "object", - "description": "An error or a logged error message captured by an agent occurring in a monitored service", - "allOf": [ - { "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - } }, - { - "properties": { - "id": { - "type": ["string"], - "description": "Hex encoded 128 random bits ID of the error.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace. Must be present if transaction_id and parent_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "transaction_id": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction. Must be present if trace_id and parent_id are set.", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Must be present if trace_id and transaction_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "transaction": { - "type": ["object", "null"], - "description": "Data for correlating errors with transactions", - "properties": { - "sampled": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "type": { - "type": ["string", "null"], - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } - }, - "context": { - "$id": "docs/spec/context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": ["object", "null"], - "properties": { - "custom": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": ["object", "null"], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "response": { - "type": ["object", "null"], - "allOf": [ - { "$id": "docs/spec/http_response.json", - "title": "HTTP response object", - "description": "HTTP response object, used by error, span and transction documents", - "type": ["object", "null"], - "properties": { - "status_code": { - "type": ["integer", "null"], - "description": "The status code of the http request." - }, - "transfer_size": { - "type": ["number", "null"], - "description": "Total size of the payload." - }, - "encoded_body_size": { - "type": ["number", "null"], - "description": "The encoded size of the payload." - }, - "decoded_body_size": { - "type": ["number", "null"], - "description": "The decoded size of the payload." 
- }, - "headers": { - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } }, - { - "properties": { - "finished": { - "description": "A boolean indicating whether the response was finished or not", - "type": [ - "boolean", - "null" - ] - }, - "headers_sent": { - "type": [ - "boolean", - "null" - ] - } - } - } - ] - }, - "request": { - "$id": "docs/spec/request.json", - "title": "Request", - "description": "If a log record was generated as a result of a http request, the http interface can be used to collect this information.", - "type": ["object", "null"], - "properties": { - "body": { - "description": "Data should only contain the request body (not the query string). It can either be a dictionary (for standard HTTP requests) or a raw request body.", - "type": ["object", "string", "null"] - }, - "env": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": ["object", "null"], - "properties": {} - }, - "headers": { - "description": "Should include any headers sent by the requester. Cookies will be taken by headers if supplied.", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - }, - "http_version": { - "description": "HTTP version.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "method": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - }, - "socket": { - "type": ["object", "null"], - "properties": { - "encrypted": { - "description": "Indicates whether request was sent as SSL/HTTPS request.", - "type": ["boolean", "null"] - }, - "remote_address": { - "description": "The network address sending the request. Should be obtained through standard APIs and not parsed from any headers like 'Forwarded'.", - "type": ["string", "null"] - } - } - }, - "url": { - "description": "A complete Url, with scheme, host and path.", - "type": "object", - "properties": { - "raw": { - "type": ["string", "null"], - "description": "The raw, unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", - "maxLength": 1024 - }, - "protocol": { - "type": ["string", "null"], - "description": "The protocol of the request, e.g. 'https:'.", - "maxLength": 1024 - }, - "full": { - "type": ["string", "null"], - "description": "The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top.", - "maxLength": 1024 - }, - "hostname": { - "type": ["string", "null"], - "description": "The hostname of the request, e.g. 'example.com'.", - "maxLength": 1024 - }, - "port": { - "type": ["string", "integer","null"], - "description": "The port of the request, e.g. '443'", - "maxLength": 1024 - }, - "pathname": { - "type": ["string", "null"], - "description": "The path of the request, e.g. '/search'", - "maxLength": 1024 - }, - "search": { - "description": "The search describes the query string of the request. It is expected to have values delimited by ampersands.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "hash": { - "type": ["string", "null"], - "description": "The hash of the request URL, e.g. 
'top'", - "maxLength": 1024 - } - } - }, - "cookies": { - "description": "A parsed key-value object of cookies", - "type": ["object", "null"] - } - }, - "required": ["url", "method"] - }, - "tags": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "user": { - "description": "Describes the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", - "$id": "docs/spec/user.json", - "title": "User", - "type": ["object", "null"], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. the primary key of the user", - "type": ["string", "integer", "null"], - "maxLength": 1024 - }, - "email": { - "description": "Email of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - }, - "username": { - "description": "The username of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "page": { - "description": "", - "type": ["object", "null"], - "properties": { - "referer": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": ["string", "null"] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": ["string", "null"] - } - } - }, - "service": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$id": "docs/spec/service.json", - "title": "Service", - "type": ["object", "null"], - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "framework": { - "description": "Name and version of the web framework used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "language": { - "description": "Name and version of the programming language used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": ["string", "null"], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "environment": { - "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "runtime": { - "description": "Name and version of the language runtime running this service", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "version": { - "description": "Version of the service emitting this event", - "type": ["string", "null"], - "maxLength": 1024 - }, - "node": { - "description": "Unique meaningful name of the service node.", - "type": ["object", "null"], - "properties": { - "configured_name": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - } - } - }, - "message": { - "$id": "docs/spec/message.json", - "title": "Message", - "description": "Details related to message receiving and publishing if the captured event integrates with a messaging system", - "type": ["object", "null"], - "properties": { - "queue": { - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the message queue where the message is received.", - "type": ["string","null"], - "maxLength": 1024 - } - } - }, - "age": { - "type": ["object", "null"], - "properties": { - "ms": { - "description": "The age of the message in milliseconds. If the instrumented messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.", - "type": ["integer", "null"] - } - } - }, - "body": { - "description": "messsage body, similar to an http request body", - "type": ["string", "null"] - }, - "headers": { - "description": "messsage headers, similar to http request headers", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } - } - } - }, - "culprit": { - "description": "Function call which was the primary perpetrator of this event.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "exception": { - "description": "Information about the originally thrown error.", - "type": ["object", "null"], - "properties": { - "code": { - "type": ["string", "integer", "null"], - "maxLength": 1024, - "description": "The error code set when the error happened, e.g. database error code." - }, - "message": { - "description": "The original error message.", - "type": ["string", "null"] - }, - "module": { - "description": "Describes the exception type's module namespace.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "attributes": { - "type": ["object", "null"] - }, - "stacktrace": { - "type": ["array", "null"], - "items": { - "$id": "docs/spec/stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "abs_path": { - "description": "The absolute path of the file involved in the stack frame", - "type": ["string", "null"] - }, - "colno": { - "description": "Column number", - "type": ["integer", "null"] - }, - "context_line": { - "description": "The line of code part of the stack frame", - "type": ["string", "null"] - }, - "filename": { - "description": "The relative filename of the code involved in the stack frame, used e.g. 
to do error checksumming", - "type": ["string", "null"] - }, - "classname": { - "description": "The classname of the code involved in the stack frame", - "type": ["string", "null"] - }, - "function": { - "description": "The function involved in the stack frame", - "type": ["string", "null"] - }, - "library_frame": { - "description": "A boolean, indicating if this frame is from a library or user code", - "type": ["boolean", "null"] - }, - "lineno": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": ["integer", "null"] - }, - "module": { - "description": "The module to which frame belongs to", - "type": ["string", "null"] - }, - "post_context": { - "description": "The lines of code after the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "pre_context": { - "description": "The lines of code before the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "vars": { - "description": "Local variables for this stack frame", - "type": ["object", "null"], - "properties": {} - } - }, - "anyOf": [ - { "required": ["filename"], "properties": {"filename": { "type": "string" }} }, - { "required": ["classname"], "properties": {"classname": { "type": "string" }} } - ] - }, - "minItems": 0 - }, - "type": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "handled": { - "type": ["boolean", "null"], - "description": "Indicator whether the error was caught somewhere in the code or not." - }, - "cause": { - "type": ["array", "null"], - "items": { - "type": ["object", "null"], - "description": "Recursive exception object" - }, - "minItems": 0, - "description": "Exception tree" - } - }, - "anyOf": [ - {"required": ["message"], "properties": {"message": {"type": "string"}}}, - {"required": ["type"], "properties": {"type": {"type": "string"}}} - ] - }, - "log": { - "type": ["object", "null"], - "description": "Additional information added when logging the error.", - "properties": { - "level": { - "description": "The severity of the record.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "logger_name": { - "description": "The name of the logger instance used.", - "type": ["string", "null"], - "default": "default", - "maxLength": 1024 - }, - "message": { - "description": "The additionally logged error message.", - "type": "string" - }, - "param_message": { - "description": "A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. 
The string is not interpreted, so feel free to use whichever placeholders makes sense in the client languange.", - "type": ["string", "null"], - "maxLength": 1024 - - }, - "stacktrace": { - "type": ["array", "null"], - "items": { - "$id": "docs/spec/stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "abs_path": { - "description": "The absolute path of the file involved in the stack frame", - "type": ["string", "null"] - }, - "colno": { - "description": "Column number", - "type": ["integer", "null"] - }, - "context_line": { - "description": "The line of code part of the stack frame", - "type": ["string", "null"] - }, - "filename": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": ["string", "null"] - }, - "classname": { - "description": "The classname of the code involved in the stack frame", - "type": ["string", "null"] - }, - "function": { - "description": "The function involved in the stack frame", - "type": ["string", "null"] - }, - "library_frame": { - "description": "A boolean, indicating if this frame is from a library or user code", - "type": ["boolean", "null"] - }, - "lineno": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": ["integer", "null"] - }, - "module": { - "description": "The module to which frame belongs to", - "type": ["string", "null"] - }, - "post_context": { - "description": "The lines of code after the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "pre_context": { - "description": "The lines of code before the stack frame", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - }, - "vars": { - "description": "Local variables for this stack frame", - "type": ["object", "null"], - "properties": {} - } - }, - "anyOf": [ - { "required": ["filename"], "properties": {"filename": { "type": "string" }} }, - { "required": ["classname"], "properties": {"classname": { "type": "string" }} } - ] - }, - "minItems": 0 - } - }, - "required": ["message"] - } - }, - "allOf": [ - { "required": ["id"] }, - { "if": {"required": ["transaction_id"], "properties": {"transaction_id": { "type": "string" }}}, - "then": { "required": ["trace_id", "parent_id"], "properties": {"trace_id": { "type": "string" }, "parent_id": {"type": "string"}}}}, - { "if": {"required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}}, - "then": { "required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}} }, - { "if": {"required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}}, - "then": { "required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}} } - ], - "anyOf": [ - { "required": ["exception"], "properties": {"exception": { "type": "object" }} }, - { "required": ["log"], "properties": {"log": { "type": "object" }} } - ] - } - ] -} -` diff --git a/model/error/generated/schema/rum_v3_error.go b/model/error/generated/schema/rum_v3_error.go deleted file mode 100644 index 9a7680f9430..00000000000 --- a/model/error/generated/schema/rum_v3_error.go +++ /dev/null @@ -1,684 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const RUMV3Schema = `{ - "$id": "docs/spec/errors/rum_v3_error.json", - "type": "object", - "description": "An error or a logged error message captured by an agent occurring in a monitored service", - "allOf": [ - { "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - } }, - { - "properties": { - "id": { - "type": ["string"], - "description": "Hex encoded 128 random bits ID of the error.", - "maxLength": 1024 - }, - "tid": { - "description": "Hex encoded 128 random bits ID of the correlated trace. Must be present if transaction_id and parent_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "xid": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction. Must be present if trace_id and parent_id are set.", - "maxLength": 1024 - }, - "pid": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Must be present if trace_id and transaction_id are set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "x": { - "type": ["object", "null"], - "description": "Data for correlating errors with transactions", - "properties": { - "sm": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "t": { - "type": ["string", "null"], - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } - }, - "c": { - "$id": "docs/spec/rum_v3_context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": [ - "object", - "null" - ], - "properties": { - "cu": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "r": { - "type": [ - "object", - "null" - ], - "allOf": [ - { - "properties": { - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "ts": { - "type": [ - "number", - "null" - ], - "description": "Total size of the payload." - }, - "ebs": { - "type": [ - "number", - "null" - ], - "description": "The encoded size of the payload." - }, - "dbs": { - "type": [ - "number", - "null" - ], - "description": "The decoded size of the payload." 
- }, - "he": { - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - } - } - } - ] - }, - "q": { - "properties": { - "en": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": [ - "object", - "null" - ], - "properties": {} - }, - "he": { - "description": "Should include any headers sent by the requester. Cookies will be taken by headers if supplied.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - }, - "hve": { - "description": "HTTP version.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "mt": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - } - }, - "required": [ - "mt" - ] - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "u": { - "$id": "docs/spec/rum_v3_user.json", - "title": "User", - "type": [ - "object", - "null" - ], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. the primary key of the user", - "type": [ - "string", - "integer", - "null" - ], - "maxLength": 1024 - }, - "em": { - "description": "Email of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "un": { - "description": "The username of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "p": { - "description": "", - "type": [ - "object", - "null" - ], - "properties": { - "rf": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": [ - "string", - "null" - ] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": [ - "string", - "null" - ] - } - } - }, - "se": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$id": "docs/spec/rum_v3_service.json", - "title": "Service", - "type": [ - "object", - "null" - ], - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. 
\"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "fw": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "la": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "en": { - "description": "Environment name of the service, e.g. \"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ru": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "ve": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - } - } - }, - "cu": { - "description": "Function call which was the primary perpetrator of this event.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ex": { - "description": "Information about the originally thrown error.", - "type": ["object", "null"], - "properties": { - "cd": { - "type": ["string", "integer", "null"], - "maxLength": 1024, - "description": "The error code set when the error happened, e.g. database error code." - }, - "mg": { - "description": "The original error message.", - "type": ["string", "null"] - }, - "mo": { - "description": "Describes the exception type's module namespace.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "at": { - "type": ["object", "null"] - }, - "st": { - "type": ["array", "null"], - "items": { - "$id": "docs/spec/rum_v3_stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "ap": { - "description": "The absolute path of the file involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "co": { - "description": "Column number", - "type": [ - "integer", - "null" - ] - }, - "cli": { - "description": "The line of code part of the stack frame", - "type": [ - "string", - "null" - ] - }, - "f": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": [ - "string", - "null" - ] - }, - "cn": { - "description": "The classname of the code involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "fn": { - "description": "The function involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "li": { - "description": "The line number of code part of the stack frame, used e.g. 
to do error checksumming", - "type": [ - "integer", - "null" - ] - }, - "mo": { - "description": "The module to which frame belongs to", - "type": [ - "string", - "null" - ] - }, - "poc": { - "description": "The lines of code after the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - }, - "prc": { - "description": "The lines of code before the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": [ - "f" - ] - }, - "minItems": 0 - }, - "t": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "hd": { - "type": ["boolean", "null"], - "description": "Indicator whether the error was caught somewhere in the code or not." - }, - "ca": { - "type": ["array", "null"], - "items": { - "type": ["object", "null"], - "description": "Recursive exception object" - }, - "minItems": 0, - "description": "Exception tree" - } - }, - "anyOf": [ - {"required": ["mg"], "properties": {"mg": {"type": "string"}}}, - {"required": ["t"], "properties": {"t": {"type": "string"}}} - ] - }, - "log": { - "type": ["object", "null"], - "description": "Additional information added when logging the error.", - "properties": { - "lv": { - "description": "The severity of the record.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ln": { - "description": "The name of the logger instance used.", - "type": ["string", "null"], - "default": "default", - "maxLength": 1024 - }, - "mg": { - "description": "The additionally logged error message.", - "type": "string" - }, - "pmg": { - "description": "A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. The string is not interpreted, so feel free to use whichever placeholders makes sense in the client languange.", - "type": ["string", "null"], - "maxLength": 1024 - - }, - "st": { - "type": ["array", "null"], - "items": { - "$id": "docs/spec/rum_v3_stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "ap": { - "description": "The absolute path of the file involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "co": { - "description": "Column number", - "type": [ - "integer", - "null" - ] - }, - "cli": { - "description": "The line of code part of the stack frame", - "type": [ - "string", - "null" - ] - }, - "f": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": [ - "string", - "null" - ] - }, - "cn": { - "description": "The classname of the code involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "fn": { - "description": "The function involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "li": { - "description": "The line number of code part of the stack frame, used e.g. 
to do error checksumming", - "type": [ - "integer", - "null" - ] - }, - "mo": { - "description": "The module to which frame belongs to", - "type": [ - "string", - "null" - ] - }, - "poc": { - "description": "The lines of code after the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - }, - "prc": { - "description": "The lines of code before the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": [ - "f" - ] - }, - "minItems": 0 - } - }, - "required": ["mg"] - } - }, - "allOf": [ - { "required": ["id"] }, - { "if": {"required": ["xid"], "properties": {"xid": { "type": "string" }}}, - "then": { "required": ["tid", "pid"], "properties": {"tid": { "type": "string" }, "pid": {"type": "string"}}}}, - { "if": {"required": ["tid"], "properties": {"tid": { "type": "string" }}}, - "then": { "required": ["pid"], "properties": {"pid": { "type": "string" }}} }, - { "if": {"required": ["pid"], "properties": {"pid": { "type": "string" }}}, - "then": { "required": ["tid"], "properties": {"tid": { "type": "string" }}} } - ], - "anyOf": [ - { "required": ["ex"], "properties": {"ex": { "type": "object" }} }, - { "required": ["log"], "properties": {"log": { "type": "object" }} } - ] - } - ] -} -` diff --git a/model/error_test.go b/model/error_test.go index 02bc5262228..ce61188c33f 100644 --- a/model/error_test.go +++ b/model/error_test.go @@ -19,102 +19,94 @@ package model import ( "context" - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "net" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/sourcemap/test" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/transform" - "github.com/elastic/beats/v7/libbeat/common" ) func baseException() *Exception { - msg := "exception message" - return &Exception{Message: &msg} + return &Exception{Message: "exception message"} } -func (e *Exception) withCode(code interface{}) *Exception { +func (e *Exception) withCode(code string) *Exception { e.Code = code return e } -func (e *Exception) withType(etype string) *Exception { - e.Type = &etype - return e -} - -func (e *Exception) withFrames(frames []*StacktraceFrame) *Exception { - e.Stacktrace = frames - return e -} - func baseLog() *Log { return &Log{Message: "error log message"} } -func (l *Log) withParamMsg(msg string) *Log { - l.ParamMessage = &msg - return l -} - -func (l *Log) withFrames(frames []*StacktraceFrame) *Log { - l.Stacktrace = frames - return l -} - func TestHandleExceptionTree(t *testing.T) { - event := &Error{ - ID: tests.StringPtr("id"), - Exception: &Exception{ - Message: tests.StringPtr("message0"), - Type: tests.StringPtr("type0"), - Stacktrace: Stacktrace{{ - Filename: tests.StringPtr("file0"), - }}, - Cause: []Exception{{ - Message: tests.StringPtr("message1"), - Type: tests.StringPtr("type1"), - }, { - Message: tests.StringPtr("message2"), - Type: tests.StringPtr("type2"), + event := APMEvent{ + Error: &Error{ + ID: "id", + Exception: &Exception{ + Message: "message0", + Type: "type0", + Stacktrace: Stacktrace{{ + Filename: "file0", + }}, Cause: []Exception{{ - Message: tests.StringPtr("message3"), - Type: tests.StringPtr("type3"), + Message: "message1", + Type: "type1", + }, { + Message: "message2", + Type: "type2", Cause: []Exception{{ - Message: 
tests.StringPtr("message4"), - Type: tests.StringPtr("type4"), - }, { - Message: tests.StringPtr("message5"), - Type: tests.StringPtr("type5"), + Message: "message3", + Type: "type3", + Cause: []Exception{{ + Message: "message4", + Type: "type4", + }, { + Message: "message5", + Type: "type5", + }}, }}, + }, { + Message: "message6", + Type: "type6", }}, - }, { - Message: tests.StringPtr("message6"), - Type: tests.StringPtr("type6"), - }}, + }, }, } - exceptions := flattenExceptionTree(event.Exception) - assert.Len(t, exceptions, 7) - for i, ex := range exceptions { - assert.Equal(t, fmt.Sprintf("message%d", i), *ex.Message) - assert.Equal(t, fmt.Sprintf("type%d", i), *ex.Type) - assert.Nil(t, ex.Cause) - } - assert.Equal(t, 0, *exceptions[2].Parent) - assert.Equal(t, 3, *exceptions[5].Parent) - assert.Equal(t, 0, *exceptions[6].Parent) + beatEvent := event.BeatEvent(context.Background()) + exceptionField, err := beatEvent.Fields.GetValue("error.exception") + require.NoError(t, err) + assert.Equal(t, []common.MapStr{{ + "message": "message0", + "stacktrace": []common.MapStr{{ + "exclude_from_grouping": false, + "filename": "file0", + }}, + "type": "type0", + }, { + "message": "message1", + "type": "type1", + }, { + "message": "message2", + "type": "type2", + "parent": 0, + }, { + "message": "message3", + "type": "type3", + }, { + "message": "message4", + "type": "type4", + }, { + "message": "message5", + "type": "type5", + "parent": 3, + }, { + "message": "message6", + "type": "type6", + "parent": 0, + }}, exceptionField) } func TestEventFields(t *testing.T) { @@ -122,19 +114,18 @@ func TestEventFields(t *testing.T) { culprit := "some trigger" errorType := "error type" - codeFloat := 13.0 module := "error module" exMsg := "exception message" handled := false attributes := common.MapStr{"k1": "val1"} exception := Exception{ - Type: &errorType, - Code: codeFloat, - Message: &exMsg, - Module: &module, + Type: errorType, + Code: "13", + Message: exMsg, + Module: module, Handled: &handled, Attributes: attributes, - Stacktrace: []*StacktraceFrame{{Filename: tests.StringPtr("st file")}}, + Stacktrace: []*StacktraceFrame{{Filename: "st file"}}, } level := "level" @@ -142,91 +133,55 @@ func TestEventFields(t *testing.T) { logMsg := "error log message" paramMsg := "param message" log := Log{ - Level: &level, + Level: level, Message: logMsg, - ParamMessage: ¶mMsg, - LoggerName: &loggerName, + ParamMessage: paramMsg, + LoggerName: loggerName, } - baseExceptionHash := md5.New() - io.WriteString(baseExceptionHash, *baseException().Message) - // 706a38d554b47b8f82c6b542725c05dc - baseExceptionGroupingKey := hex.EncodeToString(baseExceptionHash.Sum(nil)) - - baseLogHash := md5.New() - io.WriteString(baseLogHash, baseLog().Message) - baseLogGroupingKey := hex.EncodeToString(baseLogHash.Sum(nil)) - trID := "945254c5-67a5-417e-8a4e-aa29efcbfb79" tests := map[string]struct { Error Error Output common.MapStr }{ "minimal": { - Error: Error{}, - Output: common.MapStr{ - "grouping_key": hex.EncodeToString(md5.New().Sum(nil)), - }, + Error: Error{}, + Output: common.MapStr(nil), + }, + "withGroupingKey": { + Error: Error{GroupingKey: "foo"}, + Output: common.MapStr{"grouping_key": "foo"}, }, "withLog": { Error: Error{Log: baseLog()}, Output: common.MapStr{ - "log": common.MapStr{"message": "error log message"}, - "grouping_key": baseLogGroupingKey, + "log": common.MapStr{"message": "error log message"}, }, }, "withLogAndException": { Error: Error{Exception: baseException(), Log: baseLog()}, Output: common.MapStr{ - 
"exception": []common.MapStr{{"message": "exception message"}}, - "log": common.MapStr{"message": "error log message"}, - "grouping_key": baseExceptionGroupingKey, + "exception": []common.MapStr{{"message": "exception message"}}, + "log": common.MapStr{"message": "error log message"}, }, }, "withException": { Error: Error{Exception: baseException()}, Output: common.MapStr{ - "exception": []common.MapStr{{"message": "exception message"}}, - "grouping_key": baseExceptionGroupingKey, + "exception": []common.MapStr{{"message": "exception message"}}, }, }, "stringCode": { Error: Error{Exception: baseException().withCode("13")}, Output: common.MapStr{ - "exception": []common.MapStr{{"message": "exception message", "code": "13"}}, - "grouping_key": baseExceptionGroupingKey, - }, - }, - "intCode": { - Error: Error{Exception: baseException().withCode(13)}, - Output: common.MapStr{ - "exception": []common.MapStr{{"message": "exception message", "code": "13"}}, - "grouping_key": baseExceptionGroupingKey, - }, - }, - "floatCode": { - Error: Error{Exception: baseException().withCode(13.0)}, - Output: common.MapStr{ - "exception": []common.MapStr{{"message": "exception message", "code": "13"}}, - "grouping_key": baseExceptionGroupingKey, + "exception": []common.MapStr{{"message": "exception message", "code": "13"}}, }, }, "withFrames": { Error: Error{ - ID: &id, - Timestamp: time.Now(), - Culprit: &culprit, - Exception: &exception, - Log: &log, - TransactionID: trID, - RUM: true, - - // Service name and version are required for sourcemapping. - Metadata: Metadata{ - Service: Service{ - Name: "myservice", - Version: "myservice", - }, - }, + ID: id, + Culprit: culprit, + Exception: &exception, + Log: &log, }, Output: common.MapStr{ "id": "45678", @@ -235,10 +190,6 @@ func TestEventFields(t *testing.T) { "stacktrace": []common.MapStr{{ "filename": "st file", "exclude_from_grouping": false, - "sourcemap": common.MapStr{ - "error": "Colno mandatory for sourcemapping.", - "updated": false, - }, }}, "code": "13", "message": "exception message", @@ -253,573 +204,15 @@ func TestEventFields(t *testing.T) { "logger_name": "logger", "level": "level", }, - "grouping_key": "2725d2590215a6e975f393bf438f90ef", }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - output := tc.Error.Transform(context.Background(), &transform.Config{ - RUM: transform.RUMConfig{SourcemapStore: &sourcemap.Store{}}, - }) - require.Len(t, output, 1) - fields := output[0].Fields["error"] - assert.Equal(t, tc.Output, fields) + event := APMEvent{Error: &tc.Error} + beatEvent := event.BeatEvent(context.Background()) + assert.Equal(t, tc.Output, beatEvent.Fields["error"]) }) } } - -func TestEvents(t *testing.T) { - timestamp := time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, - time.FixedZone("+0100", 3600)) - timestampUs := timestamp.UnixNano() / 1000 - exMsg := "exception message" - trID := "945254c5-67a5-417e-8a4e-aa29efcbfb79" - sampledTrue, sampledFalse := true, false - transactionType := "request" - - email, userIP, userAgent := "m@m.com", "127.0.0.1", "js-1.0" - uid := "1234567889" - url, referer := "https://localhost", "http://localhost" - labels := Labels(common.MapStr{"key": true}) - custom := Custom(common.MapStr{"foo": "bar"}) - - serviceName, agentName, version := "myservice", "go", "1.0" - md := Metadata{ - Service: Service{ - Name: serviceName, Version: version, - Agent: Agent{Name: agentName, Version: version}, - }, - Labels: common.MapStr{"label": 101}, - } - - mdWithContext := md - mdWithContext.User = User{ID: 
uid, Email: email} - mdWithContext.Client.IP = net.ParseIP(userIP) - mdWithContext.UserAgent.Original = userAgent - - for name, tc := range map[string]struct { - Transformable transform.Transformable - Output common.MapStr - Msg string - }{ - "valid": { - Transformable: &Error{Timestamp: timestamp, Metadata: md}, - Output: common.MapStr{ - "agent": common.MapStr{"name": "go", "version": "1.0"}, - "service": common.MapStr{"name": "myservice", "version": "1.0"}, - "error": common.MapStr{ - "grouping_key": "d41d8cd98f00b204e9800998ecf8427e", - }, - "processor": common.MapStr{"event": "error", "name": "error"}, - "timestamp": common.MapStr{"us": timestampUs}, - "labels": common.MapStr{"label": 101}, - }, - }, - "notSampled": { - Transformable: &Error{Timestamp: timestamp, Metadata: md, TransactionSampled: &sampledFalse}, - Output: common.MapStr{ - "transaction": common.MapStr{"sampled": false}, - "agent": common.MapStr{"name": "go", "version": "1.0"}, - "service": common.MapStr{"name": "myservice", "version": "1.0"}, - "error": common.MapStr{ - "grouping_key": "d41d8cd98f00b204e9800998ecf8427e", - }, - "processor": common.MapStr{"event": "error", "name": "error"}, - "timestamp": common.MapStr{"us": timestampUs}, - "labels": common.MapStr{"label": 101}, - }, - }, - "withMeta": { - Transformable: &Error{Timestamp: timestamp, Metadata: md, TransactionType: &transactionType}, - Output: common.MapStr{ - "transaction": common.MapStr{"type": "request"}, - "error": common.MapStr{ - "grouping_key": "d41d8cd98f00b204e9800998ecf8427e", - }, - "processor": common.MapStr{"event": "error", "name": "error"}, - "service": common.MapStr{"name": "myservice", "version": "1.0"}, - "timestamp": common.MapStr{"us": timestampUs}, - "agent": common.MapStr{"name": "go", "version": "1.0"}, - "labels": common.MapStr{"label": 101}, - }, - }, - "withContext": { - Transformable: &Error{ - Timestamp: timestamp, - Metadata: mdWithContext, - Log: baseLog(), - Exception: &Exception{ - Message: &exMsg, - Stacktrace: Stacktrace{&StacktraceFrame{Filename: tests.StringPtr("myFile")}}, - }, - TransactionID: trID, - TransactionSampled: &sampledTrue, - Labels: &labels, - Page: &Page{URL: &URL{Original: &url}, Referer: &referer}, - Custom: &custom, - RUM: true, - }, - - Output: common.MapStr{ - "labels": common.MapStr{"key": true, "label": 101}, - "service": common.MapStr{"name": "myservice", "version": "1.0"}, - "agent": common.MapStr{"name": "go", "version": "1.0"}, - "user": common.MapStr{"id": uid, "email": email}, - "client": common.MapStr{"ip": userIP}, - "source": common.MapStr{"ip": userIP}, - "user_agent": common.MapStr{"original": userAgent}, - "error": common.MapStr{ - "custom": common.MapStr{ - "foo": "bar", - }, - "grouping_key": "a61a65e048f403d9bcb2863d517fb48d", - "log": common.MapStr{"message": "error log message"}, - "exception": []common.MapStr{{ - "message": "exception message", - "stacktrace": []common.MapStr{{ - "exclude_from_grouping": false, - "filename": "myFile", - "sourcemap": common.MapStr{ - "error": "Colno mandatory for sourcemapping.", - "updated": false, - }, - }}, - }}, - "page": common.MapStr{"url": url, "referer": referer}, - }, - "http": common.MapStr{ - "request": common.MapStr{ - "referrer": referer, - }, - }, - "url": common.MapStr{"original": url}, - "processor": common.MapStr{"event": "error", "name": "error"}, - "transaction": common.MapStr{"id": "945254c5-67a5-417e-8a4e-aa29efcbfb79", "sampled": true}, - "timestamp": common.MapStr{"us": timestampUs}, - }, - }, - } { - t.Run(name, func(t 
*testing.T) { - outputEvents := tc.Transformable.Transform(context.Background(), &transform.Config{ - RUM: transform.RUMConfig{SourcemapStore: &sourcemap.Store{}}, - }) - require.Len(t, outputEvents, 1) - outputEvent := outputEvents[0] - assert.Equal(t, tc.Output, outputEvent.Fields) - assert.Equal(t, timestamp, outputEvent.Timestamp) - - }) - } -} - -func TestCulprit(t *testing.T) { - c := "foo" - fct := "fct" - truthy := true - st := Stacktrace{ - &StacktraceFrame{Filename: tests.StringPtr("a"), Function: &fct}, - } - stUpdate := Stacktrace{ - &StacktraceFrame{Filename: tests.StringPtr("a"), Function: &fct}, - &StacktraceFrame{Filename: tests.StringPtr("a"), LibraryFrame: &truthy, SourcemapUpdated: &truthy}, - &StacktraceFrame{Filename: tests.StringPtr("f"), Function: &fct, SourcemapUpdated: &truthy}, - &StacktraceFrame{Filename: tests.StringPtr("bar"), Function: &fct, SourcemapUpdated: &truthy}, - } - store := &sourcemap.Store{} - tests := []struct { - event Error - config transform.Config - culprit string - msg string - }{ - { - event: Error{Culprit: &c}, - config: transform.Config{}, - culprit: "foo", - msg: "No Sourcemap in config", - }, - { - event: Error{Culprit: &c}, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "foo", - msg: "No Stacktrace Frame given.", - }, - { - event: Error{Culprit: &c, Log: &Log{Stacktrace: st}}, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "foo", - msg: "Log.StacktraceFrame has no updated frame", - }, - { - event: Error{ - Culprit: &c, - Log: &Log{ - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Filename: tests.StringPtr("f"), - Classname: tests.StringPtr("xyz"), - SourcemapUpdated: &truthy, - }, - }, - }, - }, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "f", - msg: "Adapt culprit to first valid Log.StacktraceFrame filename information.", - }, - { - event: Error{ - Culprit: &c, - Log: &Log{ - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Classname: tests.StringPtr("xyz"), - SourcemapUpdated: &truthy, - }, - }, - }, - }, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "xyz", - msg: "Adapt culprit Log.StacktraceFrame classname information.", - }, - { - event: Error{ - Culprit: &c, - Exception: &Exception{Stacktrace: stUpdate}, - }, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "f in fct", - msg: "Adapt culprit to first valid Exception.StacktraceFrame information.", - }, - { - event: Error{ - Culprit: &c, - Log: &Log{Stacktrace: st}, - Exception: &Exception{Stacktrace: stUpdate}, - }, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "f in fct", - msg: "Log and Exception StacktraceFrame given, only one changes culprit.", - }, - { - event: Error{ - Culprit: &c, - Log: &Log{ - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Filename: tests.StringPtr("a"), - Function: &fct, - SourcemapUpdated: &truthy, - }, - }, - }, - Exception: &Exception{Stacktrace: stUpdate}, - }, - config: transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}, - culprit: "a in fct", - msg: "Log Stacktrace is prioritized over Exception StacktraceFrame", - }, - } - for idx, test := range tests { - t.Run(fmt.Sprint(idx), func(t *testing.T) { - - test.event.updateCulprit(&test.config) - assert.Equal(t, test.culprit, *test.event.Culprit, - fmt.Sprintf("(%v) %s: expected <%v>, received <%v>", idx, test.msg, test.culprit, *test.event.Culprit)) 
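// A hedged aside, not part of this diff: the grouping-key tests further
// below all reduce to hex-encoding an MD5 over selected error attributes,
// in the same way as the md5With helper defined at the end of this file:
//
//	h := md5.New()                        // crypto/md5
//	io.WriteString(h, "file")             // e.g. the fallback filename attribute
//	key := hex.EncodeToString(h.Sum(nil)) // compared against calcGroupingKey
//
// Which attributes feed the hash (param message, exception type, frame
// function/module/filename) is decided by calcGroupingKey, which is not
// shown in this diff.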
- }) - } -} - -func TestErrorTransformPage(t *testing.T) { - id := "123" - urlExample := "http://example.com/path" - - tests := []struct { - Error Error - Output common.MapStr - Msg string - }{ - { - Error: Error{ - ID: &id, - Page: &Page{ - URL: ParseURL(urlExample, ""), - Referer: nil, - }, - }, - Output: common.MapStr{ - "domain": "example.com", - "full": "http://example.com/path", - "original": "http://example.com/path", - "path": "/path", - "scheme": "http", - }, - Msg: "With page URL", - }, - { - Error: Error{ - ID: &id, - Timestamp: time.Now(), - URL: ParseURL("https://localhost:8200/", ""), - Page: &Page{ - URL: ParseURL(urlExample, ""), - Referer: nil, - }, - }, - Output: common.MapStr{ - "domain": "localhost", - "full": "https://localhost:8200/", - "original": "https://localhost:8200/", - "path": "/", - "port": 8200, - "scheme": "https", - }, - Msg: "With Page URL and Request URL", - }, - } - - for idx, test := range tests { - output := test.Error.Transform(context.Background(), &transform.Config{}) - assert.Equal(t, test.Output, output[0].Fields["url"], fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) - } -} - -func TestEmptyGroupingKey(t *testing.T) { - emptyGroupingKey := hex.EncodeToString(md5.New().Sum(nil)) - e := Error{} - assert.Equal(t, emptyGroupingKey, e.calcGroupingKey(flattenExceptionTree(e.Exception))) -} - -func TestExplicitGroupingKey(t *testing.T) { - attr := "hello world" - diffAttr := "huhu" - - groupingKey := hex.EncodeToString(md5With(attr)) - - e1 := Error{Log: baseLog().withParamMsg(attr)} - e2 := Error{Exception: baseException().withType(attr)} - e3 := Error{Log: baseLog().withFrames([]*StacktraceFrame{{Function: &attr}})} - e4 := Error{Exception: baseException().withFrames([]*StacktraceFrame{{Function: &attr}})} - e5 := Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Function: &diffAttr}}), - Exception: baseException().withFrames([]*StacktraceFrame{{Function: &attr}}), - } - - for idx, e := range []Error{e1, e2, e3, e4, e5} { - assert.Equal(t, groupingKey, e.calcGroupingKey(flattenExceptionTree(e.Exception)), "grouping_key mismatch", idx) - } -} - -func TestFramesUsableForGroupingKey(t *testing.T) { - webpackLineno := 77 - tmpLineno := 45 - st1 := Stacktrace{ - &StacktraceFrame{Filename: tests.StringPtr("/a/b/c"), ExcludeFromGrouping: false}, - &StacktraceFrame{Filename: tests.StringPtr("webpack"), Lineno: &webpackLineno, ExcludeFromGrouping: false}, - &StacktraceFrame{Filename: tests.StringPtr("~/tmp"), Lineno: &tmpLineno, ExcludeFromGrouping: true}, - } - st2 := Stacktrace{ - &StacktraceFrame{Filename: tests.StringPtr("/a/b/c"), ExcludeFromGrouping: false}, - &StacktraceFrame{Filename: tests.StringPtr("webpack"), Lineno: &webpackLineno, ExcludeFromGrouping: false}, - &StacktraceFrame{Filename: tests.StringPtr("~/tmp"), Lineno: &tmpLineno, ExcludeFromGrouping: false}, - } - exMsg := "base exception" - e1 := Error{Exception: &Exception{Message: &exMsg, Stacktrace: st1}} - e2 := Error{Exception: &Exception{Message: &exMsg, Stacktrace: st2}} - key1 := e1.calcGroupingKey(flattenExceptionTree(e1.Exception)) - key2 := e2.calcGroupingKey(flattenExceptionTree(e2.Exception)) - assert.NotEqual(t, key1, key2) -} - -func TestFallbackGroupingKey(t *testing.T) { - lineno := 12 - filename := "file" - - groupingKey := hex.EncodeToString(md5With(filename)) - - e := Error{Exception: baseException().withFrames([]*StacktraceFrame{{Filename: &filename}})} - assert.Equal(t, groupingKey, e.calcGroupingKey(flattenExceptionTree(e.Exception))) - - e = Error{Exception: 
baseException(), Log: baseLog().withFrames([]*StacktraceFrame{{Lineno: &lineno, Filename: &filename}})} - assert.Equal(t, groupingKey, e.calcGroupingKey(flattenExceptionTree(e.Exception))) -} - -func TestNoFallbackGroupingKey(t *testing.T) { - lineno := 1 - function := "function" - filename := "file" - module := "module" - - groupingKey := hex.EncodeToString(md5With(module, function)) - - e := Error{ - Exception: baseException().withFrames([]*StacktraceFrame{ - {Lineno: &lineno, Module: &module, Filename: &filename, Function: &function}, - }), - } - assert.Equal(t, groupingKey, e.calcGroupingKey(flattenExceptionTree(e.Exception))) -} - -func TestGroupableEvents(t *testing.T) { - value := "value" - name := "name" - var tests = []struct { - e1 Error - e2 Error - result bool - }{ - { - e1: Error{ - Log: baseLog().withParamMsg(value), - }, - e2: Error{ - Log: baseLog().withParamMsg(value), - }, - result: true, - }, - { - e1: Error{ - Exception: baseException().withType(value), - }, - e2: Error{ - Log: baseLog().withParamMsg(value), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withParamMsg(value), Exception: baseException().withType(value), - }, - e2: Error{ - Log: baseLog().withParamMsg(value), Exception: baseException().withType(value), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withParamMsg(value), Exception: baseException().withType(value), - }, - e2: Error{ - Log: baseLog().withParamMsg(value), - }, - result: false, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Function: &value}}), - }, - e2: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Function: &value}}), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{}), - }, - e2: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{}), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{}), - }, - e2: Error{}, - result: false, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Module: &value}}), - }, - e2: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Filename: &value}}), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Filename: &name}}), - }, - e2: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Module: &value, Filename: &name}}), - }, - result: false, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Module: &value, Filename: &name}}), - }, - e2: Error{ - Exception: baseException().withFrames([]*StacktraceFrame{{Module: &value, Filename: tests.StringPtr("nameEx")}}), - }, - result: true, - }, - { - e1: Error{ - Log: baseLog().withFrames([]*StacktraceFrame{{Filename: &name}}), - }, - e2: Error{ - Exception: baseException().withFrames([]*StacktraceFrame{{Filename: &name}}), - }, - result: true, - }, - } - - for idx, test := range tests { - sameGroup := test.e1.calcGroupingKey(flattenExceptionTree(test.e1.Exception)) == - test.e2.calcGroupingKey(flattenExceptionTree(test.e2.Exception)) - assert.Equal(t, test.result, sameGroup, - "grouping_key mismatch", idx) - } -} - -func md5With(args ...string) []byte { - md5 := md5.New() - for _, arg := range args { - md5.Write([]byte(arg)) - } - return md5.Sum(nil) -} - -func TestSourcemapping(t *testing.T) { - event := Error{ - Metadata: Metadata{ - Service: Service{ - Name: "foo", - Version: "bar", - }, - }, - Exception: &Exception{ - Message: tests.StringPtr("exception message"), - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Filename: tests.StringPtr("/a/b/c"), - 
Lineno: tests.IntPtr(1), - Colno: tests.IntPtr(23), - AbsPath: tests.StringPtr("../a/b"), - }, - }, - }, - RUM: true, - } - - // transform without sourcemap store - transformedNoSourcemap := event.fields(context.Background(), &transform.Config{}) - assert.Equal(t, 1, *event.Exception.Stacktrace[0].Lineno) - - // transform with sourcemap store - store, err := sourcemap.NewStore(test.ESClientWithValidSourcemap(t), "apm-*sourcemap*", time.Minute) - require.NoError(t, err) - transformedWithSourcemap := event.fields(context.Background(), &transform.Config{ - RUM: transform.RUMConfig{SourcemapStore: store}, - }) - assert.Equal(t, 5, *event.Exception.Stacktrace[0].Lineno) - - assert.NotEqual(t, transformedNoSourcemap["exception"], transformedWithSourcemap["exception"]) - assert.NotEqual(t, transformedNoSourcemap["grouping_key"], transformedWithSourcemap["grouping_key"]) -} diff --git a/model/event.go b/model/event.go new file mode 100644 index 00000000000..376c7b623d8 --- /dev/null +++ b/model/event.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "time" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// Event holds information about an event, in ECS terms. +// +// https://www.elastic.co/guide/en/ecs/current/ecs-event.html +type Event struct { + // Duration holds the event duration. + // + // TODO(axw) emit an `event.duration` field with the duration in + // nanoseconds, in 8.0. For now we emit event-specific duration fields. + // See https://github.com/elastic/apm-server/issues/5999 + Duration time.Duration + + // Outcome holds the event outcome: "success", "failure", or "unknown". + Outcome string +} + +func (e *Event) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("outcome", e.Outcome) + return common.MapStr(fields) +} diff --git a/model/experience.go b/model/experience.go index 7dda281ed9c..1b0b94e436a 100644 --- a/model/experience.go +++ b/model/experience.go @@ -34,6 +34,22 @@ type UserExperience struct { // TotalBlockingTime holds the Total Blocking Time (TBT) metric value, // or a negative value if TBT is unknown. See https://web.dev/tbt/ TotalBlockingTime float64 + + // Longtask holds longtask metrics. If Longtask.Count is negative, + // then Longtask is considered unset. See https://www.w3.org/TR/longtasks/ + Longtask LongtaskMetrics +} + +// LongtaskMetrics holds metrics related to RUM longtasks. +type LongtaskMetrics struct { + // Count holds the number of longtasks, or a negative value if unknown. + Count int + + // Sum holds the sum of longtask durations. + Sum float64 + + // Max holds the maximum longtask duration. 
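// A hedged illustration, mirroring the experience_test.go expectations
// below: given
//
//	u := UserExperience{
//		CumulativeLayoutShift: 1,
//		FirstInputDelay:       2.3,
//		TotalBlockingTime:     4.56,
//		Longtask:              LongtaskMetrics{Count: 3, Sum: 2, Max: 1},
//	}
//
// u.Fields() includes "longtask": {"count": 3, "sum": 2, "max": 1}, while
// a negative Longtask.Count omits the "longtask" key entirely.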
+ Max float64 } func (u *UserExperience) Fields() common.MapStr { @@ -50,5 +66,12 @@ func (u *UserExperience) Fields() common.MapStr { if u.TotalBlockingTime >= 0 { fields.set("tbt", u.TotalBlockingTime) } + if u.Longtask.Count >= 0 { + fields.set("longtask", common.MapStr{ + "count": u.Longtask.Count, + "sum": u.Longtask.Sum, + "max": u.Longtask.Max, + }) + } return common.MapStr(fields) } diff --git a/model/experience_test.go b/model/experience_test.go index 108a48447e0..e2f40f99e21 100644 --- a/model/experience_test.go +++ b/model/experience_test.go @@ -37,6 +37,7 @@ func TestUserExperienceFields(t *testing.T) { CumulativeLayoutShift: -1, FirstInputDelay: -1, TotalBlockingTime: -1, + Longtask: LongtaskMetrics{Count: -1}, }, Expected: nil, }, { @@ -44,11 +45,21 @@ func TestUserExperienceFields(t *testing.T) { CumulativeLayoutShift: 1, FirstInputDelay: 2.3, TotalBlockingTime: 4.56, + Longtask: LongtaskMetrics{ + Count: 3, + Sum: 2, + Max: 1, + }, }, Expected: common.MapStr{ "cls": 1.0, "fid": 2.3, "tbt": 4.56, + "longtask": common.MapStr{ + "count": 3, + "sum": 2.0, + "max": 1.0, + }, }, }} for _, test := range tests { diff --git a/model/host.go b/model/host.go new file mode 100644 index 00000000000..4c52130bf21 --- /dev/null +++ b/model/host.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "net" + + "github.com/elastic/beats/v7/libbeat/common" +) + +type Host struct { + // Hostname holds the detected hostname of the host. + Hostname string + + // Name holds the user-defined name of the host, or the + // detected hostname. + Name string + + // ID holds a unique ID for the host. + ID string + + // Architecture holds the host machine architecture. + Architecture string + + // Type holds the host type, e.g. cloud instance machine type. + Type string + + // IP holds the host IP address. + // + // TODO(axw) this should be a slice. + IP net.IP + + // OS holds information about the operating system running on the host. + OS OS +} + +func (h *Host) fields() common.MapStr { + if h == nil { + return nil + } + var fields mapStr + fields.maybeSetString("hostname", h.Hostname) + fields.maybeSetString("name", h.Name) + fields.maybeSetString("architecture", h.Architecture) + fields.maybeSetString("type", h.Type) + if h.IP != nil { + fields.set("ip", h.IP.String()) + } + fields.maybeSetMapStr("os", h.OS.fields()) + return common.MapStr(fields) +} diff --git a/model/host_test.go b/model/host_test.go new file mode 100644 index 00000000000..5dbe9d98fb0 --- /dev/null +++ b/model/host_test.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "context" + "encoding/json" + "net" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/approvaltest" +) + +func TestSystemTransformation(t *testing.T) { + detected, configured := "host", "custom hostname" + + for name, host := range map[string]Host{ + "hostname": {Hostname: detected}, + "ignored hostname": {Name: configured}, + "full hostname info": {Hostname: detected, Name: configured}, + "full": { + Hostname: detected, + Name: configured, + Architecture: "amd", + Type: "t2.medium", + IP: net.ParseIP("127.0.0.1"), + OS: OS{ + Platform: "osx", + Full: "Mac OS Mojave", + Type: "macos", + }, + }, + } { + t.Run(name, func(t *testing.T) { + event := &APMEvent{Host: host, Transaction: &Transaction{}} + beatEvent := event.BeatEvent(context.Background()) + + resultJSON, err := json.Marshal(beatEvent.Fields["host"]) + require.NoError(t, err) + name := filepath.Join("test_approved", "host", strings.ReplaceAll(name, " ", "_")) + approvaltest.ApproveJSON(t, name, resultJSON) + }) + } +} diff --git a/model/http.go b/model/http.go new file mode 100644 index 00000000000..8d1305c0b6d --- /dev/null +++ b/model/http.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// HTTP holds information about an HTTP request and/or response. +type HTTP struct { + Version string + Request *HTTPRequest + Response *HTTPResponse +} + +// HTTPRequest holds information about an HTTP request. +type HTTPRequest struct { + Method string + Referrer string + Body interface{} + + // Non-ECS fields: + + Headers common.MapStr + Env common.MapStr + Cookies common.MapStr +} + +// HTTPResponse holds information about an HTTP response. 
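// A hedged illustration of the mapping implemented by the fields()
// methods below: a value such as
//
//	h := HTTP{
//		Version:  "1.1",
//		Request:  &HTTPRequest{Method: "GET", Referrer: "http://example.com"},
//		Response: &HTTPResponse{StatusCode: 200},
//	}
//
// renders as
//
//	{"version": "1.1",
//	 "request": {"method": "GET", "referrer": "http://example.com"},
//	 "response": {"status_code": 200}}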
+type HTTPResponse struct { + StatusCode int + + // Non-ECS fields: + + Headers common.MapStr + Finished *bool + HeadersSent *bool + TransferSize *float64 + EncodedBodySize *float64 + DecodedBodySize *float64 +} + +func (h *HTTP) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("version", h.Version) + if h.Request != nil { + fields.maybeSetMapStr("request", h.Request.fields()) + } + if h.Response != nil { + fields.maybeSetMapStr("response", h.Response.fields()) + } + return common.MapStr(fields) +} + +func (h *HTTPRequest) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("method", h.Method) + fields.maybeSetString("referrer", h.Referrer) + fields.maybeSetMapStr("headers", h.Headers) + fields.maybeSetMapStr("env", h.Env) + fields.maybeSetMapStr("cookies", h.Cookies) + if h.Body != nil { + fields.set("body.original", h.Body) + } + return common.MapStr(fields) +} + +func (h *HTTPResponse) fields() common.MapStr { + var fields mapStr + if h.StatusCode > 0 { + fields.set("status_code", h.StatusCode) + } + fields.maybeSetMapStr("headers", h.Headers) + fields.maybeSetBool("finished", h.Finished) + fields.maybeSetBool("headers_sent", h.HeadersSent) + fields.maybeSetFloat64ptr("transfer_size", h.TransferSize) + fields.maybeSetFloat64ptr("encoded_body_size", h.EncodedBodySize) + fields.maybeSetFloat64ptr("decoded_body_size", h.DecodedBodySize) + return common.MapStr(fields) +} diff --git a/model/labels.go b/model/labels.go new file mode 100644 index 00000000000..85951e3d8ed --- /dev/null +++ b/model/labels.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "strings" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// Label keys are sanitized, replacing the reserved characters '.', '*' and '"' +// with '_'. Null-valued labels are omitted. +func sanitizeLabels(labels common.MapStr) common.MapStr { + for k, v := range labels { + if v == nil { + delete(labels, k) + continue + } + if k2 := sanitizeLabelKey(k); k != k2 { + delete(labels, k) + labels[k2] = v + } + } + return labels +} + +func sanitizeLabelKey(k string) string { + return strings.Map(replaceReservedLabelKeyRune, k) +} + +func replaceReservedLabelKeyRune(r rune) rune { + switch r { + case '.', '*', '"': + return '_' + } + return r +} diff --git a/model/log.go b/model/log.go new file mode 100644 index 00000000000..97f44c6a54f --- /dev/null +++ b/model/log.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +const ( + AppLogsDataset = "apm.app" +) + +var ( + // LogProcessor is the Processor value that should be assigned to log events. + LogProcessor = Processor{Name: "log", Event: "log"} +) diff --git a/model/mapstr.go b/model/mapstr.go index d918f0d7dd4..e729159ec28 100644 --- a/model/mapstr.go +++ b/model/mapstr.go @@ -36,6 +36,30 @@ func (m *mapStr) maybeSetString(k, v string) bool { return false } +func (m *mapStr) maybeSetBool(k string, v *bool) bool { + if v != nil { + m.set(k, *v) + return true + } + return false +} + +func (m *mapStr) maybeSetIntptr(k string, v *int) bool { + if v != nil { + m.set(k, *v) + return true + } + return false +} + +func (m *mapStr) maybeSetFloat64ptr(k string, v *float64) bool { + if v != nil { + m.set(k, *v) + return true + } + return false +} + func (m *mapStr) maybeSetMapStr(k string, v common.MapStr) bool { if len(v) > 0 { m.set(k, v) diff --git a/model/message.go b/model/message.go index 8451aec0295..75f7a76903f 100644 --- a/model/message.go +++ b/model/message.go @@ -21,16 +21,14 @@ import ( "net/http" "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/utility" ) // Message holds information about a recorded message, such as the message body and meta information type Message struct { - Body *string + Body string Headers http.Header AgeMillis *int - QueueName *string + QueueName string } // Fields returns a MapStr holding the transformed message information @@ -38,14 +36,16 @@ func (m *Message) Fields() common.MapStr { if m == nil { return nil } - fields := common.MapStr{} - if m.QueueName != nil { - utility.Set(fields, "queue", common.MapStr{"name": m.QueueName}) + var fields mapStr + if m.QueueName != "" { + fields.set("queue", common.MapStr{"name": m.QueueName}) } if m.AgeMillis != nil { - utility.Set(fields, "age", common.MapStr{"ms": m.AgeMillis}) + fields.set("age", common.MapStr{"ms": *m.AgeMillis}) + } + fields.maybeSetString("body", m.Body) + if len(m.Headers) > 0 { + fields.set("headers", m.Headers) } - utility.Set(fields, "body", m.Body) - utility.Set(fields, "headers", m.Headers) - return fields + return common.MapStr(fields) } diff --git a/model/message_test.go b/model/message_test.go index bc26db0d7e0..2778975f67f 100644 --- a/model/message_test.go +++ b/model/message_test.go @@ -34,11 +34,11 @@ func TestMessaging_Fields(t *testing.T) { require.Nil(t, m.Fields()) m = &Message{} - require.Equal(t, common.MapStr{}, m.Fields()) + require.Nil(t, m.Fields()) m = &Message{ - QueueName: tests.StringPtr("orders"), - Body: tests.StringPtr("order confirmed"), + QueueName: "orders", + Body: "order confirmed", Headers: http.Header{"Internal": []string{"false"}, "Services": []string{"user", "order"}}, AgeMillis: tests.IntPtr(1577958057123), } diff --git a/model/metadata.go b/model/metadata.go deleted file mode 100644 index 9729cbf7e0c..00000000000 --- a/model/metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -// Licensed to 
Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package model - -import ( - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/utility" -) - -type Metadata struct { - Service Service - Process Process - System System - User User - UserAgent UserAgent - Client Client - Cloud Cloud - Labels common.MapStr -} - -func (m *Metadata) Set(out common.MapStr) common.MapStr { - fields := (*mapStr)(&out) - fields.maybeSetMapStr("service", m.Service.Fields(m.System.Container.ID, m.System.name())) - fields.maybeSetMapStr("agent", m.Service.AgentFields()) - fields.maybeSetMapStr("host", m.System.fields()) - fields.maybeSetMapStr("process", m.Process.fields()) - fields.maybeSetMapStr("user", m.User.Fields()) - fields.maybeSetMapStr("client", m.Client.fields()) - fields.maybeSetMapStr("user_agent", m.UserAgent.fields()) - fields.maybeSetMapStr("container", m.System.containerFields()) - fields.maybeSetMapStr("kubernetes", m.System.kubernetesFields()) - fields.maybeSetMapStr("cloud", m.Cloud.fields()) - if len(m.Labels) > 0 { - // These labels are merged with event-specific labels, - // hence we clone the map to avoid updating the shared - // metadata map. - // - // TODO(axw) we should only clone as needed or, better, - // avoid cloning altogether. For example, we could use - // DeepUpdateNoOverwrite in the other direction to copy - // the shared map into an event-specific labels map. - utility.Set(out, "labels", m.Labels) - } - return out -} diff --git a/model/metadata/generated/schema/metadata.go b/model/metadata/generated/schema/metadata.go deleted file mode 100644 index 5e20b0daf22..00000000000 --- a/model/metadata/generated/schema/metadata.go +++ /dev/null @@ -1,436 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
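// A hedged illustration of the label handling that replaces the cloning
// logic deleted above, using the sanitizeLabels helper added in
// model/labels.go earlier in this diff:
//
//	labels := common.MapStr{"a.b": "v", "c*d": true, "drop": nil}
//	labels = sanitizeLabels(labels)
//	// labels == common.MapStr{"a_b": "v", "c_d": true}
//
// The reserved characters '.', '*' and '"' become '_', and nil-valued
// labels are dropped, per the comment on sanitizeLabels.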
- -package schema - -const ModelSchema = `{ - "$id": "docs/spec/metadata.json", - "title": "Metadata", - "description": "Metadata concerning the other objects in the stream.", - "type": "object", - "properties": { - "service": { - "type": [ - "object" - ], - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object" - ], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string" - ], - "maxLength": 1024, - "minLength": 1 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "name", - "version" - ] - }, - "framework": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "language": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "name" - ] - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024, - "minLength": 1 - }, - "environment": { - "description": "Environment name of the service, e.g. \"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "runtime": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "version": { - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "name", - "version" - ] - }, - "version": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "node": { - "description": "Unique meaningful name of the service node.", - "type": [ - "object", - "null" - ], - "properties": { - "configured_name": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - } - }, - "required": [ - "name", - "agent" - ] - }, - "process": { - "$id": "docs/spec/process.json", - "title": "Process", - "type": ["object", "null"], - "properties": { - "pid": { - "description": "Process ID of the service", - "type": ["integer"] - }, - "ppid": { - "description": "Parent process ID of the service", - "type": ["integer", "null"] - }, - "title": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "argv": { - "description": "Command line arguments used to start this process", - "type": ["array", "null"], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": ["pid"] - }, - "system": { - "$id": "docs/spec/system.json", - "title": "System", - "type": ["object", "null"], - "properties": { - "architecture": { - "description": "Architecture of the system the agent is running on.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "hostname": { - "description": "Deprecated. Hostname of the system the agent is running on. 
Will be ignored if kubernetes information is set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "detected_hostname": { - "description": "Hostname of the host the monitored service is running on. It normally contains what the hostname command returns on the host machine. Will be ignored if kubernetes information is set, otherwise should always be set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "configured_hostname": { - "description": "Name of the host the monitored service is running on. It should only be set when configured by the user. If empty, will be set to detected_hostname or derived from kubernetes information if provided.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "platform": { - "description": "Name of the system platform the agent is running on.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "container": { - "properties": { - "id" : { - "description": "Container ID", - "type": ["string"], - "maxLength": 1024 - } - }, - "required": ["id"] - }, - "kubernetes": { - "properties": { - "namespace": { - "description": "Kubernetes namespace", - "type": ["string", "null"], - "maxLength": 1024 - }, - "pod":{ - "properties": { - "name": { - "description": "Kubernetes pod name", - "type": ["string", "null"], - "maxLength": 1024 - }, - "uid": { - "description": "Kubernetes pod uid", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "node":{ - "properties": { - "name": { - "description": "Kubernetes node name", - "type": ["string", "null"], - "maxLength": 1024 - } - } - } - } - } - } - }, - "user": { - "description": "Describes the authenticated User for a request.", - "$id": "docs/spec/user.json", - "title": "User", - "type": ["object", "null"], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. the primary key of the user", - "type": ["string", "integer", "null"], - "maxLength": 1024 - }, - "email": { - "description": "Email of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - }, - "username": { - "description": "The username of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "cloud": { - "$id": "docs/spec/cloud.json", - "title": "Cloud", - "type": [ - "object", - "null" - ], - "properties": { - "account": { - "properties": { - "id": { - "description": "Cloud account ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud account name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "availability_zone": { - "description": "Cloud availability zone name. e.g. us-east-1a", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "instance": { - "properties": { - "id": { - "description": "Cloud instance/machine ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud instance/machine name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "machine": { - "properties": { - "type": { - "description": "Cloud instance/machine type", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "project": { - "properties": { - "id": { - "description": "Cloud project ID", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "name": { - "description": "Cloud project name", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "provider": { - "description": "Cloud provider name. e.g. 
aws, azure, gcp, digitalocean.", - "type": [ - "string" - ], - "maxLength": 1024 - }, - "region": { - "description": "Cloud region name. e.g. us-east-1", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "provider" - ] - }, - "labels": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - } - }, - "required": [ - "service" - ] -}` diff --git a/model/metadata/generated/schema/rum_v3_metadata.go b/model/metadata/generated/schema/rum_v3_metadata.go deleted file mode 100644 index ba12ea232c3..00000000000 --- a/model/metadata/generated/schema/rum_v3_metadata.go +++ /dev/null @@ -1,170 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const RUMV3Schema = `{ - "$id": "docs/spec/rum_v3_metadata.json", - "title": "Metadata", - "description": "Metadata concerning the other objects in the stream.", - "type": [ - "object" - ], - "properties": { - "se": { - "$id": "docs/spec/rum_v3_service.json", - "title": "Service", - "type": [ - "object" - ], - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string" - ], - "minLength": 1, - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "n", - "ve" - ] - }, - "fw": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "la": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "n" - ] - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "minLength": 1, - "maxLength": 1024 - }, - "en": { - "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ru": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string" - ], - "maxLength": 1024 - } - }, - "required": [ - "n", - "ve" - ] - }, - "ve": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "a", - "n" - ] - } - }, - "required": [ - "se" - ] -}` diff --git a/model/metadata_test.go b/model/metadata_test.go deleted file mode 100644 index 85646949dd8..00000000000 --- a/model/metadata_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package model - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/tests" -) - -func TestMetadata_Set(t *testing.T) { - pid := 1234 - host := "host" - containerID := "container-123" - serviceName, serviceNodeName := "myservice", "serviceABC" - uid := "12321" - mail := "user@email.com" - agentName := "elastic-node" - - for _, test := range []struct { - input Metadata - fields common.MapStr - output common.MapStr - }{ - { - input: Metadata{ - Service: Service{ - Name: serviceName, - Node: ServiceNode{Name: serviceNodeName}, - Agent: Agent{ - Name: agentName, - Version: agentVersion, - }, - }, - System: System{DetectedHostname: host, Container: Container{ID: containerID}}, - Process: Process{Pid: pid}, - User: User{ID: uid, Email: mail}, - }, - fields: common.MapStr{ - "foo": "bar", - "user": common.MapStr{ - "email": "override@email.com", - }, - }, - output: common.MapStr{ - "foo": "bar", - "agent": common.MapStr{"version": "1.0.0", "name": "elastic-node"}, - "container": common.MapStr{"id": containerID}, - "host": common.MapStr{"hostname": host, "name": host}, - "process": common.MapStr{"pid": pid}, - "service": common.MapStr{ - "name": "myservice", - "node": common.MapStr{"name": serviceNodeName}, - }, - "user": common.MapStr{"id": "12321", "email": "user@email.com"}, - }, - }, - { - input: Metadata{ - Service: Service{}, - System: System{DetectedHostname: host, Container: Container{ID: containerID}}, - }, - fields: common.MapStr{}, - output: common.MapStr{ - "host": common.MapStr{"hostname": host, "name": host}, - "container": common.MapStr{"id": containerID}, - "service": common.MapStr{"node": common.MapStr{"name": containerID}}}, - }, - { - input: Metadata{ - Service: Service{}, - System: System{DetectedHostname: host}, - }, - fields: common.MapStr{}, - output: common.MapStr{ - 
"host": common.MapStr{"hostname": host, "name": host}, - "service": common.MapStr{"node": common.MapStr{"name": host}}}, - }, - } { - assert.Equal(t, test.output, test.input.Set(test.fields)) - } -} - -func BenchmarkMetadataSet(b *testing.B) { - test := func(b *testing.B, name string, input Metadata) { - b.Run(name, func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - - out := make(common.MapStr) - for i := 0; i < b.N; i++ { - input.Set(out) - for k := range out { - delete(out, k) - } - } - }) - } - - test(b, "minimal", Metadata{ - Service: Service{ - Name: "foo", - Version: "1.0", - }, - }) - test(b, "full", Metadata{ - Service: Service{ - Name: "foo", - Version: "1.0", - Environment: "production", - Node: ServiceNode{Name: "foo-bar"}, - Language: Language{Name: "go", Version: "++"}, - Runtime: Runtime{Name: "gc", Version: "1.0"}, - Framework: Framework{Name: "never", Version: "again"}, - Agent: Agent{Name: "go", Version: "2.0"}, - }, - Process: Process{ - Pid: 123, - Ppid: tests.IntPtr(122), - Title: "case", - Argv: []string{"apm-server"}, - }, - System: System{ - DetectedHostname: "detected", - ConfiguredHostname: "configured", - Architecture: "x86_64", - Platform: "linux", - IP: net.ParseIP("10.1.1.1"), - Container: Container{ID: "docker"}, - Kubernetes: Kubernetes{ - Namespace: "system", - NodeName: "node01", - PodName: "pet", - PodUID: "cattle", - }, - }, - User: User{ - ID: "123", - Email: "me@example.com", - Name: "bob", - }, - UserAgent: UserAgent{ - Original: "user-agent", - }, - Client: Client{ - IP: net.ParseIP("10.1.1.2"), - }, - Labels: common.MapStr{"k": "v", "n": 1, "f": 1.5, "b": false}, - }) -} diff --git a/model/metricset.go b/model/metricset.go index 4b1a7f57de8..09725245d8f 100644 --- a/model/metricset.go +++ b/model/metricset.go @@ -18,81 +18,77 @@ package model import ( - "context" "time" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/libbeat/monitoring" - - logs "github.com/elastic/apm-server/log" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" ) const ( - metricsetProcessorName = "metric" - metricsetDocType = "metric" - metricsetEventKey = "event" - metricsetTransactionKey = "transaction" - metricsetSpanKey = "span" + AppMetricsDataset = "apm.app" + InternalMetricsDataset = "apm.internal" ) var ( - metricsetMetrics = monitoring.Default.NewRegistry("apm-server.processor.metric") - metricsetTransformations = monitoring.NewInt(metricsetMetrics, "transformations") - metricsetProcessorEntry = common.MapStr{"name": metricsetProcessorName, "event": metricsetDocType} + // MetricsetProcessor is the Processor value that should be assigned to metricset events. + MetricsetProcessor = Processor{Name: "metric", Event: "metric"} ) -// Metricset describes a set of metrics and associated metadata. -type Metricset struct { - // Timestamp holds the time at which the metrics were published. - Timestamp time.Time - - // Metadata holds common metadata describing the entities with which - // the metrics are associated: service, system, etc. - Metadata Metadata +// MetricType describes the type of a metric: gauge, counter, or histogram. +type MetricType string - // Event holds information about the event category with which the - // metrics are associated. 
- Event MetricsetEventCategorization - - // Transaction holds information about the transaction group with - // which the metrics are associated. - Transaction MetricsetTransaction - - // Span holds information about the span types with which the - // metrics are associated. - Span MetricsetSpan - - // Labels holds arbitrary labels to apply to the metrics. - // - // These labels override any with the same names in Metadata.Labels. - Labels common.MapStr +// Valid MetricType values. +const ( + MetricTypeGauge MetricType = "gauge" + MetricTypeCounter MetricType = "counter" + MetricTypeHistogram MetricType = "histogram" +) +// Metricset describes a set of metrics and associated metadata. +type Metricset struct { // Samples holds the metrics in the set. - Samples []Sample + Samples map[string]MetricsetSample // TimeseriesInstanceID holds an optional identifier for the timeseries // instance, such as a hash of the labels used for aggregating the // metrics. TimeseriesInstanceID string -} -// Sample represents a single named metric. -// -// TODO(axw) consider renaming this to "MetricSample" or similar, as -// "Sample" isn't very meaningful in the context of the model package. -type Sample struct { - // Name holds the metric name. + // Name holds an optional name for the metricset. Name string + // DocCount holds the document count for pre-aggregated metrics. + // + // See https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-doc-count-field.html + DocCount int64 +} + +// MetricsetSample represents a single named metric. +type MetricsetSample struct { + // Type holds an optional metric type. + // + // If Type is unspecified or invalid, it will be ignored. + Type MetricType + + // Unit holds an optional unit: + // + // - "percent" (value is in the range [0,1]) + // - "byte" + // - a time unit: "nanos", "micros", "ms", "s", "m", "h", "d" + // + // If Unit is unspecified or invalid, it will be ignored. + Unit string + // Value holds the metric value for single-value metrics. // // If Counts and Values are specified, then Value will be ignored. Value float64 + // Histogram holds bucket values and counts for histogram metrics. + Histogram +} + +// Histogram holds bucket values and counts for a histogram metric. +type Histogram struct { // Values holds the bucket values for histogram metrics. // // These values must be provided in ascending order. @@ -108,121 +104,60 @@ type Sample struct { Counts []int64 } -// MetricsetEventCategorization holds ECS Event Categorization fields -// for inclusion in metrics. Typically these fields will have been -// included in the metric aggregation logic. -// -// See https://www.elastic.co/guide/en/ecs/current/ecs-category-field-values-reference.html -type MetricsetEventCategorization struct { - // Outcome holds the event outcome: "success", "failure", or "unknown". - Outcome string -} - -// MetricsetTransaction provides enough information to connect a metricset to the related kind of transactions. -type MetricsetTransaction struct { - // Name holds the transaction name: "GET /foo", etc. - Name string - - // Type holds the transaction type: "request", "message", etc. - Type string - - // Result holds the transaction result: "HTTP 2xx", "OK", "Error", etc. - Result string - - // Root indicates whether or not the transaction is the trace root. - // - // If Root is false, then it will be omitted from the output event. 
- Root bool +func (h *Histogram) fields() common.MapStr { + if len(h.Counts) == 0 { + return nil + } + var fields mapStr + fields.set("counts", h.Counts) + fields.set("values", h.Values) + return common.MapStr(fields) } -// MetricsetSpan provides enough information to connect a metricset to the related kind of spans. -type MetricsetSpan struct { - // Type holds the span type: "external", "db", etc. - Type string - - // Subtype holds the span subtype: "http", "sql", etc. - Subtype string +// AggregatedDuration holds a count and sum of aggregated durations. +type AggregatedDuration struct { + // Count holds the number of durations aggregated. + Count int - // DestinationService holds information about the target of outgoing requests - DestinationService DestinationService + // Sum holds the sum of aggregated durations. + Sum time.Duration } -func (me *Metricset) Transform(ctx context.Context, _ *transform.Config) []beat.Event { - metricsetTransformations.Inc() - if me == nil { +func (a *AggregatedDuration) fields() common.MapStr { + if a.Count == 0 { return nil } - - fields := common.MapStr{} - for _, sample := range me.Samples { - if err := sample.set(fields); err != nil { - logp.NewLogger(logs.Transform).Warnf("failed to transform sample %#v", sample) - continue - } - } - - fields["processor"] = metricsetProcessorEntry - me.Metadata.Set(fields) - if eventFields := me.Event.fields(); eventFields != nil { - utility.DeepUpdate(fields, metricsetEventKey, eventFields) - } - if transactionFields := me.Transaction.fields(); transactionFields != nil { - utility.DeepUpdate(fields, metricsetTransactionKey, transactionFields) - } - if spanFields := me.Span.fields(); spanFields != nil { - utility.DeepUpdate(fields, metricsetSpanKey, spanFields) - } - - // merges with metadata labels, overrides conflicting keys - utility.DeepUpdate(fields, "labels", me.Labels) - - if me.TimeseriesInstanceID != "" { - fields["timeseries"] = common.MapStr{"instance": me.TimeseriesInstanceID} - } - - return []beat.Event{{ - Fields: fields, - Timestamp: me.Timestamp, - }} -} - -func (e *MetricsetEventCategorization) fields() common.MapStr { var fields mapStr - fields.maybeSetString("outcome", e.Outcome) + fields.set("count", a.Count) + fields.set("sum.us", a.Sum.Microseconds()) return common.MapStr(fields) } -func (t *MetricsetTransaction) fields() common.MapStr { - var fields mapStr - fields.maybeSetString("type", t.Type) - fields.maybeSetString("name", t.Name) - fields.maybeSetString("result", t.Result) - if t.Root { - fields.set("root", true) +func (me *Metricset) setFields(fields *mapStr) { + if me.TimeseriesInstanceID != "" { + fields.set("timeseries", common.MapStr{"instance": me.TimeseriesInstanceID}) } - return common.MapStr(fields) -} + if me.DocCount > 0 { + fields.set("_doc_count", me.DocCount) + } + fields.maybeSetString("metricset.name", me.Name) -func (s *MetricsetSpan) fields() common.MapStr { - var fields mapStr - fields.maybeSetString("type", s.Type) - fields.maybeSetString("subtype", s.Subtype) - if destinationServiceFields := s.DestinationService.fields(); len(destinationServiceFields) != 0 { - fields.set("destination", common.MapStr{"service": destinationServiceFields}) + var metricDescriptions mapStr + for name, sample := range me.Samples { + sample.set(name, fields) + + var md mapStr + md.maybeSetString("type", string(sample.Type)) + md.maybeSetString("unit", sample.Unit) + metricDescriptions.maybeSetMapStr(name, common.MapStr(md)) } - return common.MapStr(fields) + 
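// Any type/unit descriptions collected above are written once, under a + // single "_metric_descriptions" object; it is skipped when no sample + // carries a type or unit. +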
fields.maybeSetMapStr("_metric_descriptions", common.MapStr(metricDescriptions)) } -func (s *Sample) set(fields common.MapStr) error { - switch { - case len(s.Counts) > 0: - _, err := fields.Put(s.Name, common.MapStr{ - "counts": s.Counts, - "values": s.Values, - }) - return err - default: - _, err := fields.Put(s.Name, s.Value) - return err +func (s *MetricsetSample) set(name string, fields *mapStr) { + if s.Type == MetricTypeHistogram { + fields.set(name, s.Histogram.fields()) + } else { + fields.set(name, s.Value) } } diff --git a/model/metricset/_meta/fields.yml b/model/metricset/_meta/fields.yml index fad05e9493e..f6dbe950028 100644 --- a/model/metricset/_meta/fields.yml +++ b/model/metricset/_meta/fields.yml @@ -1,5 +1,865 @@ +- key: apm-application-metrics + title: "APM Application Metrics" + description: APM application metrics. + short_config: true + fields: + - name: histogram + type: histogram + dynamic_template: true + +- key: apm-transaction-metrics + title: "APM Transaction Metrics" + description: > + APM transaction metrics, and transaction metrics-specific properties, + such as transaction.root. + short_config: true + fields: + - name: processor.name + type: keyword + description: Processor name. + overwrite: true + + - name: processor.event + type: keyword + description: Processor event. + overwrite: true + + - name: timeseries.instance + type: keyword + description: Time series instance ID + overwrite: true + + - name: timestamp + type: group + fields: + - name: us + type: long + count: 1 + description: > + Timestamp of the event in microseconds since Unix epoch. + overwrite: true + + - name: labels + type: object + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 + dynamic: true + overwrite: true + description: > + A flat mapping of user-defined labels with string, boolean or number values. + + - name: metricset + type: group + fields: + - name: name + type: keyword + description: > + Name of the set of metrics. + example: transaction + + - name: service + type: group + dynamic: false + description: > + Service fields. + fields: + - name: name + type: keyword + description: > + Immutable name of the service emitting this event. + overwrite: true + + - name: version + type: keyword + description: > + Version of the service emitting this event. + overwrite: true + + - name: environment + type: keyword + description: > + Service environment. + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Unique meaningful name of the service node. + overwrite: true + + - name: language + type: group + fields: + + - name: name + type: keyword + description: > + Name of the programming language used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the programming language used. + overwrite: true + + - name: runtime + type: group + fields: + + - name: name + type: keyword + description: > + Name of the runtime used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the runtime used. + overwrite: true + + - name: framework + type: group + fields: + + - name: name + type: keyword + description: > + Name of the framework used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the framework used. + overwrite: true + + - name: transaction + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The transaction ID. 
+ overwrite: true + - name: sampled + type: boolean + description: > + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + overwrite: true + - name: type + type: keyword + description: > + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) + overwrite: true + - name: name + type: keyword + multi_fields: + - name: text + type: text + description: > + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + overwrite: true + + - name: duration + type: group + fields: + - name: count + type: long + overwrite: true + description: Number of aggregated transactions. + - name: sum + type: group + fields: + - name: us + type: long + unit: micros + overwrite: true + description: Aggregated transaction duration, in microseconds. + + - name: self_time + type: group + description: > + Portion of the transaction's duration where no direct child was running + fields: + - name: count + type: long + overwrite: true + description: Number of aggregated transactions. + - name: sum + type: group + fields: + - name: us + type: long + unit: micros + overwrite: true + description: > + Aggregated transaction duration, excluding the time periods where a + direct child was running, in microseconds. + + - name: breakdown + type: group + fields: + - name: count + type: long + overwrite: true + description: > + Counter for collected breakdowns for the transaction + + - name: root + type: boolean + description: > + Identifies metrics for root transactions. This can be used for calculating metrics for traces. + + - name: result + type: keyword + description: > + The result of the transaction. HTTP status code for HTTP-related transactions. + + - name: span + type: group + dynamic: false + fields: + - name: type + type: keyword + count: 1 + description: > + Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). + overwrite: true + + - name: subtype + type: keyword + count: 1 + description: > + A further sub-division of the type (e.g. postgresql, elasticsearch) + overwrite: true + + - name: self_time + type: group + description: > + Portion of the span's duration where no direct child was running + fields: + - name: count + type: long + overwrite: true + description: Number of aggregated spans. + - name: sum + type: group + fields: + - name: us + type: long + unit: micros + overwrite: true + description: > + Aggregated span duration, excluding the time periods where a + direct child was running, in microseconds. + + - name: destination + type: group + dynamic: false + fields: + + - name: service + type: group + dynamic: false + description: Destination service context + fields: + + - name: resource + type: keyword + description: > + Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name') + + - name: agent + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + Name of the agent used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the agent used. + overwrite: true + + - name: ephemeral_id + type: keyword + description: > + The Ephemeral ID identifies a running process. 
+ overwrite: true + + - name: container + type: group + dynamic: false + title: Container + description: > + Container fields are used for meta information about the specific container + that is the source of information. These fields help correlate data based on + containers from any runtime. + fields: + + - name: id + type: keyword + description: > + Unique container id. + overwrite: true + + - name: kubernetes + type: group + dynamic: false + title: Kubernetes + description: > + Kubernetes metadata reported by agents + fields: + + - name: namespace + type: keyword + description: > + Kubernetes namespace + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Kubernetes node name + overwrite: true + + - name: pod + type: group + fields: + + - name: name + type: keyword + description: > + Kubernetes pod name + overwrite: true + + - name: uid + type: keyword + description: > + Kubernetes Pod UID + overwrite: true + + - name: network + type: group + dynamic: false + description: > + Optional network fields + fields: + + - name: connection + type: group + description: > + Network connection details + fields: + + - name: type + type: keyword + description: > + Network connection type, eg. "wifi", "cell" + overwrite: true + + - name: subtype + type: keyword + description: > + Detailed network connection sub-type, e.g. "LTE", "CDMA" + overwrite: true + + - name: carrier + type: group + description: > + Network operator + fields: + + - name: name + type: keyword + overwrite: true + description: > + Carrier name, eg. Vodafone, T-Mobile, etc. + + - name: mcc + type: keyword + overwrite: true + description: > + Mobile country code + + - name: mnc + type: keyword + overwrite: true + description: > + Mobile network code + + - name: icc + type: keyword + overwrite: true + description: > + ISO country code, eg. US + + - name: host + type: group + dynamic: false + description: > + Optional host fields. + fields: + + - name: architecture + type: keyword + description: > + The architecture of the host the event was recorded on. + overwrite: true + + - name: hostname + type: keyword + description: > + The hostname of the host the event was recorded on. + overwrite: true + + - name: name + type: keyword + description: > + Name of the host the event was recorded on. + It can contain the same information as host.hostname or a name specified by the user. + overwrite: true + + - name: ip + type: ip + description: > + IP of the host that records the event. + overwrite: true + + - name: os + title: Operating System + group: 2 + description: > + The OS fields contain information about the operating system. + type: group + fields: + - name: platform + type: keyword + description: > + The platform of the host the event was recorded on. + overwrite: true + + - name: process + type: group + dynamic: false + description: > + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: > + Process arguments. + May be filtered to protect sensitive information. + overwrite: true + + - name: pid + type: long + description: > + Numeric process ID of the service process. + overwrite: true + + - name: ppid + type: long + description: > + Numeric ID of the service's parent process. + overwrite: true + + - name: title + type: keyword + description: > + Service process title. 
+ overwrite: true + + - name: observer + type: group + dynamic: false + fields: + + - name: listening + type: keyword + overwrite: true + description: > + Address the server is listening on. + + - name: hostname + type: keyword + overwrite: true + description: > + Hostname of the APM Server. + + - name: version + type: keyword + overwrite: true + description: > + APM Server version. + + - name: version_major + type: byte + overwrite: true + description: > + Major version number of the observer + + - name: type + type: keyword + overwrite: true + description: > + The type will be set to `apm-server`. + + - name: id + type: keyword + overwrite: true + description: > + Unique identifier of the APM Server. + + - name: ephemeral_id + type: keyword + overwrite: true + description: > + Ephemeral identifier of the APM Server. + + - name: user + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + The username of the logged in user. + overwrite: true + + - name: id + type: keyword + description: > + Identifier of the logged in user. + overwrite: true + + - name: email + type: keyword + description: > + Email of the logged in user. + overwrite: true + + - name: client + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Client domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the client of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the client. + overwrite: true + + - name: source + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Source domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the source of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the source. + overwrite: true + + - name: destination + title: Destination + group: 2 + description: 'Destination fields describe details about the destination of a packet/event. + + Destination fields are usually populated in conjunction with source fields.' + type: group + fields: + - name: address + level: extended + type: keyword + ignore_above: 1024 + description: 'Some event destination addresses are defined ambiguously. The + event will sometimes list an IP, a domain or a unix socket. You should always + store the raw address in the `.address` field. + Then it should be duplicated to `.ip` or `.domain`, depending on which one + it is.' + overwrite: true + + - name: ip + level: core + type: ip + description: 'IP address of the destination. + Can be one of multiple IPv4 or IPv6 addresses.' + overwrite: true + + - name: port + level: core + type: long + format: string + description: Port of the destination. + overwrite: true + + - name: user_agent + dynamic: false + title: User agent + description: > + The user_agent fields normally come from a browser request. They often + show up in web service logs coming from the parsed user agent string. + type: group + overwrite: true + fields: + + - name: original + type: keyword + description: > + Unparsed version of the user_agent. 
+ example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" + overwrite: true + + multi_fields: + - name: text + type: text + description: > + Software agent acting in behalf of a user, eg. a web browser / OS combination. + overwrite: true + + - name: name + type: keyword + overwrite: true + example: Safari + description: > + Name of the user agent. + + - name: version + type: keyword + overwrite: true + description: > + Version of the user agent. + example: 12.0 + + - name: device + type: group + overwrite: true + title: Device + description: > + Information concerning the device. + fields: + + - name: name + type: keyword + overwrite: true + example: iPhone + description: > + Name of the device. + + - name: os + type: group + overwrite: true + title: Operating System + description: > + The OS fields contain information about the operating system. + fields: + + - name: platform + type: keyword + overwrite: true + description: > + Operating system platform (such centos, ubuntu, windows). + example: darwin + + - name: name + type: keyword + overwrite: true + example: "Mac OS X" + description: > + Operating system name, without the version. + + - name: full + type: keyword + overwrite: true + example: "Mac OS Mojave" + description: > + Operating system name, including the version or code name. + + - name: family + type: keyword + overwrite: true + example: "debian" + description: > + OS family (such as redhat, debian, freebsd, windows). + + - name: version + type: keyword + overwrite: true + example: "10.14.1" + description: > + Operating system version as a raw string. + + - name: kernel + type: keyword + overwrite: true + example: "4.4.0-112-generic" + description: > + Operating system kernel version as a raw string. 
+ + - name: cloud + title: Cloud + group: 2 + type: group + description: > + Cloud metadata reported by agents + fields: + - name: account + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account name + overwrite: true + - name: availability_zone + level: extended + type: keyword + ignore_above: 1024 + description: Cloud availability zone name + example: us-east1-a + overwrite: true + - name: instance + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine name + overwrite: true + - name: machine + type: group + dynamic: false + fields: + - name: type + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine type + example: t2.medium + overwrite: true + - name: project + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project name + overwrite: true + - name: provider + level: extended + type: keyword + ignore_above: 1024 + description: Cloud provider name + example: gcp + overwrite: true + - name: region + level: extended + type: keyword + ignore_above: 1024 + description: Cloud region name + example: us-east1 + overwrite: true + - name: service + type: group + dynamic: false + fields: + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: > + Cloud service name, intended to distinguish services running on + different platforms within a provider. + overwrite: true + + - name: event + type: group + fields: + + - name: outcome + level: core + type: keyword + ignore_above: 1024 + description: > + `event.outcome` simply denotes whether the event represents a success or a + failure from the perspective of the entity that produced the event. + example: success + overwrite: true + - key: system - title: "APM System Metrics" + title: "System Metrics" description: > System status metrics, like CPU and memory usage, that are collected from the operating system. short_config: true @@ -18,6 +878,8 @@ - name: total.norm.pct type: scaled_float format: percent + unit: percent + metric_type: gauge description: > The percentage of CPU time spent by the process since the last event. This value is normalized by the number of CPU cores and it ranges @@ -31,6 +893,8 @@ - name: total type: long format: bytes + unit: byte + metric_type: gauge description: > Total memory. - name: actual @@ -41,6 +905,8 @@ - name: free type: long format: bytes + unit: byte + metric_type: gauge description: > Actual free memory in bytes. It is calculated based on the OS. On Linux it consists of the free memory plus caches and buffers. On OSX it is a sum of free memory and the inactive memory. On Windows, it is equal @@ -59,6 +925,8 @@ - name: total.norm.pct type: scaled_float format: percent + unit: percent + metric_type: gauge description: > The percentage of CPU time spent by the process since the last event. 
This value is normalized by the number of CPU cores and it ranges @@ -71,17 +939,67 @@ - name: size type: long format: bytes + unit: byte + metric_type: gauge description: > The total virtual memory the process has. - name: rss.bytes type: long format: bytes + unit: byte + metric_type: gauge description: > The Resident Set Size. The amount of memory the process occupied in main memory (RAM). - name: cgroup type: group description: Metrics and limits for the cgroup, collected by APM agents on Linux. fields: + - name: cpu + type: group + description: CPU-specific cgroup metrics and limits. + fields: + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: cfs + type: group + description: Completely Fair Scheduler (CFS) cgroup metrics. + fields: + - name: period.us + type: long + description: CFS period in microseconds. + unit: micros + metric_type: gauge + - name: quota.us + type: long + description: CFS quota in microseconds. + unit: micros + metric_type: gauge + - name: stats.periods + type: long + description: Number of periods seen by the CPU. + metric_type: counter + - name: stats.throttled.periods + type: long + description: Number of throttled periods seen by the CPU. + metric_type: counter + - name: stats.throttled.ns + type: long + description: Nanoseconds spent throttled seen by the CPU. + unit: nanos + metric_type: counter + - name: cpuacct + type: group + description: CPU Accounting-specific cgroup metrics and limits. + fields: + - name: id + type: keyword + description: ID for the current cgroup CPU. + - name: total.ns + type: long + description: Total CPU time for the current cgroup CPU in nanoseconds. + unit: nanos + metric_type: counter - name: memory type: group description: Memory-specific cgroup metrics and limits. @@ -89,29 +1007,12 @@ - name: mem.limit.bytes type: long format: bytes - description: Memory limit for the current cgroup slice. + unit: byte + metric_type: gauge + description: Memory limit for the current cgroup slice. - name: mem.usage.bytes type: long format: bytes - description: Memory usage by the current cgroup slice. - - name: stats.inactive_file.bytes - type: long - format: bytes - description: Inactive memory for the current cgroup slice. - -- key: apm-transaction-metrics - title: "APM Transaction Metrics" - description: > - APM transaction metrics, and transaction metrics-specific properties, - such as transaction.root. - short_config: true - fields: - - name: transaction - type: group - dynamic: false - fields: - - - name: root - type: boolean - description: > - Identifies metrics for root transactions. This can be used for calculating metrics for traces. + unit: byte + metric_type: gauge + description: Memory usage by the current cgroup slice. diff --git a/model/metricset/generated/schema/metricset.go b/model/metricset/generated/schema/metricset.go deleted file mode 100644 index 73d570e6566..00000000000 --- a/model/metricset/generated/schema/metricset.go +++ /dev/null @@ -1,114 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const ModelSchema = `{ - "$id": "docs/spec/metricsets/metricset.json", - "type": "object", - "description": "Data captured by an agent representing an event occurring in a monitored service", - "allOf": [ - { "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - }}, - { "$id": "docs/spec/span_type.json", - "title": "Span Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - } - } }, - { "$id": "docs/spec/span_subtype.json", - "title": "Span Subtype", - "type": ["object"], - "properties": { - "subtype": { - "type": ["string", "null"], - "description": "A further sub-division of the type (e.g. postgresql, elasticsearch)", - "maxLength": 1024 - } - } }, - { "$id": "docs/spec/transaction_name.json", - "title": "Transaction Name", - "type": ["object"], - "properties": { - "name": { - "type": ["string","null"], - "description": "Generic designation of a transaction in the scope of a single service (eg: 'GET /users/:id')", - "maxLength": 1024 - } - } }, - { "$id": "docs/spec/transaction_type.json", - "title": "Transaction Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } }, - { - "properties": { - "samples": { - "type": [ - "object" - ], - "description": "Sampled application metrics collected from the agent.", - "patternProperties": { - "^[^*\"]*$": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "value": {"type": "number"} - }, - "required": ["value"] - } - }, - "additionalProperties": false - }, - "tags": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - } - }, - "required": ["samples"] - } - ] -} -` diff --git a/model/metricset/generated/schema/rum_v3_metricset.go b/model/metricset/generated/schema/rum_v3_metricset.go deleted file mode 100644 index 1ff16b99300..00000000000 --- a/model/metricset/generated/schema/rum_v3_metricset.go +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const RUMV3Schema = `{ - "$id": "docs/spec/metricsets/rum_v3_metricset.json", - "description": "Data captured by an agent representing an event occurring in a monitored service", - "properties": { - "y": { - "type": ["object", "null"], - "description": "span", - "properties": { - "t": { - "type": "string", - "description": "type", - "maxLength": 1024 - }, - "su": { - "type": ["string", "null"], - "description": "subtype", - "maxLength": 1024 - } - } - }, - "sa": { - "type": "object", - "description": "Sampled application metrics collected from the agent.", - "properties": { - "xdc": { - "description": "transaction.duration.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "xds": { - "description": "transaction.duration.sum.us", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "xbc": { - "description": "transaction.breakdown.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "ysc": { - "description": "span.self_time.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "yss": { - "description": "span.self_time.sum.us", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - } - } - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - } - }, - "required": ["sa"] -} -` diff --git a/model/metricset_test.go b/model/metricset_test.go index fb6975b1899..ce5bbde3e69 100644 --- a/model/metricset_test.go +++ b/model/metricset_test.go @@ -26,196 +26,182 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/transform" ) -func TestTransform(t *testing.T) { - timestamp := time.Now() - metadata := 
Metadata{ - Service: Service{Name: "myservice"}, - } - resource := "external-service" - - const ( - trType = "request" - trName = "GET /" - trResult = "HTTP 2xx" - - spType = "db" - spSubtype = "sql" - - eventOutcome = "success" - ) - +func TestMetricset(t *testing.T) { tests := []struct { Metricset *Metricset - Output []common.MapStr + Output common.MapStr Msg string }{ { - Metricset: nil, - Output: nil, - Msg: "Nil metric", + Metricset: &Metricset{}, + Output: common.MapStr{}, + Msg: "Payload with empty metric.", }, { - Metricset: &Metricset{Timestamp: timestamp, Metadata: metadata}, - Output: []common.MapStr{ - { - "processor": common.MapStr{"event": "metric", "name": "metric"}, - "service": common.MapStr{ - "name": "myservice", - }, - }, + Metricset: &Metricset{Name: "raj"}, + Output: common.MapStr{ + "metricset.name": "raj", }, - Msg: "Payload with empty metric.", + Msg: "Payload with metricset name.", }, { Metricset: &Metricset{ - Metadata: metadata, - Timestamp: timestamp, - Samples: []Sample{{ - Name: "transaction.duration.histogram", - Counts: []int64{1}, - Values: []float64{42}, - }}, - Transaction: MetricsetTransaction{ - Type: trType, - Name: trName, - Result: trResult, - Root: true, + Samples: map[string]MetricsetSample{ + "a.counter": {Value: 612}, + "some.gauge": {Value: 9.16}, }, - TimeseriesInstanceID: "foo", }, - Output: []common.MapStr{ - { - "processor": common.MapStr{"event": "metric", "name": "metric"}, - "service": common.MapStr{"name": "myservice"}, - "timeseries": common.MapStr{"instance": "foo"}, - "transaction": common.MapStr{ - "name": trName, - "type": trType, - "result": trResult, - "root": true, - "duration": common.MapStr{ - "histogram": common.MapStr{ - "counts": []int64{1}, - "values": []float64{42}, - }, - }, - }, - }, + Output: common.MapStr{ + "a.counter": 612.0, + "some.gauge": 9.16, }, - Msg: "Payload with extended transaction metadata.", + Msg: "Payload with valid metric.", }, { Metricset: &Metricset{ - Metadata: metadata, - Timestamp: timestamp, - Samples: []Sample{{ - Name: "metric_field", - Value: 123, - }}, - Event: MetricsetEventCategorization{ - Outcome: eventOutcome, - }, + TimeseriesInstanceID: "foo", + DocCount: 6, }, - Output: []common.MapStr{ - { - "processor": common.MapStr{"event": "metric", "name": "metric"}, - "service": common.MapStr{"name": "myservice"}, - "event": common.MapStr{"outcome": eventOutcome}, - "metric_field": 123.0, - }, + Output: common.MapStr{ + "timeseries": common.MapStr{"instance": "foo"}, + "_doc_count": int64(6), }, - Msg: "Payload with event categorization metadata.", + Msg: "Timeseries instance and _doc_count", }, { Metricset: &Metricset{ - Metadata: metadata, - Labels: common.MapStr{"a.b": "a.b.value"}, - Timestamp: timestamp, - Samples: []Sample{ - { - Name: "a.counter", - Value: 612, - }, - { - Name: "some.gauge", - Value: 9.16, - }, - { - Name: "histo.gram", - Value: 666, // Value is ignored when Counts/Values are specified - Counts: []int64{1, 2, 3}, - Values: []float64{4.5, 6.0, 9.0}, - }, - }, - Span: MetricsetSpan{Type: spType, Subtype: spSubtype}, - Transaction: MetricsetTransaction{Type: trType, Name: trName}, - }, - Output: []common.MapStr{ - { - "processor": common.MapStr{"event": "metric", "name": "metric"}, - "service": common.MapStr{"name": "myservice"}, - "transaction": common.MapStr{"name": trName, "type": trType}, - "span": common.MapStr{"type": spType, "subtype": spSubtype}, - "labels": common.MapStr{"a.b": "a.b.value"}, - - "a": common.MapStr{"counter": float64(612)}, - "some": 
common.MapStr{"gauge": float64(9.16)}, - "histo": common.MapStr{ - "gram": common.MapStr{ - "counts": []int64{1, 2, 3}, - "values": []float64{4.5, 6.0, 9.0}, + Samples: map[string]MetricsetSample{ + "latency_histogram": { + Type: "histogram", + Unit: "s", + Histogram: Histogram{ + Counts: []int64{1, 2, 3}, + Values: []float64{1.1, 2.2, 3.3}, }, }, - }, - }, - Msg: "Payload with valid metric.", - }, - { - Metricset: &Metricset{ - Timestamp: timestamp, - Metadata: metadata, - Span: MetricsetSpan{Type: spType, Subtype: spSubtype, DestinationService: DestinationService{ - Resource: &resource, - }}, - Samples: []Sample{ - { - Name: "destination.service.response_time.count", - Value: 40, + "just_type": { + Type: "counter", + Value: 123, }, - { - Name: "destination.service.response_time.sum.us", - Value: 500000, + "just_unit": { + Unit: "percent", + Value: 0.99, }, }, }, - Output: []common.MapStr{ - { - "processor": common.MapStr{"event": "metric", "name": "metric"}, - "service": common.MapStr{"name": "myservice"}, - "span": common.MapStr{"type": spType, "subtype": spSubtype, - "destination": common.MapStr{"service": common.MapStr{"resource": resource}}}, - "destination": common.MapStr{"service": common.MapStr{"response_time": common.MapStr{ - "count": 40.0, - "sum": common.MapStr{"us": 500000.0}, + Output: common.MapStr{ + "latency_histogram": common.MapStr{ + "counts": []int64{1, 2, 3}, + "values": []float64{1.1, 2.2, 3.3}, + }, + "just_type": 123.0, + "just_unit": 0.99, + "_metric_descriptions": common.MapStr{ + "latency_histogram": common.MapStr{ + "type": "histogram", + "unit": "s", }, + "just_type": common.MapStr{ + "type": "counter", }, + "just_unit": common.MapStr{ + "unit": "percent", }, }, }, - Msg: "Payload with destination service.", + Msg: "Payload with metric type and unit.", }, } for idx, test := range tests { - outputEvents := test.Metricset.Transform(context.Background(), &transform.Config{}) + event := APMEvent{Metricset: test.Metricset} + outputEvent := event.BeatEvent(context.Background()) + assert.Equal(t, test.Output, outputEvent.Fields, fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) + } +} + +func TestTransformMetricsetTransaction(t *testing.T) { + event := APMEvent{ + Processor: MetricsetProcessor, + Transaction: &Transaction{ + Name: "transaction_name", + Type: "transaction_type", + Result: "transaction_result", + BreakdownCount: 123, + AggregatedDuration: AggregatedDuration{ + Count: 456, + Sum: time.Millisecond, + }, + DurationHistogram: Histogram{ + Counts: []int64{1, 2, 3}, + Values: []float64{4.5, 6.0, 9.0}, + }, + }, + Metricset: &Metricset{Name: "transaction"}, + } + beatEvent := event.BeatEvent(context.Background()) + assert.Equal(t, common.MapStr{ + "processor": common.MapStr{"name": "metric", "event": "metric"}, + "metricset.name": "transaction", + "transaction": common.MapStr{ + "name": "transaction_name", + "type": "transaction_type", + "result": "transaction_result", + "breakdown.count": 123, + "duration": common.MapStr{ + "count": 456, + "sum.us": int64(1000), + }, + "duration.histogram": common.MapStr{ + "counts": []int64{1, 2, 3}, + "values": []float64{4.5, 6.0, 9.0}, + }, + }, + }, beatEvent.Fields) +} - for j, outputEvent := range outputEvents { - assert.Equal(t, test.Output[j], outputEvent.Fields, fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) - assert.Equal(t, timestamp, outputEvent.Timestamp, fmt.Sprintf("Bad timestamp at idx %v; %s", idx, test.Msg)) - } +func TestTransformMetricsetSpan(t *testing.T) { + event := APMEvent{ + Processor: 
MetricsetProcessor, + Span: &Span{ + Type: "span_type", + Subtype: "span_subtype", + SelfTime: AggregatedDuration{ + Count: 123, + Sum: time.Millisecond, + }, + DestinationService: &DestinationService{ + Resource: "destination_service_resource", + ResponseTime: AggregatedDuration{ + Count: 456, + Sum: time.Second, + }, + }, + }, + Metricset: &Metricset{Name: "span"}, } + beatEvent := event.BeatEvent(context.Background()) + assert.Equal(t, common.MapStr{ + "processor": common.MapStr{"name": "metric", "event": "metric"}, + "metricset.name": "span", + "span": common.MapStr{ + "type": "span_type", + "subtype": "span_subtype", + "self_time": common.MapStr{ + "count": 123, + "sum.us": int64(1000), + }, + "destination": common.MapStr{ + "service": common.MapStr{ + "resource": "destination_service_resource", + "response_time": common.MapStr{ + "count": 456, + "sum.us": int64(1000000), + }, + }, + }, + }, + }, beatEvent.Fields) } diff --git a/model/modeldecoder/cloud.go b/model/modeldecoder/cloud.go deleted file mode 100644 index 74d49004253..00000000000 --- a/model/modeldecoder/cloud.go +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "github.com/elastic/apm-server/model" -) - -func decodeCloud(input map[string]interface{}, out *model.Cloud) { - if input == nil { - return - } - decodeString(input, "availability_zone", &out.AvailabilityZone) - decodeString(input, "provider", &out.Provider) - decodeString(input, "region", &out.Region) - if account := getObject(input, "account"); account != nil { - decodeString(account, "id", &out.AccountID) - decodeString(account, "name", &out.AccountName) - } - if instance := getObject(input, "instance"); instance != nil { - decodeString(instance, "id", &out.InstanceID) - decodeString(instance, "name", &out.InstanceName) - } - if machine := getObject(input, "machine"); machine != nil { - decodeString(machine, "type", &out.MachineType) - } - if project := getObject(input, "project"); project != nil { - decodeString(project, "id", &out.ProjectID) - decodeString(project, "name", &out.ProjectName) - } -} diff --git a/model/modeldecoder/container.go b/model/modeldecoder/container.go deleted file mode 100644 index 8b794120d3f..00000000000 --- a/model/modeldecoder/container.go +++ /dev/null @@ -1,27 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import "github.com/elastic/apm-server/model" - -func decodeContainer(input map[string]interface{}, out *model.Container) { - if input == nil { - return - } - decodeString(input, "id", &out.ID) -} diff --git a/model/modeldecoder/container_test.go b/model/modeldecoder/container_test.go deleted file mode 100644 index 24dfcc28f7c..00000000000 --- a/model/modeldecoder/container_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestContainerDecode(t *testing.T) { - id := "container-id" - for _, test := range []struct { - input map[string]interface{} - c model.Container - }{ - {input: nil}, - { - input: map[string]interface{}{"id": id}, - c: model.Container{ID: id}, - }, - } { - var container model.Container - decodeContainer(test.input, &container) - assert.Equal(t, test.c, container) - } -} diff --git a/model/modeldecoder/context.go b/model/modeldecoder/context.go deleted file mode 100644 index 92c3bfc3d41..00000000000 --- a/model/modeldecoder/context.go +++ /dev/null @@ -1,240 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "net" - "strconv" - "strings" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/utility" -) - -// decodeContext parses all information from input, nested under key context and returns an instance of Context. -func decodeContext(input map[string]interface{}, cfg Config, meta *model.Metadata) (*model.Context, error) { - if input == nil { - return &model.Context{}, nil - } - - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(cfg.HasShortFieldNames) - - var experimental interface{} - if cfg.Experimental { - experimental = decoder.Interface(input, "experimental") - } - http, err := decodeHTTP(input, cfg.HasShortFieldNames, decoder.Err) - url, err := decodeURL(input, err) - custom, err := decodeCustom(input, cfg.HasShortFieldNames, err) - page, err := decodePage(input, cfg.HasShortFieldNames, err) - message, err := decodeMessage(input, err) - if err != nil { - return nil, err - } - - ctx := model.Context{ - Http: http, - URL: url, - Page: page, - Custom: custom, - Message: message, - Experimental: experimental, - } - - if tagsInp := getObject(input, fieldName("tags")); tagsInp != nil { - var labels model.Labels - decodeLabels(tagsInp, (*common.MapStr)(&labels)) - ctx.Labels = &labels - } - - if userInp := getObject(input, fieldName("user")); userInp != nil { - // Per-event user metadata replaces stream user metadata. - meta.User = model.User{} - decodeUser(userInp, cfg.HasShortFieldNames, &meta.User, &meta.Client) - } - if ua := http.UserAgent(); ua != "" { - meta.UserAgent.Original = ua - } - if meta.Client.IP == nil { - meta.Client.IP = getHTTPClientIP(http) - } - - if serviceInp := getObject(input, fieldName("service")); serviceInp != nil { - // Per-event service metadata is merged with stream service metadata. 
- decodeService(serviceInp, cfg.HasShortFieldNames, &meta.Service) - } - - return &ctx, nil -} - -func decodeURL(raw common.MapStr, err error) (*model.URL, error) { - if err != nil { - return nil, err - } - - decoder := utility.ManualDecoder{} - req := decoder.MapStr(raw, "request") - if req == nil { - return nil, decoder.Err - } - - inpURL := decoder.MapStr(req, "url") - url := model.URL{ - Original: decoder.StringPtr(inpURL, "raw"), - Full: decoder.StringPtr(inpURL, "full"), - Domain: decoder.StringPtr(inpURL, "hostname"), - Path: decoder.StringPtr(inpURL, "pathname"), - Query: decoder.StringPtr(inpURL, "search"), - Fragment: decoder.StringPtr(inpURL, "hash"), - } - if scheme := decoder.StringPtr(inpURL, "protocol"); scheme != nil { - trimmed := strings.TrimSuffix(*scheme, ":") - url.Scheme = &trimmed - } - err = decoder.Err - if url.Port = decoder.IntPtr(inpURL, "port"); url.Port != nil { - return &url, nil - } else if portStr := decoder.StringPtr(inpURL, "port"); portStr != nil { - var p int - if p, err = strconv.Atoi(*portStr); err == nil { - url.Port = &p - } - } - - return &url, err -} - -func getHTTPClientIP(http *model.Http) net.IP { - if http == nil || http.Request == nil { - return nil - } - // http.Request.Headers and http.Request.Socket information is - // only set for backend events try to first extract an IP address - // from the headers, if not possible use IP address from socket - // remote_address - if ip := utility.ExtractIPFromHeader(http.Request.Headers); ip != nil { - return ip - } - if http.Request.Socket != nil && http.Request.Socket.RemoteAddress != nil { - return utility.ParseIP(*http.Request.Socket.RemoteAddress) - } - return nil -} - -func decodeHTTP(raw common.MapStr, hasShortFieldNames bool, err error) (*model.Http, error) { - if err != nil { - return nil, err - } - var h *model.Http - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(hasShortFieldNames) - - inpReq := decoder.MapStr(raw, fieldName("request")) - if inpReq != nil { - h = &model.Http{ - Version: decoder.StringPtr(inpReq, fieldName("http_version")), - Request: &model.Req{ - Method: strings.ToLower(decoder.String(inpReq, fieldName("method"))), - Env: decoder.Interface(inpReq, fieldName("env")), - Socket: &model.Socket{ - RemoteAddress: decoder.StringPtr(inpReq, "remote_address", "socket"), - Encrypted: decoder.BoolPtr(inpReq, "encrypted", "socket"), - }, - Body: decoder.Interface(inpReq, "body"), - Cookies: decoder.Interface(inpReq, "cookies"), - Headers: decoder.Headers(inpReq, fieldName("headers")), - }, - } - } - - if inpResp := decoder.MapStr(raw, fieldName("response")); inpResp != nil { - if h == nil { - h = &model.Http{} - } - h.Response = &model.Resp{ - Finished: decoder.BoolPtr(inpResp, "finished"), - HeadersSent: decoder.BoolPtr(inpResp, "headers_sent"), - } - minimalResp, err := decodeMinimalHTTPResponse(raw, hasShortFieldNames, decoder.Err) - if err != nil { - return nil, err - } - if minimalResp != nil { - h.Response.MinimalResp = *minimalResp - } - } - return h, decoder.Err -} - -func decodeMinimalHTTPResponse(raw common.MapStr, hasShortFieldNames bool, err error) (*model.MinimalResp, error) { - if err != nil { - return nil, err - } - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(hasShortFieldNames) - - inpResp := decoder.MapStr(raw, fieldName("response")) - if inpResp == nil { - return nil, nil - } - headers := decoder.Headers(inpResp, fieldName("headers")) - return &model.MinimalResp{ - StatusCode: decoder.IntPtr(inpResp, fieldName("status_code")), - 
Headers: headers, - DecodedBodySize: decoder.Float64Ptr(inpResp, fieldName("decoded_body_size")), - EncodedBodySize: decoder.Float64Ptr(inpResp, fieldName("encoded_body_size")), - TransferSize: decoder.Float64Ptr(inpResp, fieldName("transfer_size")), - }, decoder.Err -} - -func decodePage(raw common.MapStr, hasShortFieldNames bool, err error) (*model.Page, error) { - if err != nil { - return nil, err - } - fieldName := field.Mapper(hasShortFieldNames) - pageInput, ok := raw[fieldName("page")].(map[string]interface{}) - if !ok { - return nil, nil - } - decoder := utility.ManualDecoder{} - page := &model.Page{ - Referer: decoder.StringPtr(pageInput, fieldName("referer")), - } - if pageURL := decoder.StringPtr(pageInput, fieldName("url")); pageURL != nil { - page.URL = model.ParseURL(*pageURL, "") - } - return page, decoder.Err -} - -func decodeCustom(raw common.MapStr, hasShortFieldNames bool, err error) (*model.Custom, error) { - if err != nil { - return nil, err - } - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(hasShortFieldNames) - if c := decoder.MapStr(raw, fieldName("custom")); decoder.Err == nil && c != nil { - custom := model.Custom(c) - return &custom, nil - } - return nil, decoder.Err -} diff --git a/model/modeldecoder/context_test.go b/model/modeldecoder/context_test.go deleted file mode 100644 index e9013b67d8d..00000000000 --- a/model/modeldecoder/context_test.go +++ /dev/null @@ -1,275 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
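The decoders deleted above (decodeURL, decodeHTTP, decodePage, decodeCustom) all share one error-threading convention: each helper takes the previous step's error as an argument and returns immediately if it is non-nil, so a chain of decode calls needs only a single error check at the end. A minimal, self-contained sketch of that pattern follows; decodeA, decodeB, and the input keys are hypothetical stand-ins, not apm-server APIs.

package main

import (
    "errors"
    "fmt"
)

// Each helper short-circuits on a previous error, so callers can
// chain several decode steps and check the error once at the end.
func decodeA(input map[string]interface{}, err error) (string, error) {
    if err != nil {
        return "", err
    }
    v, ok := input["a"].(string)
    if !ok {
        return "", errors.New("missing key a")
    }
    return v, nil
}

func decodeB(input map[string]interface{}, err error) (string, error) {
    if err != nil {
        return "", err // propagate without doing any work
    }
    v, ok := input["b"].(string)
    if !ok {
        return "", errors.New("missing key b")
    }
    return v, nil
}

func main() {
    input := map[string]interface{}{"a": "1", "b": "2"}
    a, err := decodeA(input, nil)
    b, err := decodeB(input, err) // does real work only if decodeA succeeded
    if err != nil {
        fmt.Println("decode failed:", err)
        return
    }
    fmt.Println(a, b)
}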
- -package modeldecoder - -import ( - "encoding/json" - "fmt" - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/approvaltest" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/utility" -) - -func TestDecodeContext(t *testing.T) { - for name, test := range map[string]struct { - input map[string]interface{} - cfg Config - errOut string - }{ - "input_nil": {}, - "empty": {input: map[string]interface{}{}}, - "request_body_string": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "Get", - "url": map[string]interface{}{}, - "body": "user-request", - }, - }}, - "url_port_string": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "Get", - "url": map[string]interface{}{"port": "8080"}, - }, - }}, - "url_port_invalid_string": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "Get", - "url": map[string]interface{}{"port": "this is an invalid port"}, - }, - }, - errOut: "strconv.Atoi", - }, - "user_id_integer": { - input: map[string]interface{}{ - "user": map[string]interface{}{"username": "john", "ip": "10.15.21.3", "id": json.Number("1234")}}, - }, - "no_request_method": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "url": map[string]interface{}{"raw": "127.0.0.1"}}}, - errOut: utility.ErrFetch("method", nil).Error(), - }, - "no_url_protocol": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "Get", - "url": map[string]interface{}{"raw": "127.0.0.1"}}}, - }, - "experimental is not true": { - input: map[string]interface{}{ - "experimental": "experimental data", - }, - }, - "client_ip_from_socket": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "POST", - "socket": map[string]interface{}{"encrypted": false, "remote_address": "10.1.23.5"}, - }, - }, - }, - "client_ip_from_socket_invalid_headers": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "POST", - "headers": map[string]interface{}{"X-Forwarded-For": "192.13.14:8097"}, - "socket": map[string]interface{}{"encrypted": false, "remote_address": "10.1.23.5"}, - }, - }, - }, - "client_ip_from_forwarded_header": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "POST", - "headers": map[string]interface{}{ - "Forwarded": "for=192.13.14.5", - "X-Forwarded-For": "178.3.11.17", - }, - "socket": map[string]interface{}{"encrypted": false, "remote_address": "10.1.23.5"}, - }, - }, - }, - "client_ip_header_case_insensitive": { - input: map[string]interface{}{ - "request": map[string]interface{}{ - "method": "POST", - "headers": map[string]interface{}{ - "x-real-ip": "192.13.14.5", - "X-Forwarded-For": "178.3.11.17", - }, - "socket": map[string]interface{}{"encrypted": false, "remote_address": "10.1.23.5"}, - }, - }, - }, - "full_event with experimental=true": { - input: map[string]interface{}{ - "experimental": map[string]interface{}{"foo": "bar"}, - "undefined": "val", - "custom": map[string]interface{}{"a": "b"}, - "response": map[string]interface{}{ - "finished": false, - "headers": map[string]interface{}{"Content-Type": []string{"text/html"}}, - "headers_sent": true, - "status_code": json.Number("202")}, - "request": map[string]interface{}{ - "body": map[string]interface{}{"k": 
map[string]interface{}{"b": "v"}}, - "env": map[string]interface{}{"env": map[string]interface{}{"b": "v"}}, - "headers": map[string]interface{}{"host": []string{"a", "b"}}, - "http_version": "2.0", - "method": "POST", - "socket": map[string]interface{}{"encrypted": false, "remote_address": "10.1.23.5"}, - "url": map[string]interface{}{ - "raw": "127.0.0.1", - "protocol": "https:", - "full": "https://127.0.0.1", - "hostname": "example.com", - "port": json.Number("8080"), - "pathname": "/search", - "search": "id=1", - "hash": "x13ab", - }, - "cookies": map[string]interface{}{"c1": "b", "c2": "c"}}, - "tags": map[string]interface{}{"ab": "c", "status": 200, "success": false}, - "user": map[string]interface{}{ - "username": "john", - "email": "doe", - "ip": "192.158.0.1", - "id": "12345678ab", - }, - "service": map[string]interface{}{ - "name": "myService", - "version": "5.1.3", - "environment": "staging", - "language": common.MapStr{ - "name": "ecmascript", - "version": "8", - }, - "runtime": common.MapStr{ - "name": "node", - "version": "8.0.0", - }, - "framework": common.MapStr{ - "name": "Express", - "version": "1.2.3", - }, - "agent": common.MapStr{ - "name": "elastic-node", - "version": "1.0.0", - "ephemeral_id": "abcdef123", - }}, - "page": map[string]interface{}{"url": "https://example.com", "referer": "http://refer.example.com"}, - "message": map[string]interface{}{ - "queue": map[string]interface{}{"name": "order"}, - "topic": map[string]interface{}{"name": "routeA"}}, - }, - cfg: Config{Experimental: true}, - }, - } { - t.Run(name, func(t *testing.T) { - var meta model.Metadata // ignored - out, err := decodeContext(test.input, test.cfg, &meta) - if test.errOut != "" { - if assert.Error(t, err) { - assert.Contains(t, err.Error(), test.errOut) - } - } else { - assert.NoError(t, err) - resultName := fmt.Sprintf("test_approved_model/context_%s", name) - resultJSON, err := json.Marshal(out) - require.NoError(t, err) - approvaltest.ApproveJSON(t, resultName, resultJSON) - } - }) - } -} - -func TestDecodeContextMetadata(t *testing.T) { - inputMetadata := model.Metadata{ - Service: model.Service{ - Name: "myService", // unmodified - Version: "5.1.2", - }, - User: model.User{ID: "12345678ab"}, - } - - mergedMetadata := inputMetadata - mergedMetadata.Service.Version = "5.1.3" // override - mergedMetadata.Service.Environment = "staging" // added - mergedMetadata.Service.Language.Name = "ecmascript" - mergedMetadata.Service.Language.Version = "8" - mergedMetadata.Service.Runtime.Name = "node" - mergedMetadata.Service.Runtime.Version = "8.0.0" - mergedMetadata.Service.Framework.Name = "Express" - mergedMetadata.Service.Framework.Version = "1.2.3" - mergedMetadata.Service.Agent.Name = "elastic-node" - mergedMetadata.Service.Agent.Version = "1.0.0" - mergedMetadata.Service.Agent.EphemeralID = "abcdef123" - mergedMetadata.User = model.User{ - // ID is missing because per-event user metadata - // replaces stream user metadata. This is unlike - // service metadata above, which is merged. 
- Name: "john", - Email: "john.doe@testing.invalid", - } - mergedMetadata.Client.IP = net.ParseIP("10.1.1.1") - - input := map[string]interface{}{ - "tags": map[string]interface{}{"ab": "c", "status": 200, "success": false}, - "user": map[string]interface{}{ - "username": mergedMetadata.User.Name, - "email": mergedMetadata.User.Email, - "ip": mergedMetadata.Client.IP.String(), - }, - "service": map[string]interface{}{ - "version": mergedMetadata.Service.Version, - "environment": mergedMetadata.Service.Environment, - "language": map[string]interface{}{ - "name": mergedMetadata.Service.Language.Name, - "version": mergedMetadata.Service.Language.Version, - }, - "runtime": map[string]interface{}{ - "name": mergedMetadata.Service.Runtime.Name, - "version": mergedMetadata.Service.Runtime.Version, - }, - "framework": map[string]interface{}{ - "name": mergedMetadata.Service.Framework.Name, - "version": mergedMetadata.Service.Framework.Version, - }, - "agent": map[string]interface{}{ - "name": mergedMetadata.Service.Agent.Name, - "version": mergedMetadata.Service.Agent.Version, - "ephemeral_id": mergedMetadata.Service.Agent.EphemeralID, - }, - }, - } - - _, err := decodeContext(input, Config{}, &inputMetadata) - require.NoError(t, err) - assert.Equal(t, mergedMetadata, inputMetadata) -} diff --git a/model/modeldecoder/doc.go b/model/modeldecoder/doc.go deleted file mode 100644 index 8e9a1634da8..00000000000 --- a/model/modeldecoder/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Package modeldecoder holds functions for decoding -// Elastic APM agent events into model objects. -package modeldecoder diff --git a/model/modeldecoder/error.go b/model/modeldecoder/error.go deleted file mode 100644 index 048d3b5fafe..00000000000 --- a/model/modeldecoder/error.go +++ /dev/null @@ -1,166 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" - - m "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/error/generated/schema" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" -) - -var ( - errorSchema = validation.CreateSchema(schema.ModelSchema, "error") - rumV3ErrorSchema = validation.CreateSchema(schema.RUMV3Schema, "error") -) - -// DecodeRUMV3Error decodes a v3 RUM error. -func DecodeRUMV3Error(input Input, batch *m.Batch) error { - apmError, err := decodeError(input, rumV3ErrorSchema) - if err != nil { - return err - } - batch.Errors = append(batch.Errors, apmError) - return nil -} - -// DecodeRUMV2Error decodes a v2 RUM error. -func DecodeRUMV2Error(input Input, batch *m.Batch) error { - apmError, err := decodeError(input, errorSchema) - if err != nil { - return err - } - apmError.RUM = true - batch.Errors = append(batch.Errors, apmError) - return nil -} - -// DecodeError decodes a v2 error. -func DecodeError(input Input, batch *m.Batch) error { - apmError, err := decodeError(input, errorSchema) - if err != nil { - return err - } - batch.Errors = append(batch.Errors, apmError) - return nil -} - -func decodeError(input Input, schema *jsonschema.Schema) (*m.Error, error) { - raw, err := validation.ValidateObject(input.Raw, schema) - if err != nil { - return nil, errors.Wrap(err, "failed to validate error") - } - - fieldName := field.Mapper(input.Config.HasShortFieldNames) - ctx, err := decodeContext(getObject(raw, fieldName("context")), input.Config, &input.Metadata) - if err != nil { - return nil, err - } - - decoder := utility.ManualDecoder{} - e := m.Error{ - Metadata: input.Metadata, - ID: decoder.StringPtr(raw, "id"), - Culprit: decoder.StringPtr(raw, fieldName("culprit")), - Labels: ctx.Labels, - Page: ctx.Page, - HTTP: ctx.Http, - URL: ctx.URL, - Custom: ctx.Custom, - Experimental: ctx.Experimental, - Timestamp: decoder.TimeEpochMicro(raw, "timestamp"), - TransactionSampled: decoder.BoolPtr(raw, fieldName("sampled"), fieldName("transaction")), - TransactionType: decoder.StringPtr(raw, fieldName("type"), fieldName("transaction")), - } - decodeString(raw, fieldName("parent_id"), &e.ParentID) - decodeString(raw, fieldName("trace_id"), &e.TraceID) - decodeString(raw, fieldName("transaction_id"), &e.TransactionID) - - ex := decoder.MapStr(raw, fieldName("exception")) - e.Exception = decodeException(&decoder, input.Config.HasShortFieldNames)(ex) - - log := decoder.MapStr(raw, fieldName("log")) - logMsg := decoder.StringPtr(log, fieldName("message")) - if logMsg != nil { - e.Log = &m.Log{ - Message: *logMsg, - ParamMessage: decoder.StringPtr(log, fieldName("param_message")), - Level: decoder.StringPtr(log, fieldName("level")), - LoggerName: decoder.StringPtr(log, fieldName("logger_name")), - Stacktrace: m.Stacktrace{}, - } - var stacktrace *m.Stacktrace - stacktrace, decoder.Err = decodeStacktrace(log[fieldName("stacktrace")], input.Config.HasShortFieldNames, decoder.Err) - if stacktrace != nil { - e.Log.Stacktrace = *stacktrace - } - } - if decoder.Err != nil { - return nil, decoder.Err - } - if e.Timestamp.IsZero() { - e.Timestamp = input.RequestTime - } - - return &e, nil -} - -type exceptionDecoder func(map[string]interface{}) *m.Exception - -func decodeException(decoder *utility.ManualDecoder, hasShortFieldNames bool) exceptionDecoder { - var 
decode exceptionDecoder - fieldName := field.Mapper(hasShortFieldNames) - decode = func(exceptionTree map[string]interface{}) *m.Exception { - exMsg := decoder.StringPtr(exceptionTree, fieldName("message")) - exType := decoder.StringPtr(exceptionTree, fieldName("type")) - if decoder.Err != nil || (exMsg == nil && exType == nil) { - return nil - } - ex := m.Exception{ - Message: exMsg, - Type: exType, - Code: decoder.Interface(exceptionTree, fieldName("code")), - Module: decoder.StringPtr(exceptionTree, fieldName("module")), - Attributes: decoder.Interface(exceptionTree, fieldName("attributes")), - Handled: decoder.BoolPtr(exceptionTree, fieldName("handled")), - Stacktrace: m.Stacktrace{}, - } - var stacktrace *m.Stacktrace - stacktrace, decoder.Err = decodeStacktrace(exceptionTree[fieldName("stacktrace")], hasShortFieldNames, decoder.Err) - if stacktrace != nil { - ex.Stacktrace = *stacktrace - } - for _, cause := range decoder.InterfaceArr(exceptionTree, fieldName("cause")) { - e, ok := cause.(map[string]interface{}) - if !ok { - decoder.Err = errors.New("cause must be an exception") - return nil - } - nested := decode(e) - if nested != nil { - ex.Cause = append(ex.Cause, *nested) - } - } - return &ex - } - return decode -} diff --git a/model/modeldecoder/error_test.go b/model/modeldecoder/error_test.go deleted file mode 100644 index a530079aa05..00000000000 --- a/model/modeldecoder/error_test.go +++ /dev/null @@ -1,387 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
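decodeException above builds its decoder as a recursive closure so that nested "cause" entries decode into a chain of Exception values, failing fast when a cause is not an object. A compact standalone sketch of the same shape; the field names follow the intake format shown above, but the types are simplified:

package main

import (
    "errors"
    "fmt"
)

type Exception struct {
    Message string
    Cause   []Exception
}

func decodeException(raw map[string]interface{}) (*Exception, error) {
    msg, _ := raw["message"].(string)
    if msg == "" {
        return nil, nil // nothing to decode
    }
    ex := Exception{Message: msg}
    if causes, ok := raw["cause"].([]interface{}); ok {
        for _, c := range causes {
            obj, ok := c.(map[string]interface{})
            if !ok {
                return nil, errors.New("cause must be an exception")
            }
            nested, err := decodeException(obj) // recurse into the cause chain
            if err != nil {
                return nil, err
            }
            if nested != nil {
                ex.Cause = append(ex.Cause, *nested)
            }
        }
    }
    return &ex, nil
}

func main() {
    raw := map[string]interface{}{
        "message": "message0",
        "cause": []interface{}{
            map[string]interface{}{"message": "message1"},
        },
    }
    ex, err := decodeException(raw)
    fmt.Println(ex.Message, len(ex.Cause), err) // message0 1 <nil>
}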
- -package modeldecoder - -import ( - "encoding/json" - "net" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - m "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/tests" -) - -func TestErrorEventDecode(t *testing.T) { - timestamp := json.Number("1496170407154000") - timestampParsed := time.Date(2017, 5, 30, 18, 53, 27, 154*1e6, time.UTC) - requestTime := time.Now() - - id, culprit, lineno := "123", "foo()", 2 - parentID, traceID, transactionID := "0123456789abcdef", "01234567890123456789abcdefabcdef", "abcdefabcdef0000" - name, userID, email, userIP := "jane", "abc123", "j@d.com", "127.0.0.1" - pURL, referer, origURL := "https://mypage.com", "http:mypage.com", "127.0.0.1" - code, module, exType, handled := "200", "a", "errorEx", false - exAttrs := map[string]interface{}{"a": "b", "c": 123, "d": map[string]interface{}{"e": "f"}} - exMsg, logMsg, paramMsg, level, logger := "Exception Msg", "Log Msg", "log pm", "error", "mylogger" - transactionSampled := true - transactionType := "request" - labels := m.Labels{"ab": "c"} - ua := "go-1.1" - page := m.Page{URL: m.ParseURL(pURL, ""), Referer: &referer} - custom := m.Custom{"a": "b"} - request := m.Req{Method: "post", Socket: &m.Socket{}, Headers: http.Header{"User-Agent": []string{ua}}, Cookies: map[string]interface{}{"a": "b"}} - response := m.Resp{Finished: new(bool), MinimalResp: m.MinimalResp{Headers: http.Header{"Content-Type": []string{"text/html"}}}} - h := m.Http{Request: &request, Response: &response} - ctxURL := m.URL{Original: &origURL} - inputMetadata := m.Metadata{ - Service: m.Service{Name: "foo"}, - } - - mergedMetadata := inputMetadata - mergedMetadata.User = m.User{Name: name, Email: email, ID: userID} - mergedMetadata.UserAgent.Original = ua - mergedMetadata.Client.IP = net.ParseIP(userIP) - - // baseInput holds the minimal valid input. Test-specific input is added to this. 
- baseInput := map[string]interface{}{ - "id": id, - "exception": map[string]interface{}{"message": exMsg}, - } - - for name, test := range map[string]struct { - input map[string]interface{} - cfg Config - e *m.Error - }{ - "minimal valid error": { - input: map[string]interface{}{}, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: requestTime, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - }, - }, - "minimal valid error with specified timestamp": { - input: map[string]interface{}{"timestamp": timestamp}, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: timestampParsed, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - }, - }, - "minimal valid error with log and exception": { - input: map[string]interface{}{ - "exception": map[string]interface{}{"message": exMsg}, - "log": map[string]interface{}{"message": logMsg}, - }, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: requestTime, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - Log: &m.Log{Message: logMsg, Stacktrace: m.Stacktrace{}}, - }, - }, - "valid error experimental=true, no experimental payload": { - input: map[string]interface{}{ - "context": map[string]interface{}{"foo": []string{"a", "b"}}, - }, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: requestTime, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - }, - cfg: Config{Experimental: true}, - }, - "valid error experimental=false": { - input: map[string]interface{}{ - "context": map[string]interface{}{"experimental": []string{"a", "b"}}, - }, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: requestTime, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - }, - cfg: Config{Experimental: false}, - }, - "valid error experimental=true": { - input: map[string]interface{}{ - "context": map[string]interface{}{"experimental": []string{"a", "b"}}, - }, - e: &m.Error{ - Metadata: inputMetadata, - ID: &id, - Timestamp: requestTime, - Exception: &m.Exception{Message: &exMsg, Stacktrace: m.Stacktrace{}}, - Experimental: []string{"a", "b"}, - }, - cfg: Config{Experimental: true}, - }, - "full valid error event": { - input: map[string]interface{}{ - "timestamp": timestamp, - "context": map[string]interface{}{ - "a": "b", - "user": map[string]interface{}{"username": name, "email": email, "ip": userIP, "id": userID}, - "tags": map[string]interface{}{"ab": "c"}, - "page": map[string]interface{}{"url": pURL, "referer": referer}, - "custom": map[string]interface{}{"a": "b"}, - "request": map[string]interface{}{ - "method": "POST", - "url": map[string]interface{}{"raw": "127.0.0.1"}, - "headers": map[string]interface{}{"user-agent": ua}, - "cookies": map[string]interface{}{"a": "b"}}, - "response": map[string]interface{}{ - "finished": false, - "headers": map[string]interface{}{"Content-Type": "text/html"}}, - }, - "exception": map[string]interface{}{ - "message": exMsg, - "code": code, - "module": module, - "attributes": exAttrs, - "type": exType, - "handled": handled, - "stacktrace": []interface{}{ - map[string]interface{}{ - "filename": "file", - }, - }, - }, - "log": map[string]interface{}{ - "message": logMsg, - "param_message": paramMsg, - "level": level, "logger_name": logger, - "stacktrace": []interface{}{ - map[string]interface{}{ - "filename": "log file", "lineno": 2.0, - }, - }, - }, - "id": id, - "transaction_id": transactionID, - "parent_id": parentID, - "trace_id": traceID, - 
"culprit": culprit, - "transaction": map[string]interface{}{"sampled": transactionSampled, "type": transactionType}, - }, - e: &m.Error{ - Metadata: mergedMetadata, - Timestamp: timestampParsed, - Labels: &labels, - Page: &page, - Custom: &custom, - HTTP: &h, - URL: &ctxURL, - Exception: &m.Exception{ - Message: &exMsg, - Code: code, - Type: &exType, - Module: &module, - Attributes: exAttrs, - Handled: &handled, - Stacktrace: m.Stacktrace{ - &m.StacktraceFrame{Filename: tests.StringPtr("file")}, - }, - }, - Log: &m.Log{ - Message: logMsg, - ParamMessage: ¶mMsg, - Level: &level, - LoggerName: &logger, - Stacktrace: m.Stacktrace{ - &m.StacktraceFrame{Filename: tests.StringPtr("log file"), Lineno: &lineno}, - }, - }, - ID: &id, - TransactionID: transactionID, - TransactionSampled: &transactionSampled, - TransactionType: &transactionType, - ParentID: parentID, - TraceID: traceID, - Culprit: &culprit, - }, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - if v == nil { - delete(input, k) - } else { - input[k] = v - } - } - batch := &m.Batch{} - err := DecodeError(Input{ - Raw: input, - RequestTime: requestTime, - Metadata: inputMetadata, - Config: test.cfg, - }, batch) - require.NoError(t, err) - assert.Equal(t, test.e, batch.Errors[0]) - }) - } -} - -func TestErrorEventDecodeInvalid(t *testing.T) { - err := DecodeError(Input{Raw: nil}, &m.Batch{}) - require.EqualError(t, err, "failed to validate error: error validating JSON: input missing") - - err = DecodeError(Input{Raw: ""}, &m.Batch{}) - require.EqualError(t, err, "failed to validate error: error validating JSON: invalid input type") - - // baseInput holds the minimal valid input. Test-specific input is added to this. 
- baseInput := map[string]interface{}{ - "id": "id", - "exception": map[string]interface{}{ - "message": "message", - }, - } - err = DecodeError(Input{Raw: baseInput}, &m.Batch{}) - require.NoError(t, err) - - for name, test := range map[string]struct { - input map[string]interface{} - e *m.Error - }{ - "error decoding timestamp": { - input: map[string]interface{}{"timestamp": 123}, - }, - "error decoding transaction id": { - input: map[string]interface{}{"transaction_id": 123}, - }, - "parent id given, but no trace id": { - input: map[string]interface{}{"parent_id": "123"}, - }, - "trace id given, but no parent id": { - input: map[string]interface{}{"trace_id": "123"}, - }, - "invalid type for exception stacktrace": { - input: map[string]interface{}{ - "exception": map[string]interface{}{ - "message": "Exception Msg", - "stacktrace": "123", - }, - }, - }, - "invalid type for log stacktrace": { - input: map[string]interface{}{ - "exception": nil, - "log": map[string]interface{}{ - "message": "Log Msg", - "stacktrace": "123", - }, - }, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - if v == nil { - delete(input, k) - } else { - input[k] = v - } - } - err := DecodeError(Input{Raw: input}, &m.Batch{}) - require.Error(t, err) - t.Logf("%s", err) - }) - } -} - -func TestDecodingAnomalies(t *testing.T) { - - t.Run("exception decoder doesn't erase existing errors", func(t *testing.T) { - badID := map[string]interface{}{ - "id": 7.4, - "exception": map[string]interface{}{ - "message": "message0", - "type": "type0", - }, - } - result := &m.Batch{} - err := DecodeError(Input{Raw: badID}, result) - assert.Error(t, err) - assert.Nil(t, result.Errors) - }) - - t.Run("exception decoding error bubbles up", func(t *testing.T) { - badException := map[string]interface{}{ - "id": "id", - "exception": map[string]interface{}{ - "message": "message0", - "type": "type0", - "cause": []interface{}{ - map[string]interface{}{"message": "message1", "type": 7.4}, - }, - }, - } - result := &m.Batch{} - err := DecodeError(Input{Raw: badException}, result) - assert.Error(t, err) - assert.Nil(t, result.Errors) - }) - - t.Run("wrong cause type", func(t *testing.T) { - badException := map[string]interface{}{ - "id": "id", - "exception": map[string]interface{}{ - "message": "message0", - "type": "type0", - "cause": []interface{}{7.4}, - }, - } - err := DecodeError(Input{Raw: badException}, &m.Batch{}) - require.Error(t, err) - assert.Regexp(t, "failed to validate error:(.|\n)*properties/cause/items/type(.|\n)*expected object or null, but got number", err.Error()) - }) - - t.Run("handle nil exceptions", func(t *testing.T) { - emptyCauses := map[string]interface{}{ - "exception": map[string]interface{}{ - "message": "message0", - "type": "type0", - "cause": []interface{}{ - map[string]interface{}{"message": "message1", "type": "type1", "cause": []interface{}{}}, - map[string]interface{}{}, - }, - }, - } - err := DecodeError(Input{Raw: emptyCauses}, &m.Batch{}) - require.Error(t, err) - assert.Regexp(t, "failed to validate error:(.|\n)* missing properties: \"id\"", err.Error()) - }) -} diff --git a/model/modeldecoder/experience.go b/model/modeldecoder/experience.go deleted file mode 100644 index 073554adea5..00000000000 --- a/model/modeldecoder/experience.go +++ /dev/null @@ -1,38 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "github.com/elastic/apm-server/model" -) - -func decodeUserExperience(input map[string]interface{}, out *model.UserExperience) { - if input == nil { - return - } - - if !decodeFloat64(input, "cls", &out.CumulativeLayoutShift) { - out.CumulativeLayoutShift = -1 - } - if !decodeFloat64(input, "fid", &out.FirstInputDelay) { - out.FirstInputDelay = -1 - } - if !decodeFloat64(input, "tbt", &out.TotalBlockingTime) { - out.TotalBlockingTime = -1 - } -} diff --git a/model/modeldecoder/field/rum_v3_mapping.go b/model/modeldecoder/field/rum_v3_mapping.go deleted file mode 100644 index be143c2233f..00000000000 --- a/model/modeldecoder/field/rum_v3_mapping.go +++ /dev/null @@ -1,168 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
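decodeUserExperience above encodes absence with a sentinel: any metric missing from the input is set to -1 rather than left at 0, so a legitimate zero measurement stays distinguishable downstream. A self-contained sketch of the same convention; decodeFloat64 here is a local stand-in for the package helper, assuming JSON numbers arrive as float64:

package main

import "fmt"

type UserExperience struct {
    CumulativeLayoutShift float64
    FirstInputDelay       float64
    TotalBlockingTime     float64
}

// decodeFloat64 reports whether key was present as a float64 and,
// if so, stores it in out.
func decodeFloat64(input map[string]interface{}, key string, out *float64) bool {
    v, ok := input[key].(float64)
    if ok {
        *out = v
    }
    return ok
}

func decodeUserExperience(input map[string]interface{}, out *UserExperience) {
    if !decodeFloat64(input, "cls", &out.CumulativeLayoutShift) {
        out.CumulativeLayoutShift = -1 // -1 means "not reported"; 0 is a real value
    }
    if !decodeFloat64(input, "fid", &out.FirstInputDelay) {
        out.FirstInputDelay = -1
    }
    if !decodeFloat64(input, "tbt", &out.TotalBlockingTime) {
        out.TotalBlockingTime = -1
    }
}

func main() {
    var ue UserExperience
    decodeUserExperience(map[string]interface{}{"cls": 0.0}, &ue)
    fmt.Printf("%+v\n", ue) // cls kept as 0, fid and tbt marked -1
}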
- -package field - -var rumV3Mapping = map[string]string{ - "abs_path": "ap", - "action": "ac", - "address": "ad", - "agent": "a", - "attributes": "at", - "breakdown": "b", - "cause": "ca", - "classname": "cn", - "code": "cd", - "colno": "co", - "connectEnd": "ce", - "connectStart": "cs", - "context": "c", - "context_line": "cli", - "culprit": "cl", - "custom": "cu", - "decoded_body_size": "dbs", - "destination": "dt", - "domComplete": "dc", - "domContentLoadedEventEnd": "de", - "domContentLoadedEventStart": "ds", - "domInteractive": "di", - "domLoading": "dl", - "domainLookupEnd": "le", - "domainLookupStart": "ls", - "dropped": "dd", - "duration": "d", - "email": "em", - "encoded_body_size": "ebs", - "env": "en", - "environment": "en", - "error": "e", - "exception": "ex", - "experience": "exp", - "fetchStart": "fs", - "filename": "f", - "firstContentfulPaint": "fp", - "framework": "fw", - "function": "fn", - "handled": "hd", - "headers": "he", - "http": "h", - "http_version": "hve", - "labels": "l", - "language": "la", - "largestContentfulPaint": "lp", - "level": "lv", - "lineno": "li", - "loadEventEnd": "ee", - "loadEventStart": "es", - "log": "log", - "logger_name": "ln", - "marks": "k", - "message": "mg", - "metadata": "m", - "method": "mt", - "metricset": "me", - "module": "mo", - "name": "n", - "navigationTiming": "nt", - "outcome": "o", - "page": "p", - "param_message": "pmg", - "parent_id": "pid", - "parent_idx": "pi", - "port": "po", - "post_context": "poc", - "pre_context": "prc", - "referer": "rf", - "request": "q", - "requestStart": "qs", - "resource": "rc", - "result": "rt", - "response": "r", - "responseEnd": "re", - "responseStart": "rs", - "runtime": "ru", - "sampled": "sm", - "samples": "sa", - "sample_rate": "sr", - "server-timing": "set", - "service": "se", - "span": "y", - "span.self_time.count": "ysc", - "span.self_time.sum.us": "yss", - "span_count": "yc", - "stacktrace": "st", - "start": "s", - "started": "sd", - "status_code": "sc", - "subtype": "su", - "sync": "sy", - "tags": "g", - "timeToFirstByte": "fb", - "trace_id": "tid", - "transaction": "x", - "transaction_id": "xid", - "transaction.breakdown.count": "xbc", - "transaction.duration.count": "xdc", - "transaction.duration.sum.us": "xds", - "transfer_size": "ts", - "type": "t", - "url": "url", - "user": "u", - "username": "un", - "value": "v", - "version": "ve", -} - -var rumV3InverseMapping = make(map[string]string) - -func init() { - for k, v := range rumV3Mapping { - rumV3InverseMapping[v] = k - } -} - -func Mapper(shortFieldNames bool) func(string) string { - if shortFieldNames { - return rumV3Mapper - } - return identityMapper -} - -func InverseMapper(shortFieldNames bool) func(string) string { - if shortFieldNames { - return rumV3InverseMapper - } - return identityMapper -} - -func rumV3Mapper(long string) string { - if short, ok := rumV3Mapping[long]; ok { - return short - } - return long -} - -func rumV3InverseMapper(short string) string { - if long, ok := rumV3InverseMapping[short]; ok { - return long - } - return short -} - -func identityMapper(s string) string { - return s -} diff --git a/model/modeldecoder/generator/bool.go b/model/modeldecoder/generator/bool.go new file mode 100644 index 00000000000..0db47b34086 --- /dev/null +++ b/model/modeldecoder/generator/bool.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +func generateJSONPropertyBool(info *fieldInfo, parent *property, child *property) error { + child.Type.add(TypeNameBool) + parent.Properties[jsonSchemaName(info.field)] = child + return nil +} diff --git a/model/modeldecoder/generator/cmd/main.go b/model/modeldecoder/generator/cmd/main.go index 2068feb12e8..7bd5484b4e8 100644 --- a/model/modeldecoder/generator/cmd/main.go +++ b/model/modeldecoder/generator/cmd/main.go @@ -18,11 +18,13 @@ package main import ( - "bytes" - "go/format" - "os" + "fmt" + "io/ioutil" "path" "path/filepath" + "strings" + + "golang.org/x/tools/imports" "github.com/elastic/apm-server/model/modeldecoder/generator" ) @@ -30,58 +32,75 @@ import ( const ( basePath = "github.com/elastic/apm-server" modeldecoderPath = "model/modeldecoder" + jsonSchemaPath = "docs/spec/" ) var ( importPath = path.Join(basePath, modeldecoderPath) - typPath = path.Join(importPath, "nullable") ) func main() { - genV2Models() - genRUMV3Models() + generateV2() + generateV3RUM() } -func genV2Models() { +func generateV2() { pkg := "v2" - rootObjs := []string{"metadataRoot"} - out := filepath.Join(filepath.FromSlash(modeldecoderPath), pkg, "model_generated.go") - gen, err := generator.NewGenerator(importPath, pkg, typPath, rootObjs) + + p := path.Join(importPath, pkg) + parsed, err := generator.Parse(p) if err != nil { panic(err) } - generate(gen, out) + generateCode(p, pkg, parsed, []string{"metadataRoot", "errorRoot", "metricsetRoot", "spanRoot", "transactionRoot"}) + generateJSONSchema(p, pkg, parsed, []string{"metadata", "errorEvent", "metricset", "span", "transaction"}) } -func genRUMV3Models() { +func generateV3RUM() { pkg := "rumv3" - rootObjs := []string{"metadataRoot"} - out := filepath.Join(filepath.FromSlash(modeldecoderPath), pkg, "model_generated.go") - gen, err := generator.NewGenerator(importPath, pkg, typPath, rootObjs) + p := path.Join(importPath, pkg) + parsed, err := generator.Parse(p) if err != nil { panic(err) } - generate(gen, out) -} - -type gen interface { - Generate() (bytes.Buffer, error) + generateCode(p, pkg, parsed, []string{"metadataRoot", "errorRoot", "transactionRoot"}) + generateJSONSchema(p, pkg, parsed, []string{"metadata", "errorEvent", "span", "transaction"}) } -func generate(g gen, p string) { - b, err := g.Generate() +func generateCode(path string, pkg string, parsed *generator.Parsed, root []string) { + rootTypes := make([]string, len(root)) + for i := 0; i < len(root); i++ { + rootTypes[i] = fmt.Sprintf("%s.%s", path, root[i]) + } + code, err := generator.NewCodeGenerator(parsed, rootTypes) if err != nil { panic(err) } - fmtd, err := format.Source(b.Bytes()) + out := filepath.Join(filepath.FromSlash(modeldecoderPath), pkg, "model_generated.go") + b, err := code.Generate() if err != nil { panic(err) } - f, err := os.Create(p) + formatted, err := imports.Process(out, b.Bytes(), nil) if err != nil { panic(err) } - if _, err := 
f.Write(fmtd); err != nil { + ioutil.WriteFile(out, formatted, 0644) +} + +func generateJSONSchema(path string, pkg string, parsed *generator.Parsed, root []string) { + jsonSchema, err := generator.NewJSONSchemaGenerator(parsed) + if err != nil { panic(err) } + outPath := filepath.Join(filepath.FromSlash(jsonSchemaPath), pkg) + for _, rootEventName := range root { + rootEvent := fmt.Sprintf("%s.%s", path, rootEventName) + b, err := jsonSchema.Generate(outPath, rootEvent) + if err != nil { + panic(err) + } + out := filepath.Join(outPath, fmt.Sprintf("%s.json", strings.TrimSuffix(rootEventName, "Event"))) + ioutil.WriteFile(out, b.Bytes(), 0644) + } } diff --git a/model/modeldecoder/generator/code.go b/model/modeldecoder/generator/code.go new file mode 100644 index 00000000000..a33763e3f7a --- /dev/null +++ b/model/modeldecoder/generator/code.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +import ( + "bytes" + "fmt" + "go/types" + "io" + "reflect" + "sort" + "strings" + + "github.com/pkg/errors" +) + +const ( + anonymousField = "_" +) + +// CodeGenerator creates following struct methods +// `IsSet() bool` +// `Reset()` +// `validate() error` +// on all exported and anonymous structs that are referenced +// by at least one of the root types +type CodeGenerator struct { + buf bytes.Buffer + parsed *Parsed + rootObjs []structType + + // keep track of already processed types in case one type is + // referenced multiple times + processedTypes map[string]struct{} +} + +type validationGenerator func(io.Writer, []structField, structField, bool) error + +// NewCodeGenerator takes an importPath and the package name for which +// the type definitions should be loaded. +// The nullableTypePath is used to implement validation rules specific to types +// of the nullable package. The generator creates methods only for types referenced +// directly or indirectly by any of the root types. +func NewCodeGenerator(parsed *Parsed, rootTypes []string) (*CodeGenerator, error) { + g := CodeGenerator{ + parsed: parsed, + rootObjs: make([]structType, len(rootTypes)), + processedTypes: make(map[string]struct{}), + } + for i := 0; i < len(rootTypes); i++ { + rootStruct, ok := parsed.structTypes[rootTypes[i]] + if !ok { + return nil, fmt.Errorf("object with root key %s not found", rootTypes[i]) + } + g.rootObjs[i] = rootStruct + } + return &g, nil +} + +// Generate generates the code for given root structs and all +// dependencies and returns it as bytes.Buffer +func (g *CodeGenerator) Generate() (bytes.Buffer, error) { + fmt.Fprintf(&g.buf, ` +// Code generated by "modeldecoder/generator". DO NOT EDIT. 
+ +package %s + +import ( + "fmt" + "encoding/json" + "github.com/pkg/errors" + "regexp" + "unicode/utf8" +) + +var ( +`[1:], g.parsed.pkgName) + for _, name := range sortKeys(g.parsed.patternVariables) { + fmt.Fprintf(&g.buf, ` +%sRegexp = regexp.MustCompile(%s) +`[1:], name, name) + } + fmt.Fprint(&g.buf, ` +) +`[1:]) + + // run generator code + for _, rootObj := range g.rootObjs { + if err := g.generate(rootObj, ""); err != nil { + return g.buf, errors.Wrap(err, "code generator") + } + } + return g.buf, nil +} + +// create flattened field keys by recursively iterating through the struct types; +// there is only struct local knowledge and no knowledge about the parent, +// deriving the absolute key is not possible in scenarios where one struct +// type is referenced as a field in multiple struct types +func (g *CodeGenerator) generate(st structType, key string) error { + if _, ok := g.processedTypes[st.name]; ok { + return nil + } + g.processedTypes[st.name] = struct{}{} + if err := g.generateIsSet(st, key); err != nil { + return err + } + if err := g.generateReset(st, key); err != nil { + return err + } + if err := g.generateValidation(st, key); err != nil { + return err + } + if key != "" { + key += "." + } + for _, field := range st.fields { + var childTyp types.Type + switch fieldTyp := field.Type().Underlying().(type) { + case *types.Map: + childTyp = fieldTyp.Elem() + case *types.Slice: + childTyp = fieldTyp.Elem() + default: + childTyp = field.Type() + } + if child, ok := g.customStruct(childTyp); ok { + if err := g.generate(child, fmt.Sprintf("%s%s", key, jsonName(field))); err != nil { + return err + } + } + } + return nil +} + +// generateIsSet creates `IsSet` methods for struct fields, +// indicating if the fields have been initialized; +// it only considers exported fields, aligned with standard marshal behavior +func (g *CodeGenerator) generateIsSet(structTyp structType, key string) error { + if len(structTyp.fields) == 0 { + return fmt.Errorf("unhandled struct %s (does not have any exported fields)", structTyp.name) + } + fmt.Fprintf(&g.buf, ` +func (val *%s) IsSet() bool { + return`, structTyp.name) + if key != "" { + key += "." + } + prefix := ` ` + for i := 0; i < len(structTyp.fields); i++ { + f := structTyp.fields[i] + if !f.Exported() { + continue + } + g.buf.WriteString(prefix) + if err := generateIsSet(&g.buf, f, "val."); err != nil { + return errors.Wrapf(err, "error generating IsSet() for '%s%s'", key, jsonName(f)) + } + prefix = ` || ` + } + fmt.Fprint(&g.buf, ` +} +`) + return nil +} + +func generateIsSet(w io.Writer, field structField, fieldSelectorPrefix string) error { + switch typ := field.Type().Underlying(); typ.(type) { + case *types.Slice, *types.Map: + fmt.Fprintf(w, "(len(%s%s) > 0)", fieldSelectorPrefix, field.Name()) + return nil + case *types.Struct: + fmt.Fprintf(w, "%s%s.IsSet()", fieldSelectorPrefix, field.Name()) + return nil + default: + return fmt.Errorf("unhandled type %T generating IsSet() for '%s'", typ, jsonName(field)) + } +} + +// generateReset creates `Reset` methods for struct fields setting them to +// their zero values or calling their `Reset` methods +// it only considers exported fields +func (g *CodeGenerator) generateReset(structTyp structType, key string) error { + fmt.Fprintf(&g.buf, ` +func (val *%s) Reset() { +`, structTyp.name) + if key != "" { + key += "." 
+ } + for _, f := range structTyp.fields { + if !f.Exported() { + continue + } + switch t := f.Type().Underlying().(type) { + case *types.Slice: + // the slice len is set to zero, not returning the underlying + // memory to the garbage collector; when the size of slices differs + // this potentially leads to keeping more memory allocated than required; + + // if slice type is a model struct, + // call its Reset() function + if _, ok := g.customStruct(t.Elem()); ok { + fmt.Fprintf(&g.buf, ` +for i := range val.%s{ + val.%s[i].Reset() +} +`[1:], f.Name(), f.Name()) + } + // then reset size of slice to 0 + fmt.Fprintf(&g.buf, ` +val.%s = val.%s[:0] +`[1:], f.Name(), f.Name()) + + case *types.Map: + // the map is cleared, not returning the underlying memory to + // the garbage collector; when map size differs this potentially + // leads to keeping more memory allocated than required + fmt.Fprintf(&g.buf, ` +for k := range val.%s { + delete(val.%s, k) +} +`[1:], f.Name(), f.Name()) + + case *types.Struct: + fmt.Fprintf(&g.buf, ` +val.%s.Reset() +`[1:], f.Name()) + default: + return fmt.Errorf("unhandled type %T for Reset() for '%s%s'", t, key, jsonName(f)) + } + } + fmt.Fprint(&g.buf, ` +} +`[1:]) + return nil +} + +// generateValidation creates `validate` methods for struct fields +// it only considers exported and anonymous fields +func (g *CodeGenerator) generateValidation(structTyp structType, key string) error { + fmt.Fprintf(&g.buf, ` +func (val *%s) validate() error { +`, structTyp.name) + var isRoot bool + for _, rootObjs := range g.rootObjs { + if structTyp.name == rootObjs.name { + isRoot = true + break + } + } + if !isRoot { + fmt.Fprint(&g.buf, ` +if !val.IsSet() { + return nil +} +`[1:]) + } + + var validation validationGenerator + for i := 0; i < len(structTyp.fields); i++ { + f := structTyp.fields[i] + // according to https://golang.org/pkg/go/types/#Var.Anonymous + // f.Anonymous() actually checks if f is embedded, not anonymous, + // so we need to do a name check instead + if !f.Exported() && f.Name() != anonymousField { + continue + } + var custom bool + switch f.Type().String() { + case nullableTypeString: + validation = generateNullableStringValidation + case nullableTypeInt: + validation = generateNullableIntValidation + case nullableTypeFloat64: + // right now we can reuse the validation rules for int + // and only introduce dedicated rules for float64 when they diverge + validation = generateNullableIntValidation + case nullableTypeInterface: + validation = generateNullableInterfaceValidation + default: + switch t := f.Type().Underlying().(type) { + case *types.Slice: + validation = generateSliceValidation + _, custom = g.customStruct(t.Elem()) + case *types.Map: + validation = generateMapValidation + _, custom = g.customStruct(t.Elem()) + case *types.Struct: + validation = generateStructValidation + _, custom = g.customStruct(f.Type()) + default: + return errors.Wrap(fmt.Errorf("unhandled type %T", t), flattenName(key, f)) + } + } + if err := validation(&g.buf, structTyp.fields, f, custom); err != nil { + return errors.Wrap(err, flattenName(key, f)) + } + } + fmt.Fprint(&g.buf, ` +return nil +} +`[1:]) + return nil +} + +func (g *CodeGenerator) customStruct(typ types.Type) (t structType, ok bool) { + t, ok = g.parsed.structTypes[typ.String()] + return +} + +func flattenName(key string, f structField) string { + if key != "" { + key += "." 
+ } + return fmt.Sprintf("%s%s", key, jsonName(f)) +} + +func jsonName(f structField) string { + parts := parseTag(f.tag, "json") + if len(parts) == 0 { + return strings.ToLower(f.Name()) + } + return parts[0] +} + +func parseTag(structTag reflect.StructTag, tagName string) []string { + tag, ok := structTag.Lookup(tagName) + if !ok { + return []string{} + } + if tag == "-" { + return nil + } + return strings.Split(tag, ",") +} + +func sortKeys(input map[string]string) []string { + keys := make(sort.StringSlice, 0, len(input)) + for k := range input { + keys = append(keys, k) + } + keys.Sort() + return keys +} diff --git a/model/modeldecoder/generator/generator.go b/model/modeldecoder/generator/generator.go deleted file mode 100644 index d9f92e3005e..00000000000 --- a/model/modeldecoder/generator/generator.go +++ /dev/null @@ -1,590 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package generator - -import ( - "bytes" - "errors" - "fmt" - "go/ast" - "go/token" - "go/types" - "path" - "path/filepath" - "reflect" - "sort" - "strings" - - "golang.org/x/tools/go/packages" -) - -// Generator creates following struct methods -// `IsSet() bool` -// `Reset()` -// `validate() error` -// on all structs (exported and unexported) that are referenced -// by at least one of the root types -type Generator struct { - buf bytes.Buffer - pkgName string - rootObjs map[string]structType - // parsed structs from loading types from the provided package - structTypes structTypes - // keep track of already processed types in case one type is - // referenced multiple times - processedTypes map[string]struct{} - - nullableString, nullableInt, nullableInterface string -} - -// NewGenerator takes an importPath and the package name for which -// the type definitions should be loaded. -// The typPkg is used to implement validation rules specific to types -// of the package. The generator creates methods only for types referenced -// directly or indirectly by any of the root types. 
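The jsonName and parseTag helpers in the new code.go above resolve a field's wire name from its `json` struct tag, falling back to the lowercased Go field name, and treat a "-" tag as an explicit exclusion. A standalone sketch of the same lookup using reflect.StructTag directly; the sample struct and its tags are hypothetical:

package main

import (
    "fmt"
    "reflect"
    "strings"
)

func parseTag(structTag reflect.StructTag, tagName string) []string {
    tag, ok := structTag.Lookup(tagName)
    if !ok {
        return []string{}
    }
    if tag == "-" {
        return nil // explicitly excluded from output
    }
    return strings.Split(tag, ",")
}

func jsonName(f reflect.StructField) string {
    parts := parseTag(f.Tag, "json")
    if len(parts) == 0 {
        return strings.ToLower(f.Name) // fall back to the lowercased Go name
    }
    return parts[0]
}

func main() {
    type sample struct {
        TraceID string `json:"trace_id" validate:"required,max=1024"`
        Plain   string
    }
    t := reflect.TypeOf(sample{})
    f0, _ := t.FieldByName("TraceID")
    f1, _ := t.FieldByName("Plain")
    fmt.Println(jsonName(f0), jsonName(f1)) // trace_id plain
    fmt.Println(parseTag(f0.Tag, "validate")) // [required max=1024]
}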
-func NewGenerator(importPath string, pkg string, typPath string, - root []string) (*Generator, error) { - loaded, err := loadPackage(path.Join(importPath, pkg)) - if err != nil { - return nil, err - } - structTypes, err := parseStructTypes(loaded) - if err != nil { - return nil, err - } - g := Generator{ - pkgName: loaded.Types.Name(), - structTypes: structTypes, - rootObjs: make(map[string]structType, len(root)), - processedTypes: make(map[string]struct{}), - nullableString: fmt.Sprintf("%s.String", typPath), - nullableInt: fmt.Sprintf("%s.Int", typPath), - nullableInterface: fmt.Sprintf("%s.Interface", typPath), - } - for _, r := range root { - rootObjPath := fmt.Sprintf("%s.%s", filepath.Join(importPath, pkg), r) - rootObj, ok := structTypes[rootObjPath] - if !ok { - return nil, fmt.Errorf("object with root key %s not found", rootObjPath) - } - g.rootObjs[rootObj.name] = rootObj - } - return &g, nil -} - -// Generate writes generated methods to the buffer -func (g *Generator) Generate() (bytes.Buffer, error) { - fmt.Fprintf(&g.buf, ` -// Code generated by "modeldecoder/generator". DO NOT EDIT. - -package %s - -import ( - "encoding/json" - "fmt" - "unicode/utf8" -) -`[1:], g.pkgName) - - for _, rootObj := range g.rootObjs { - if err := g.generate(rootObj, ""); err != nil { - return g.buf, err - } - } - return g.buf, nil -} - -const ( - ruleRequired = "required" - ruleMax = "max" - ruleMaxVals = "maxVals" - rulePattern = "pattern" - rulePatternKeys = "patternKeys" - ruleTypes = "types" - ruleTypesVals = "typesVals" -) - -type structTypes map[string]structType - -type structType struct { - name string - fields []structField -} -type structField struct { - name string - typ types.Type - tag reflect.StructTag -} - -// create flattened field keys by recursively iterating through the struct types; -// there is only struct local knowledge and no knowledge about the parent, -// deriving the absolute key is not possible in scenarios where one struct -// type is referenced as a field in multiple struct types -func (g *Generator) generate(st structType, key string) error { - if _, ok := g.processedTypes[st.name]; ok { - return nil - } - g.processedTypes[st.name] = struct{}{} - if err := g.generateIsSet(st, key); err != nil { - return err - } - if err := g.generateReset(st, key); err != nil { - return err - } - if err := g.generateValidation(st, key); err != nil { - return err - } - if key != "" { - key += "." - } - for _, f := range st.fields { - if child, ok := g.structTypes[f.typ.String()]; ok { - if err := g.generate(child, fmt.Sprintf("%s%s", key, jsonName(f))); err != nil { - return err - } - } - } - return nil -} - -func (g *Generator) generateIsSet(structTyp structType, key string) error { - fmt.Fprintf(&g.buf, ` -func (m *%s) IsSet() bool { - return`, structTyp.name) - if key != "" { - key += "." - } - for i := 0; i < len(structTyp.fields); i++ { - prefix := ` ||` - if i == 0 { - prefix = `` - } - f := structTyp.fields[i] - - switch t := f.typ.Underlying().(type) { - case *types.Slice, *types.Map: - fmt.Fprintf(&g.buf, `%s len(m.%s) > 0`, prefix, f.name) - case *types.Struct: - fmt.Fprintf(&g.buf, `%s m.%s.IsSet()`, prefix, f.name) - default: - return fmt.Errorf("unhandled type %T for IsSet() for '%s%s'", t, key, jsonName(f)) - } - } - fmt.Fprint(&g.buf, ` -} -`) - return nil -} - -func (g *Generator) generateReset(structTyp structType, key string) error { - fmt.Fprintf(&g.buf, ` -func (m *%s) Reset() { -`, structTyp.name) - if key != "" { - key += "." 
- } - for _, f := range structTyp.fields { - switch t := f.typ.Underlying().(type) { - case *types.Slice: - // the slice len is set to zero, not returning the underlying - // memory to the garbage collector; when the size of slices differs - // this potentially leads to keeping more memory allocated than required; - // at the moment metadata.process.argv is the only slice - fmt.Fprintf(&g.buf, ` -m.%s = m.%s[:0] -`[1:], f.name, f.name) - case *types.Map: - // the map is cleared, not returning the underlying memory to - // the garbage collector; when map size differs this potentially - // leads to keeping more memory allocated than required - fmt.Fprintf(&g.buf, ` -for k := range m.%s { - delete(m.%s, k) -} -`[1:], f.name, f.name) - case *types.Struct: - fmt.Fprintf(&g.buf, ` -m.%s.Reset() -`[1:], f.name) - default: - return fmt.Errorf("unhandled type %T for Reset() for '%s%s'", t, key, jsonName(f)) - } - } - fmt.Fprint(&g.buf, ` -} -`[1:]) - return nil -} - -func (g *Generator) generateValidation(structTyp structType, key string) error { - fmt.Fprintf(&g.buf, ` -func (m *%s) validate() error { -`, structTyp.name) - if _, ok := g.rootObjs[structTyp.name]; !ok { - fmt.Fprint(&g.buf, ` -if !m.IsSet() { - return nil -} -`[1:]) - } - - if key != "" { - key += "." - } - for i := 0; i < len(structTyp.fields); i++ { - f := structTyp.fields[i] - flattenedName := fmt.Sprintf("%s%s", key, jsonName(f)) - - // if field is a model struct, call its validation function - if _, ok := g.structTypes[f.typ.String()]; ok { - fmt.Fprintf(&g.buf, ` -if err := m.%s.validate(); err != nil{ - return err -} -`[1:], f.name) - } - - parts, err := validationTag(f.tag) - if err != nil { - return fmt.Errorf("'%s': %w", flattenedName, err) - } - // use a sorted slice of tag keys to create tag related - // validation methods in the same output order on every run - var sortedRules = make([]string, 0, len(parts)) - for k := range parts { - sortedRules = append(sortedRules, k) - } - sort.Slice(sortedRules, func(i, j int) bool { - return sortedRules[i] < sortedRules[j] - }) - - switch t := f.typ.Underlying().(type) { - case *types.Slice: - for _, rule := range sortedRules { - switch rule { - case ruleRequired: - fmt.Fprintf(&g.buf, ` -if len(m.%s) == 0{ - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - default: - return fmt.Errorf("unhandled tag rule '%s' for '%s'", rule, flattenedName) - } - } - case *types.Map: - var required bool - if _, ok := parts[ruleRequired]; ok { - required = true - delete(parts, ruleRequired) - fmt.Fprintf(&g.buf, ` -if len(m.%s) == 0{ - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - } - if len(parts) == 0 { - continue - } - // iterate over map once and run checks - fmt.Fprintf(&g.buf, ` -for k,v := range m.%s{ -`[1:], f.name) - if regex, ok := parts[rulePatternKeys]; ok { - delete(parts, rulePatternKeys) - fmt.Fprintf(&g.buf, ` -if !%s.MatchString(k){ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], regex, rulePatternKeys, regex, flattenedName) - } - if types, ok := parts[ruleTypesVals]; ok { - delete(parts, ruleTypesVals) - fmt.Fprintf(&g.buf, ` -switch t := v.(type){ -`[1:]) - if !required { - fmt.Fprintf(&g.buf, ` -case nil: -`[1:]) - } - for _, typ := range strings.Split(types, ";") { - if typ == "number" { - typ = "json.Number" - } - fmt.Fprintf(&g.buf, ` -case %s: -`[1:], typ) - if typ == "string" { - if maxVal, ok := parts[ruleMaxVals]; ok { - delete(parts, ruleMaxVals) - fmt.Fprintf(&g.buf, ` -if 
utf8.RuneCountInString(t) > %s{ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], maxVal, ruleMaxVals, maxVal, flattenedName) - } - } - } - fmt.Fprintf(&g.buf, ` -default: - return fmt.Errorf("validation rule '%s(%s)' violated for '%s' for key %%s",k) -} -`[1:], ruleTypesVals, types, flattenedName) - } - // close iteration over map - fmt.Fprintf(&g.buf, ` -} -`[1:]) - if len(parts) > 0 { - return fmt.Errorf("unhandled tag rule(s) '%v' for '%s'", parts, flattenedName) - } - case *types.Struct: - switch f.typ.String() { - //TODO(simitt): can these type checks be more generic? - case g.nullableString: - for _, rule := range sortedRules { - val := parts[rule] - switch rule { - case ruleRequired: - fmt.Fprintf(&g.buf, ` -if !m.%s.IsSet() { - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - case ruleMax: - fmt.Fprintf(&g.buf, ` -if utf8.RuneCountInString(m.%s.Val) > %s{ -return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], f.name, val, rule, val, flattenedName) - case rulePattern: - fmt.Fprintf(&g.buf, ` -if !%s.MatchString(m.%s.Val){ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], val, f.name, rule, val, flattenedName) - default: - return fmt.Errorf("unhandled tag rule '%s' for '%s'", rule, flattenedName) - } - } - case g.nullableInt: - for _, rule := range sortedRules { - val := parts[rule] - switch rule { - case ruleRequired: - fmt.Fprintf(&g.buf, ` -if !m.%s.IsSet() { - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - case ruleMax: - fmt.Fprintf(&g.buf, ` -if m.%s.Val > %s{ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], f.name, val, rule, val, flattenedName) - default: - return fmt.Errorf("unhandled tag rule '%s' for '%s'", rule, flattenedName) - } - } - case g.nullableInterface: - var required bool - if _, ok := parts[ruleRequired]; ok { - required = true - } - for _, rule := range sortedRules { - val := parts[rule] - switch rule { - case ruleRequired: - fmt.Fprintf(&g.buf, ` -if !m.%s.IsSet() { - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - case ruleMax: - //handled in switch statement for string types - case ruleTypes: - fmt.Fprintf(&g.buf, ` -switch t := m.%s.Val.(type){ -`[1:], f.name) - for _, typ := range strings.Split(val, ";") { - if typ == "int" { - fmt.Fprintf(&g.buf, ` -case json.Number: -`[1:]) - fmt.Fprintf(&g.buf, ` -if _, err := t.Int64(); err != nil{ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], rule, val, flattenedName) - } - fmt.Fprintf(&g.buf, ` -case %s: -`[1:], typ) - if typ == "string" { - if max, ok := parts[ruleMax]; ok { - fmt.Fprintf(&g.buf, ` -if utf8.RuneCountInString(t) > %s{ - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], max, ruleMax, max, flattenedName) - } - } - } - if !required { - fmt.Fprintf(&g.buf, ` -case nil: -`[1:]) - } - fmt.Fprintf(&g.buf, ` -default: - return fmt.Errorf("validation rule '%s(%s)' violated for '%s'") -} -`[1:], rule, val, flattenedName) - default: - return fmt.Errorf("unhandled tag rule '%s' for '%s'", rule, flattenedName) - } - } - default: - for _, rule := range sortedRules { - switch rule { - case ruleRequired: - fmt.Fprintf(&g.buf, ` -if !m.%s.IsSet(){ - return fmt.Errorf("'%s' required") -} -`[1:], f.name, flattenedName) - default: - return fmt.Errorf("unhandled tag rule '%s' for '%s'", rule, flattenedName) - } - } - } - default: - return fmt.Errorf("unhandled type %T for '%s'", t, 
flattenedName) - } - } - fmt.Fprint(&g.buf, ` - return nil -} -`[1:]) - return nil -} - -func jsonName(f structField) string { - parts := parseTag(f.tag, "json") - if len(parts) == 0 { - return strings.ToLower(f.name) - } - return parts[0] -} - -func loadPackage(pkg string) (*packages.Package, error) { - cfg := packages.Config{ - Mode: packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo} - pkgs, err := packages.Load(&cfg, pkg) - if err != nil { - return nil, err - } - if packages.PrintErrors(pkgs) > 0 { - return nil, errors.New("packages load error") - } - return pkgs[0], nil -} - -func parseStructTypes(pkg *packages.Package) (structTypes, error) { - structs := make(structTypes) - for _, syntax := range pkg.Syntax { - for _, decl := range syntax.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok || genDecl.Tok != token.TYPE { - continue - } - for _, spec := range genDecl.Specs { - typeSpec, ok := spec.(*ast.TypeSpec) - if !ok { - continue - } - obj := pkg.TypesInfo.Defs[typeSpec.Name] - if obj == nil { - continue - } - named := obj.(*types.TypeName).Type().(*types.Named) - typesStruct, ok := named.Underlying().(*types.Struct) - if !ok { - return nil, fmt.Errorf("unhandled type %T", named.Underlying()) - } - numFields := typesStruct.NumFields() - structFields := make([]structField, 0, numFields) - for i := 0; i < numFields; i++ { - f := typesStruct.Field(i) - if !f.Exported() { - continue - } - structFields = append(structFields, structField{ - name: f.Name(), - typ: f.Type(), - tag: reflect.StructTag(typesStruct.Tag(i)), - }) - } - structs[obj.Type().String()] = structType{name: obj.Name(), fields: structFields} - } - } - } - return structs, nil -} - -func parseTag(structTag reflect.StructTag, tagName string) []string { - tag, ok := structTag.Lookup(tagName) - if !ok || tag == "-" { - return nil - } - return strings.Split(tag, ",") -} - -func validationTag(structTag reflect.StructTag) (map[string]string, error) { - parts := parseTag(structTag, "validate") - m := make(map[string]string, len(parts)) - for _, rule := range parts { - parts := strings.Split(rule, "=") - switch len(parts) { - case 1: - // valueless rule e.g. required - if rule != parts[0] { - return nil, fmt.Errorf("malformed tag '%s'", rule) - } - switch rule { - case ruleRequired: - m[rule] = "" - default: - return nil, fmt.Errorf("unhandled tag rule '%s'", rule) - } - case 2: - // rule=value - m[parts[0]] = parts[1] - switch parts[0] { - case ruleMax, ruleMaxVals, rulePattern, rulePatternKeys, ruleTypes, ruleTypesVals: - default: - return nil, fmt.Errorf("unhandled tag rule '%s'", parts[0]) - } - default: - return nil, fmt.Errorf("malformed tag '%s'", rule) - } - } - return m, nil -} diff --git a/model/modeldecoder/generator/generatortest/model.go b/model/modeldecoder/generator/generatortest/model.go new file mode 100644 index 00000000000..3651309fcb9 --- /dev/null +++ b/model/modeldecoder/generator/generatortest/model.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generatortest + +//lint:file-ignore U1000 Ignore all unused code, it's used for json schema code generation in tests + +import ( + "github.com/elastic/apm-server/model/modeldecoder/nullable" + "github.com/elastic/beats/v7/libbeat/common" +) + +var ( + patternA = `^[^."]*$` + patternB = `^[ab]+$` + patternC = `^[c]+$` + enumA = []string{"open", "closed"} +) + +type String struct { + Required nullable.String `json:"required" validate:"required,enum=enumA"` + NullableEnum nullable.String `json:"nullable_enum" validate:"enum=enumA"` + Nullable nullable.String `json:"nullable" validate:"maxLength=5,minLength=2,pattern=patternA"` +} + +type Number struct { + Required nullable.Int `json:"required" validate:"required,max=250,min=1"` + Nullable nullable.Float64 `json:"nullable" validate:"max=15.9,min=0.5"` +} + +type Bool struct { + Required nullable.Bool `json:"required" validate:"required"` + Nullable nullable.Bool `json:"nullable"` +} + +type HTTPHeader struct { + Required nullable.HTTPHeader `json:"required" validate:"required"` + Nullable nullable.HTTPHeader `json:"nullable"` +} + +type Interface struct { + Required nullable.Interface `json:"required" validate:"required,inputTypes=string;int;float64;bool;object,maxLength=5,minLength=2,pattern=patternA,max=250,min=1.5"` + Nullable nullable.Interface `json:"nullable" validate:"inputTypes=string,enum=enumA"` +} + +type Map struct { + Required common.MapStr `json:"required" validate:"required,inputTypesVals=string;bool;number,maxLengthVals=5,patternKeys=patternB"` + Nullable common.MapStr `json:"nullable"` + Nested map[string]NestedMap `json:"nested_a" validate:"patternKeys=patternB"` + StructMap NestedStruct `json:"nested_b"` +} + +type NestedMap struct { + Required nullable.Float64 `json:"required" validate:"required"` +} + +type NestedStruct struct { + A map[string]NestedStructMap `json:"-" validate:"patternKeys=patternB"` +} + +type NestedStructMap struct { + B map[string]string `json:"-" validate:"patternKeys=patternC"` +} + +type Slice struct { + Strings []string `json:"required" validate:"required,maxLength=3,minLength=2"` + Nullable []string `json:"nullable" validate:"pattern=patternB"` + Children []SliceA `json:"children"` +} + +type SliceA struct { + Number nullable.Float64 `json:"number"` + Slices []SliceA `json:"children"` +} + +type RequiredIfAny struct { + A nullable.String `json:"a" validate:"requiredIfAny=b;c"` + B nullable.String `json:"b"` + C nullable.String `json:"c" validate:"requiredIfAny=b"` + D nullable.String `json:"d" validate:"required"` +} + +type RequiredAnyOf struct { + A nullable.Int + B nullable.Int + _ struct{} `validate:"requiredAnyOf=a;b"` +} + +type Exported struct { + a nullable.Int + _ nullable.Int + B nullable.Int +} diff --git a/model/modeldecoder/generator/httpheader.go b/model/modeldecoder/generator/httpheader.go new file mode 100644 index 00000000000..555c40330e9 --- /dev/null +++ b/model/modeldecoder/generator/httpheader.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +func generateJSONPropertyHTTPHeader(info *fieldInfo, parent *property, child *property) error { + child.Type.add(TypeNameObject) + child.PatternProperties = make(map[string]*property) + child.PatternProperties[patternHTTPHeaders] = &property{ + Type: &propertyType{names: []propertyTypeName{TypeNameArray, TypeNameString}}, + Items: &property{Type: &propertyType{names: []propertyTypeName{TypeNameString}, required: true}}} + child.AdditionalProperties = new(bool) + parent.Properties[jsonSchemaName(info.field)] = child + return nil +} diff --git a/model/modeldecoder/generator/integer.go b/model/modeldecoder/generator/integer.go new file mode 100644 index 00000000000..bb4e1f5ef91 --- /dev/null +++ b/model/modeldecoder/generator/integer.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +import ( + "encoding/json" +) + +func generateJSONPropertyInteger(info *fieldInfo, parent *property, child *property) error { + child.Type.add(TypeNameInteger) + parent.Properties[jsonSchemaName(info.field)] = child + return setPropertyRulesInteger(info, child) +} + +func setPropertyRulesInteger(info *fieldInfo, p *property) error { + for tagName, tagValue := range info.tags { + switch tagName { + case tagMax: + p.Max = json.Number(tagValue) + delete(info.tags, tagName) + case tagMin: + p.Min = json.Number(tagValue) + delete(info.tags, tagName) + } + } + return nil +} diff --git a/model/modeldecoder/generator/interface.go b/model/modeldecoder/generator/interface.go new file mode 100644 index 00000000000..9386720df43 --- /dev/null +++ b/model/modeldecoder/generator/interface.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"fmt"
+)
+
+func generateJSONPropertyInterface(info *fieldInfo, parent *property, child *property) error {
+	name := jsonSchemaName(info.field)
+	inputTypes, ok := info.tags[tagInputTypes]
+	if ok {
+		propTypes, err := propertyTypesFromTag(tagInputTypes, inputTypes)
+		if err != nil {
+			return err
+		}
+		for _, t := range propTypes {
+			switch t {
+			case TypeNameBool:
+				if err := generateJSONPropertyBool(info, parent, child); err != nil {
+					return err
+				}
+			case TypeNameString:
+				if _, ok := info.tags[tagEnum]; ok && len(propTypes) > 1 {
+					return fmt.Errorf("validation tag %s not allowed when multiple values defined for tag %s", tagEnum, tagInputTypes)
+				}
+				if err := generateJSONPropertyString(info, parent, child); err != nil {
+					return err
+				}
+			case TypeNameInteger:
+				if err := generateJSONPropertyInteger(info, parent, child); err != nil {
+					return err
+				}
+			case TypeNameNumber:
+				if err := generateJSONPropertyJSONNumber(info, parent, child); err != nil {
+					return err
+				}
+			case TypeNameObject:
+				child.Type.add(TypeNameObject)
+			default:
+				return fmt.Errorf("unhandled value %s for tag %s", t, tagInputTypes)
+			}
+		}
+		child.Type.names = propTypes
+		delete(info.tags, tagInputTypes)
+	} else {
+		// no type is specified for the interface, therefore all input types are allowed;
+		// set the type to nil if the property is not required, otherwise only reset the type names
+		if !child.Type.required {
+			child.Type = nil
+		} else {
+			child.Type.names = nil
+		}
+	}
+	// NOTE(simitt): targetTypes have never been reflected in the schema
+	delete(info.tags, tagTargetType)
+	parent.Properties[name] = child
+	return nil
+}
diff --git a/model/modeldecoder/generator/jsonnumber.go b/model/modeldecoder/generator/jsonnumber.go
new file mode 100644
index 00000000000..ddc883dd572
--- /dev/null
+++ b/model/modeldecoder/generator/jsonnumber.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
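+
+// Illustrative sketch (an inference from the generatortest fixtures and the
+// propertyType marshalling rules in jsonschema.go, not an authoritative spec):
+// a model field declared as
+//
+//	Nullable nullable.Float64 `json:"nullable" validate:"max=15.9,min=0.5"`
+//
+// should be rendered by generateJSONPropertyJSONNumber below as the schema property
+//
+//	"nullable": {"type": ["null", "number"], "maximum": 15.9, "minimum": 0.5}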
+
+package generator
+
+import "encoding/json"
+
+func generateJSONPropertyJSONNumber(info *fieldInfo, parent *property, child *property) error {
+	child.Type.add(TypeNameNumber)
+	parent.Properties[jsonSchemaName(info.field)] = child
+	return setPropertyRulesNumber(info, child)
+}
+
+func setPropertyRulesNumber(info *fieldInfo, p *property) error {
+	for tagName, tagValue := range info.tags {
+		switch tagName {
+		case tagMax:
+			p.Max = json.Number(tagValue)
+			delete(info.tags, tagName)
+		case tagMin:
+			p.Min = json.Number(tagValue)
+			delete(info.tags, tagName)
+		}
+	}
+	return nil
+}
diff --git a/model/modeldecoder/generator/jsonschema.go b/model/modeldecoder/generator/jsonschema.go
new file mode 100644
index 00000000000..8b3b722f54b
--- /dev/null
+++ b/model/modeldecoder/generator/jsonschema.go
@@ -0,0 +1,353 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"go/types"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// JSONSchemaGenerator holds the parsed package information
+// from which it can generate a JSON schema
+type JSONSchemaGenerator struct {
+	parsed *Parsed
+}
+
+// NewJSONSchemaGenerator takes parsed package information as
+// its input parameter and returns a JSONSchemaGenerator instance
+func NewJSONSchemaGenerator(parsed *Parsed) (*JSONSchemaGenerator, error) {
+	return &JSONSchemaGenerator{parsed: parsed}, nil
+}
+
+// Generate creates a JSON schema for the given root type,
+// based on the parsed information from the JSONSchemaGenerator.
+// The schema is returned as a bytes.Buffer
+func (g *JSONSchemaGenerator) Generate(idPath string, rootType string) (bytes.Buffer, error) {
+	root, ok := g.parsed.structTypes[rootType]
+	if !ok {
+		return bytes.Buffer{}, fmt.Errorf("object with root key %s not found", rootType)
+	}
+	typ := propertyType{names: []propertyTypeName{TypeNameObject}, required: true}
+	property := property{Type: &typ, Properties: make(map[string]*property), Description: root.comment}
+	if err := g.generate(root, "", &property); err != nil {
+		return bytes.Buffer{}, errors.Wrap(err, "json-schema generator")
+	}
+	id := filepath.Join(idPath, strings.TrimSuffix(root.name, "Event"))
+	b, err := json.MarshalIndent(schema{ID: id, property: property}, "", " ")
+	return *bytes.NewBuffer(b), errors.Wrap(err, "json-schema generator")
+}
+
+func (g *JSONSchemaGenerator) generate(st structType, key string, prop *property) error {
+	if key != "" {
+		key += "."
+	}
+	for _, f := range st.fields {
+		var err error
+		name := jsonSchemaName(f)
+		childProp := property{Properties: make(map[string]*property), Type: &propertyType{}, Description: f.comment}
+		tags, err := validationTag(f.tag)
+		if err != nil {
+			return errors.Wrap(err, fmt.Sprintf("%s%s", key, name))
+		}
+		// handle generic tag rules applicable to all types
+		if _, ok := tags[tagRequired]; ok {
+			childProp.Type.required = true
+			prop.Required = append(prop.Required, name)
+			delete(tags, tagRequired)
+		}
+		if val, ok := tags[tagRequiredIfAny]; ok {
+			// add all property names as entries to the property's AllOf collection;
+			// after processing all child fields, iterate through the AllOf collection
+			// and ensure the types for the properties in the If and Then clauses do not allow null values;
+			// this can only be done after all fields have been processed and their types are known
+			for _, ifGiven := range strings.Split(val, ";") {
+				prop.AllOf = append(prop.AllOf,
+					&property{
+						If:   &property{Required: []string{ifGiven}},
+						Then: &property{Required: []string{name}}})
+			}
+			delete(tags, tagRequiredIfAny)
+		}
+		if val, ok := tags[tagRequiredAnyOf]; ok {
+			// add all property names as entries to the property's AnyOf collection;
+			// after processing all child fields, iterate through the AnyOf collection
+			// and ensure the types for the properties do not allow null values;
+			// this can only be done after all fields have been processed and their types are known
+			for _, anyOf := range strings.Split(val, ";") {
+				prop.AnyOf = append(prop.AnyOf, &property{Required: []string{anyOf}})
+			}
+			// remove the handled tag so it does not trip the unhandled-tag check below
+			delete(tags, tagRequiredAnyOf)
+		}
+
+		if !f.Exported() {
+			continue
+		}
+		flattenedName := fmt.Sprintf("%s%s", key, name)
+
+		info := fieldInfo{field: f, tags: tags, parsed: g.parsed}
+		switch f.Type().String() {
+		case nullableTypeBool:
+			err = generateJSONPropertyBool(&info, prop, &childProp)
+		case nullableTypeFloat64:
+			err = generateJSONPropertyJSONNumber(&info, prop, &childProp)
+		case nullableTypeHTTPHeader:
+			err = generateJSONPropertyHTTPHeader(&info, prop, &childProp)
+		case nullableTypeInt, nullableTypeTimeMicrosUnix:
+			err = generateJSONPropertyInteger(&info, prop, &childProp)
+		case nullableTypeInterface:
+			err = generateJSONPropertyInterface(&info, prop, &childProp)
+		case nullableTypeString:
+			err = generateJSONPropertyString(&info, prop, &childProp)
+		default:
+			switch t := f.Type().Underlying().(type) {
+			case *types.Map:
+				nestedProp := property{Properties: make(map[string]*property)}
+				if err = generateJSONPropertyMap(&info, prop, &childProp, &nestedProp); err != nil {
+					break
+				}
+				if childStruct, ok := g.customStruct(t.Elem()); ok {
+					err = g.generate(childStruct, flattenedName, &nestedProp)
+				}
+			case *types.Slice:
+				if err = generateJSONPropertySlice(&info, prop, &childProp); err != nil {
+					break
+				}
+				child, ok := g.parsed.structTypes[t.Elem().String()]
+				if !ok {
+					break
+				}
+				childProp.Items = &property{
+					Type:       &propertyType{names: []propertyTypeName{TypeNameObject}, required: true},
+					Properties: make(map[string]*property),
+				}
+				if child.name == st.name {
+					// if the struct recursively references itself, do not call the generate function again
+					break
+				}
+				err = g.generate(child, flattenedName, childProp.Items)
+			case *types.Struct:
+				if err = generateJSONPropertyStruct(&info, prop, &childProp); err != nil {
+					break
+				}
+				// all non-parsed struct types should have been handled at this point
+				child, ok := g.parsed.structTypes[f.Type().String()]
+				if !ok {
+					err = fmt.Errorf("unhandled type for field %s", name)
+					break
+				}
+				err =
g.generate(child, flattenedName, &childProp) + default: + err = fmt.Errorf("unhandled type %T", t) + } + } + if err != nil { + return errors.Wrap(err, flattenedName) + } + for tagName := range tags { + // not all tags have been handled + return errors.Wrap(fmt.Errorf("unhandled tag rule %s", tagName), jsonSchemaName(f)) + } + } + + // iterate through AnyOf and ensure that at least one value is required + for i := 0; i < len(prop.AnyOf); i++ { + prop.AnyOf[i].Properties = make(map[string]*property) + for _, required := range prop.AnyOf[i].Required { + p, ok := prop.Properties[required] + if !ok { + return errors.Wrap(fmt.Errorf("unhandled property %s in %s tag", required, tagRequiredAnyOf), key) + } + prop.AnyOf[i].Properties[required] = &property{Type: &propertyType{required: true, names: p.Type.names}} + } + } + for i := 0; i < len(prop.AllOf); i++ { + pAllOf := prop.AllOf[i] + // set type to required for If branch + prop.AllOf[i].If.Properties = make(map[string]*property) + for _, required := range pAllOf.If.Required { + p, ok := prop.Properties[required] + if !ok { + return errors.Wrap(fmt.Errorf("unhandled property %s in %s tag", required, tagRequiredIfAny), key) + } + prop.AllOf[i].If.Properties[required] = &property{Type: &propertyType{required: true, names: p.Type.names}} + } + // set type to required for Then branch + prop.AllOf[i].Then.Properties = make(map[string]*property) + for _, required := range pAllOf.Then.Required { + p, ok := prop.Properties[required] + if !ok { + return errors.Wrap(fmt.Errorf("unhandled property %s in %s tag", required, tagRequiredIfAny), key) + } + prop.AllOf[i].Then.Properties[required] = &property{Type: &propertyType{required: true, names: p.Type.names}} + } + } + return nil +} + +var ( + patternHTTPHeaders = "[.*]*$" + + propertyTypes = map[string]propertyTypeName{ + nullableTypeBool: TypeNameBool, + "bool": TypeNameBool, + nullableTypeFloat64: TypeNameNumber, + "number": TypeNameNumber, + "float64": TypeNameNumber, + nullableTypeInt: TypeNameInteger, + "int": TypeNameInteger, + "int64": TypeNameInteger, + nullableTypeTimeMicrosUnix: TypeNameInteger, + nullableTypeString: TypeNameString, + "string": TypeNameString, + nullableTypeHTTPHeader: TypeNameObject, + "object": TypeNameObject, + } +) + +type fieldInfo struct { + field structField + tags map[string]string + parsed *Parsed +} + +type schema struct { + ID string `json:"$id,omitempty"` + property +} + +type property struct { + Description string `json:"description,omitempty"` + Type *propertyType `json:"type,omitempty"` + // AdditionalProperties should default to `true` and be set to `false` + // in case PatternProperties are set + AdditionalProperties interface{} `json:"additionalProperties,omitempty"` + PatternProperties map[string]*property `json:"patternProperties,omitempty"` + Properties map[string]*property `json:"properties,omitempty"` + Items *property `json:"items,omitempty"` + Required []string `json:"required,omitempty"` + Enum []*string `json:"enum,omitempty"` + Max json.Number `json:"maximum,omitempty"` + MaxLength json.Number `json:"maxLength,omitempty"` + Min json.Number `json:"minimum,omitempty"` + MinItems *int `json:"minItems,omitempty"` + MinLength json.Number `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + AllOf []*property `json:"allOf,omitempty"` + AnyOf []*property `json:"anyOf,omitempty"` + If *property `json:"if,omitempty"` + Then *property `json:"then,omitempty"` +} + +type propertyType struct { + names []propertyTypeName + required bool +} + 
+func (t *propertyType) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("")
+	if len(t.names) == 0 && !t.required {
+		buffer.WriteString(`""`)
+		return buffer.Bytes(), nil
+	}
+	multipleTypes := !t.required || len(t.names) > 1
+	if multipleTypes {
+		buffer.WriteString(`[`)
+	}
+	if !t.required {
+		buffer.WriteString(`"null",`)
+	}
+	for i := 0; i < len(t.names); i++ {
+		if i > 0 {
+			buffer.WriteString(`,`)
+		}
+		buffer.WriteString(`"`)
+		buffer.WriteString(t.names[i].String())
+		buffer.WriteString(`"`)
+	}
+	if multipleTypes {
+		buffer.WriteString(`]`)
+	}
+	return buffer.Bytes(), nil
+}
+
+func (t *propertyType) add(name propertyTypeName) {
+	if t.contains(name) {
+		return
+	}
+	t.names = append(t.names, name)
+}
+
+func (t *propertyType) contains(name propertyTypeName) bool {
+	for _, n := range t.names {
+		if n == name {
+			return true
+		}
+	}
+	return false
+}
+
+type propertyTypeName uint8
+
+//go:generate stringer -linecomment -type propertyTypeName
+const (
+	//TypeNameObject represents an object
+	TypeNameObject propertyTypeName = iota //object
+	//TypeNameString represents a string
+	TypeNameString //string
+	//TypeNameBool represents a boolean
+	TypeNameBool //boolean
+	//TypeNameInteger represents an integer
+	TypeNameInteger //integer
+	//TypeNameNumber represents a number (float or integer)
+	TypeNameNumber //number
+	//TypeNameArray represents an array
+	TypeNameArray //array
+)
+
+func propertyTypesFromTag(tagName string, tagValue string) ([]propertyTypeName, error) {
+	names := strings.Split(tagValue, ";")
+	types := make([]propertyTypeName, len(names))
+	var ok bool
+	for i := 0; i < len(names); i++ {
+		if types[i], ok = propertyTypes[names[i]]; !ok {
+			return nil, fmt.Errorf("unhandled value %s for tag %s", names[i], tagName)
+		}
+	}
+	return types, nil
+}
+
+func (g *JSONSchemaGenerator) customStruct(typ types.Type) (t structType, ok bool) {
+	t, ok = g.parsed.structTypes[typ.String()]
+	return
+}
+
+func jsonSchemaName(f structField) string {
+	parts := parseTag(f.tag, "json")
+	if parts == nil {
+		return ""
+	}
+	if len(parts) == 0 {
+		return strings.ToLower(f.Name())
+	}
+	return parts[0]
+}
diff --git a/model/modeldecoder/generator/jsonschema_test.go b/model/modeldecoder/generator/jsonschema_test.go
new file mode 100644
index 00000000000..d35a1801c67
--- /dev/null
+++ b/model/modeldecoder/generator/jsonschema_test.go
@@ -0,0 +1,248 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
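+
+// The tests below generate a JSON schema from the fixtures in the
+// generatortest package and validate hand-written JSON documents against it,
+// along these lines (illustrative, using the helpers defined in this file):
+//
+//	schema := generateJSONSchema(t, nameOf(generatortest.String{}))
+//	result, err := validate(schema, `{"required":"closed"}`)
+//	// err is nil and result.Valid() is true for a document matching the schema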
+ +package generator + +import ( + "fmt" + "path" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/xeipuuv/gojsonschema" + + "github.com/elastic/apm-server/model/modeldecoder/generator/generatortest" +) + +type testcase struct { + n string + data string +} + +func nameOf(i interface{}) string { + return reflect.TypeOf(i).Name() +} + +func TestSchemaString(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.String{})) + assertValid(t, schema, []testcase{ + {n: "enum", data: `{"required":"closed"}`}, + {n: "minLength", data: `{"required":"open","nullable":"12"}`}, + {n: "maxLength", data: `{"required":"open","nullable":"12345"}`}, + {n: "nullable", data: `{"required":"closed","nullable":null}`}, + {n: "additional-props", data: `{"required":"closed","abc":"**"}`}, + {n: "nullable-enum", data: `{"required":"closed","nullable_enum":"closed"}`}, + {n: "nullable-enum-null", data: `{"required":"closed","nullable_enum":null}`}}) + assertInvalid(t, schema, []testcase{ + {n: "enum", data: `{"required":"***"}`}, + {n: "minLength", data: `{"required":"open","nullable":"1"}`}, + {n: "maxLength", data: `{"required":"open","nullable":"123456"}`}, + {n: "pattern", data: `{"required":"open","nullable":"*.*"}`}, + {n: "required", data: `{"nullable":"***"}`}, + {n: "required-null", data: `{"required":null}`}, + {n: "type", data: `{"required":123}`}, + }) +} + +func TestSchemaNumbers(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Number{})) + assertValid(t, schema, []testcase{ + {n: "int-min", data: `{"required":1}`}, + {n: "int-max", data: `{"required":250}`}, + {n: "float-min", data: `{"required":1,"nullable":0.5}`}, + {n: "float-max", data: `{"required":1,"nullable":15.9}`}, + {n: "nullable", data: `{"required":1,"nullable":null}`}, + {n: "additional-props", data: `{"required":1,"abc":123}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-int-min", data: `{"required":0}`}, + {n: "violation-int-max", data: `{"required":251}`}, + {n: "violation-float-min", data: `{"required":1,"nullable":0.45}`}, + {n: "violation-float-max", data: `{"required":1,"nullable":15.91}`}, + {n: "violation-required", data: `{"abc":123}`}, + {n: "violation-required-null", data: `{"required":null}`}, + {n: "violation-type", data: `{"required":"123"}`}}) +} + +func TestSchemaBool(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Bool{})) + assertValid(t, schema, []testcase{ + {n: "valid", data: `{"required":true}`}, + {n: "nullable", data: `{"required":false,"nullable":null}`}, + {n: "additional-props", data: `{"required":true,"abc":123}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-required", data: `{"nullable":true}`}, + {n: "violation-required-null", data: `{"required":null}`}, + {n: "violation-type", data: `{"required":123}`}}) +} + +func TestSchemaHTTPHeader(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.HTTPHeader{})) + assertValid(t, schema, []testcase{ + {n: "http-header", data: `{"required":{"a":"v","b":["v1","v2"],"a":["v2"]}}`}, + {n: "nullable", data: `{"required":{"a":"v"},"nullable":null}`}, + {n: "additional-props", data: `{"required":{"a":"v"},"abc":123}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-required", data: `{"nullable":{"a":"v"}}`}, + {n: "violation-required-null", data: `{"required":null}`}, + {n: "violation-type-int", data: `{"required":123}`}, + {n: "violation-type", data: 
`{"required":{"a":{"b":"v"}}}`}}) +} +func TestSchemaInterface(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Interface{})) + assertValid(t, schema, []testcase{ + {n: "str-minLength", data: `{"required":"12"}`}, + {n: "str-maxLength", data: `{"required":"12345"}`}, + {n: "int-min", data: `{"required":2}`}, + {n: "int-max", data: `{"required":250}`}, + {n: "float-min", data: `{"required":1.5}`}, + {n: "float-max", data: `{"required":250.0}`}, + {n: "bool", data: `{"required":false}`}, + {n: "object", data: `{"required":{"a":{"b":123}}}`}, + {n: "str-enum", data: `{"required":"12","nullable":"closed"}`}, + {n: "nullable", data: `{"required":2,"nullable":null}`}, + {n: "additional-props", data: `{"required":"ab","abc":"**"}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-str-minLength", data: `{"required":"1"}`}, + {n: "violation-str-maxLength", data: `{"required":"123456"}`}, + {n: "violation-str-pattern", data: `{"required":"12."}`}, + {n: "violation-int-min", data: `{"required":0}`}, + {n: "violation-int-max", data: `{"required":251}`}, + {n: "violation-float-min", data: `{"required":0.45}`}, + {n: "violation-float-max", data: `{"required":250.01}`}, + {n: "violation-type", data: `{"required":["a"]}`}, + {n: "violation-str-enum", data: `{"required":"12","nullable":"***"}`}, + {n: "violation-type", data: `{"required":2,"nullable":{"a":"b"}}`}, + {n: "violation-required", data: `{"nullable":"abc"}`}, + {n: "violation-required-null", data: `{"required":null}`}}) +} + +func TestSchemaMap(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Map{})) + assertValid(t, schema, []testcase{ + {n: "required", data: `{"required":{"aab":"abcde"}}`}, + {n: "all-types", data: `{"required":{"aab":1,"abab":"abcde","bbb":true}}`}, + {n: "additional-props", data: `{"required":{"aab":1},"additional":100}`}, + {n: "nested-a", data: `{"required":{"aab":"abcde"},"nested_a":{"aaa":{"required":10.9}}}`}, + {n: "nested-a-nullable", data: `{"required":{"aab":"abcde"},"nested_a":null}`}, + {n: "nested-b", data: `{"required":{"aab":"abcde"},"nested_b":{"ab":{"c":"v1","cc":"v2"}}}`}, + {n: "nested-b-additional-props", data: `{"abc":123,"required":{"aab":"abcde"},"nested_b":{"ab":{"c":"v1","cc":"v2"}}}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-pattern", data: `{"required":{"tgh":"abcde"}}`}, + {n: "violation-maxLengthVals", data: `{"required":{"aab":"abcdef"}}`}, + {n: "violation-required", data: `{"nullable":{"aab":"abcde"}}`}, + {n: "violation-required-null", data: `{"required":null}`}, + {n: "violation-type", data: `{"required":{"aab":{}}}`}, + {n: "violation-nested-a-required", data: `{"required":{"aab":"abcde"},"nested_a":{"aaa":{}}}`}, + {n: "violation-nested-a-required-null", data: `{"required":{"aab":"abcde"},"nested_a":{"aaa":{"required":null}}}`}, + {n: "violation-nested-a-pattern", data: `{"required":{"aab":"abcde"},"nested_a":{"tt":{"required":10.9}}}`}, + {n: "failure-nested-b-additional-props-ab", data: `{"required":{"aab":"abcde"},"nested_b":{"xyz":{"a":"b"},"ab":{"c":"v1","cc":"v2"}}}`}, + {n: "failure-nested-b-additional-props-c", data: `{"required":{"aab":"abcde"},"nested_b":{"ab":{"c":"v1","cc":"v2","d":2}}}`}}) +} + +func TestSchemaSliceStruct(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Slice{})) + assertValid(t, schema, []testcase{ + {n: "required", data: `{"required":["cc","cc","ccc","cc"]}`}, + {n: "valid", data: `{"required":["cc"],"nullable":["b"]}`}, + {n: "nullable-minElems", data: 
`{"required":["cc"],"nullable":[]}`}, + {n: "struct-minElems", data: `{"required":["cc"],"children":[]}`}, + {n: "struct-null", data: `{"required":["cc"],"children":[{}]}`}, + {n: "struct-recursive", data: `{"required":["cc"],"children":[{"number":2.3,"children":[{"number":2.3,"children":[{"number":2.3}]}]}]}`}, + {n: "struct-additional-props", data: `{"required":["cc"],"abc":2,"children":[{"a":1}]}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-mexLength", data: `{"required":["cccc"]}`}, + {n: "violation-minLength", data: `{"required":["c"]}`}, + {n: "violation-minElems", data: `{"required":[]}`}, + {n: "violation-required-null", data: `{"required":[null]}`}, + {n: "violation-pattern", data: `{"required":["cc"],"nullable":["cde"]}`}, + {n: "violation-nullable-null", data: `{"required":["cc"],"nullable":[null]}`}}) +} + +func TestSchemaRequiredIfAny(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.RequiredIfAny{})) + assertValid(t, schema, []testcase{ + {n: "all", data: `{"a":"a","b":"b","c":"c","d":"d"}`}, + {n: "only-required", data: `{"d":"d"}`}, + {n: "only-a", data: `{"a":"a","d":"d"}`}, + {n: "no-b", data: `{"a":"a","c":"c","d":"d"}`}, + {n: "additional-props", data: `{"xyc":34,"d":"d"}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-none", data: `{}`}, + {n: "violation-a-if-b", data: `{"b":"b","c":"c","d":"d"}`}, + {n: "violation-a-if-c", data: `{"c":"c","d":"d"}`}, + {n: "violation-c-if-b", data: `{"a":"a","b":"b","d":"d"}`}}) +} + +func TestSchemaRequiredAnyOf(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.RequiredAnyOf{})) + assertValid(t, schema, []testcase{ + {n: "all", data: `{"a":1,"b":2}`}, + {n: "a", data: `{"a":1}`}, + {n: "b", data: `{"b":1}`}, + {n: "additional-props", data: `{"a":1,"xyc":34}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-required", data: `{}`}}) +} + +func TestSchemaOnlyExported(t *testing.T) { + schema := generateJSONSchema(t, nameOf(generatortest.Exported{})) + assertValid(t, schema, []testcase{ + {n: "valid", data: `{"b":1}`}, + {n: "unexported-no-type-checking", data: `{"a":1.5,"_":0.7}`}}) + assertInvalid(t, schema, []testcase{ + {n: "violation-number", data: `{"b":1.5}`}}) +} + +func validate(schema string, data string) (*gojsonschema.Result, error) { + schemaLoader := gojsonschema.NewStringLoader(schema) + dataLoader := gojsonschema.NewStringLoader(data) + return gojsonschema.Validate(schemaLoader, dataLoader) +} + +func generateJSONSchema(t *testing.T, root string) string { + p := path.Join("github.com", "elastic", "apm-server", "model", "modeldecoder", "generator", "generatortest") + parsed, err := Parse(p) + require.NoError(t, err, err) + jsonSchema, err := NewJSONSchemaGenerator(parsed) + require.NoError(t, err, err) + rootEvent := fmt.Sprintf("%s.%s", p, root) + b, err := jsonSchema.Generate("jsonschematest", rootEvent) + require.NoError(t, err, err) + return b.String() +} + +func assertValid(t *testing.T, schema string, cases []testcase) { + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + result, err := validate(schema, tc.data) + require.NoError(t, err) + assert.True(t, result.Valid(), result.Errors()) + }) + } +} + +func assertInvalid(t *testing.T, schema string, cases []testcase) { + for _, tc := range cases { + t.Run(tc.n, func(t *testing.T) { + result, err := validate(schema, tc.data) + require.NoError(t, err) + assert.False(t, result.Valid(), result.Errors()) + }) + } +} diff --git a/model/modeldecoder/generator/map.go 
new file mode 100644
index 00000000000..40bc9616ef3
--- /dev/null
+++ b/model/modeldecoder/generator/map.go
@@ -0,0 +1,216 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"encoding/json"
+	"fmt"
+	"go/types"
+	"io"
+	"strings"
+)
+
+var mapSupportedTags = []string{tagMaxLengthVals, tagPatternKeys, tagRequired, tagInputTypesVals}
+
+func generateMapValidation(w io.Writer, fields []structField, f structField, isCustomStruct bool) error {
+	typ := f.Type().Underlying().(*types.Map)
+	// verify that validation rules for the map kind exist
+	switch typ.Elem().Underlying().(type) {
+	case *types.Basic, *types.Interface: // do nothing special
+	case *types.Struct:
+		if !isCustomStruct {
+			return fmt.Errorf("unhandled struct type %s", typ)
+		}
+	default:
+		return fmt.Errorf("unhandled type %s", typ)
+	}
+
+	vTag, err := validationTag(f.tag)
+	if err != nil {
+		return err
+	}
+	// check if all configured tags are supported
+	for k := range vTag {
+		var supported bool
+		for _, s := range mapSupportedTags {
+			if k == s {
+				supported = true
+				break
+			}
+		}
+		if !supported {
+			return fmt.Errorf("unhandled tag rule '%v'", k)
+		}
+	}
+
+	// validation rules must be run on the map itself and on its elements
+	// 1. apply map validation rules:
+	if ruleValue, ok := vTag[tagRequired]; ok {
+		mapRuleRequired(w, f, validationRule{name: tagRequired, value: ruleValue})
+		if len(vTag) == 1 {
+			return nil
+		}
+	}
+	if len(vTag) == 0 {
+		return nil
+	}
+	// 2.
iterate over map and apply validation rules to its elements + if _, ok := vTag[tagInputTypesVals]; ok || isCustomStruct { + fmt.Fprintf(w, ` +for k,v := range val.%s{ +`[1:], f.Name()) + } else { + fmt.Fprintf(w, ` +for k := range val.%s{ +`[1:], f.Name()) + } + + if isCustomStruct { + // call validation on every item + fmt.Fprintf(w, ` +if err := v.validate(); err != nil{ + return errors.Wrapf(err, "%s") +} +`[1:], jsonName(f)) + } + if patternKeysValue, ok := vTag[tagPatternKeys]; ok { + mapRulePatternKeys(w, f, validationRule{name: tagPatternKeys, value: patternKeysValue}) + } + if typesValsValue, ok := vTag[tagInputTypesVals]; ok { + mapRuleTypesVals(w, f, vTag, validationRule{name: tagInputTypesVals, value: typesValsValue}) + } + fmt.Fprintf(w, ` +} +`[1:]) + return nil +} + +func mapRuleTypesVals(w io.Writer, f structField, rules map[string]string, rule validationRule) { + fmt.Fprintf(w, ` +switch t := v.(type){ +`[1:]) + // if values are not required allow nil + if _, ok := rules[tagRequired]; !ok { + fmt.Fprintf(w, ` +case nil: +`[1:]) + } + for _, typ := range strings.Split(rule.value, ";") { + if typ == "number" { + typ = "json.Number" + } + fmt.Fprintf(w, ` +case %s: +`[1:], typ) + if typ == "string" { + if maxValValue, ok := rules[tagMaxLengthVals]; ok { + mapRuleMaxVals(w, f, validationRule{name: tagMaxLengthVals, value: maxValValue}) + } + } + } + fmt.Fprintf(w, ` +default: + return fmt.Errorf("'%s': validation rule '%s(%s)' violated for key %%s",k) +} +`[1:], jsonName(f), rule.name, rule.value) +} + +func mapRuleRequired(w io.Writer, f structField, rule validationRule) { + fmt.Fprintf(w, ` +if len(val.%s) == 0{ + return fmt.Errorf("'%s' required") +} +`[1:], f.Name(), jsonName(f)) +} + +func mapRulePatternKeys(w io.Writer, f structField, rule validationRule) { + fmt.Fprintf(w, ` +if k != "" && !%sRegexp.MatchString(k){ + return fmt.Errorf("'%s': validation rule '%s(%s)' violated") +} +`[1:], rule.value, jsonName(f), rule.name, rule.value) +} + +func mapRuleMaxVals(w io.Writer, f structField, rule validationRule) { + fmt.Fprintf(w, ` +if utf8.RuneCountInString(t) > %s{ + return fmt.Errorf("'%s': validation rule '%s(%s)' violated") +} +`[1:], rule.value, jsonName(f), rule.name, rule.value) +} + +func generateJSONPropertyMap(info *fieldInfo, parent *property, child *property, nested *property) error { + name := jsonSchemaName(info.field) + child.Type.add(TypeNameObject) + patternName, isPatternProp := info.tags[tagPatternKeys] + delete(info.tags, tagPatternKeys) + + nestedParent := child + if name == "" { + // The map does not have a json name defined, in which case it is nested directly + // inside the parent object's patternProperties/additionalProperties + // e.g. {"parent":{"patternProperties":{"patternXY":{..}}}} + *nested = *child + nestedParent = parent + } else { + // The map does have a json name defined, in which case it is nested as + // patternProperties/additionalProperties inside an object, which itself is nested + // inside the parent property, identified by its json name + // e.g. 
{"parent":{"properties":{"jsonNameXY":{"patternProperties":{"patternXY":{..}}}}}} + parent.Properties[name] = child + } + + haveValueSchema := len(info.tags) > 0 + if maxLen, ok := info.tags[tagMaxLengthVals]; ok { + nested.MaxLength = json.Number(maxLen) + delete(info.tags, tagMaxLengthVals) + } + if inputTypes, ok := info.tags[tagInputTypesVals]; ok { + names, err := propertyTypesFromTag(tagInputTypesVals, inputTypes) + if err != nil { + return err + } + delete(info.tags, tagInputTypesVals) + nested.Type = &propertyType{names: names} + } else { + valueType := info.field.Type().Underlying().(*types.Map).Elem() + if !types.IsInterface(valueType) { + haveValueSchema = true + typeName, ok := propertyTypes[valueType.String()] + if !ok { + typeName = TypeNameObject + } + nested.Type = &propertyType{names: []propertyTypeName{typeName}} + } + } + + if isPatternProp { + pattern, ok := info.parsed.patternVariables[patternName] + if !ok { + return fmt.Errorf("unhandled %s tag value %s", tagPatternKeys, pattern) + } + if nestedParent.PatternProperties == nil { + nestedParent.PatternProperties = make(map[string]*property) + } + nestedParent.PatternProperties[pattern] = nested + nestedParent.AdditionalProperties = false + } else if haveValueSchema { + nestedParent.AdditionalProperties = nested + } + return nil +} diff --git a/model/modeldecoder/generator/nint.go b/model/modeldecoder/generator/nint.go new file mode 100644 index 00000000000..275838b4f2b --- /dev/null +++ b/model/modeldecoder/generator/nint.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +import ( + "fmt" + "io" + + "github.com/pkg/errors" +) + +func generateNullableIntValidation(w io.Writer, fields []structField, f structField, _ bool) error { + rules, err := validationRules(f.tag) + if err != nil { + return errors.Wrap(err, "nullableInt") + } + for _, rule := range rules { + switch rule.name { + case tagMin, tagMax: + nintRuleMinMax(w, f, rule) + case tagRequired: + ruleNullableRequired(w, f) + default: + errors.Wrap(errUnhandledTagRule(rule), "nullableInt") + } + } + return nil +} + +func nintRuleMinMax(w io.Writer, f structField, rule validationRule) { + fmt.Fprintf(w, ` +if val.%s.IsSet() && val.%s.Val %s %s { + return fmt.Errorf("'%s': validation rule '%s(%s)' violated") +} +`[1:], f.Name(), f.Name(), ruleMinMaxOperator(rule.name), rule.value, jsonName(f), rule.name, rule.value) +} diff --git a/model/modeldecoder/generator/ninterface.go b/model/modeldecoder/generator/ninterface.go new file mode 100644 index 00000000000..54706a5380e --- /dev/null +++ b/model/modeldecoder/generator/ninterface.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+func generateNullableInterfaceValidation(w io.Writer, fields []structField, f structField, _ bool) error {
+	rules, err := validationRules(f.tag)
+	if err != nil {
+		return errors.Wrap(err, "nullableInterface")
+	}
+	for _, rule := range rules {
+		switch rule.name {
+		case tagMaxLength, tagTargetType:
+			//handled in switch statement for string types
+		case tagRequired:
+			ruleNullableRequired(w, f)
+		case tagInputTypes:
+			if err := nullableInterfaceRuleTypes(w, f, rules, rule); err != nil {
+				return errors.Wrap(err, "nullableInterface")
+			}
+		default:
+			return errors.Wrap(errUnhandledTagRule(rule), "nullableInterface")
+		}
+	}
+	return nil
+}
+
+func nullableInterfaceRuleTypes(w io.Writer, f structField, rules []validationRule, rule validationRule) error {
+	var isRequired bool
+	var maxLengthRule validationRule
+	var targetTypeRule validationRule
+	var useValue bool
+	for _, r := range rules {
+		if r.name == tagRequired {
+			isRequired = true
+			continue
+		}
+		if r.name == tagMaxLength {
+			maxLengthRule = r
+			useValue = true
+			continue
+		}
+		if r.name == tagTargetType {
+			targetTypeRule = r
+			if targetTypeRule.value != "int" {
+				return fmt.Errorf("unhandled targetType %s", targetTypeRule.value)
+			}
+			useValue = true
+			continue
+		}
+	}
+
+	var switchStmt string
+	if useValue {
+		switchStmt = `switch t := val.%s.Val.(type){`
+	} else {
+		switchStmt = `switch val.%s.Val.(type){`
+	}
+	fmt.Fprintf(w, switchStmt, f.Name())
+
+	for _, typ := range strings.Split(rule.value, ";") {
+		switch typ {
+		case "int":
+			fmt.Fprintf(w, `
+case int:
+case json.Number:
+	if _, err := t.Int64(); err != nil{
+		return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+	}
+`[1:], jsonName(f), rule.name, rule.value)
+		case "string":
+			fmt.Fprintf(w, `
+case %s:
+`[1:], typ)
+			if maxLengthRule != (validationRule{}) {
+				fmt.Fprintf(w, `
+if utf8.RuneCountInString(t) %s %s{
+	return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+}
+`[1:], ruleMinMaxOperator(maxLengthRule.name), maxLengthRule.value, jsonName(f), maxLengthRule.name, maxLengthRule.value)
+			}
+			if targetTypeRule.value == "int" {
+				fmt.Fprintf(w, `
+if _, err := strconv.Atoi(t); err != nil{
+	return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+}
+`[1:], jsonName(f), targetTypeRule.name, targetTypeRule.value)
+			}
+		case "object":
+			fmt.Fprint(w, `
+case map[string]interface{}:
+`[1:])
+		default:
+			return fmt.Errorf("unhandled %s %s", rule.name, rule.value)
+		}
+	}
+	if !isRequired {
+		fmt.Fprintf(w, `
+case nil:
+`[1:])
+	}
+	fmt.Fprintf(w, `
+default:
+	return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+}
+`[1:], jsonName(f), rule.name, rule.value)
+	return nil
+}
diff --git a/model/modeldecoder/generator/nstring.go b/model/modeldecoder/generator/nstring.go
new file mode 100644
index 00000000000..2b2d7dcb8e6
--- /dev/null
+++ b/model/modeldecoder/generator/nstring.go
@@ -0,0 +1,84 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/pkg/errors"
+)
+
+func generateNullableStringValidation(w io.Writer, fields []structField, f structField, _ bool) error {
+	rules, err := validationRules(f.tag)
+	if err != nil {
+		return errors.Wrap(err, "nullableString")
+	}
+	for _, rule := range rules {
+		switch rule.name {
+		case tagEnum:
+			nstringRuleEnum(w, f, rule)
+		case tagMinLength, tagMaxLength:
+			nstringRuleMinMax(w, f, rule)
+		case tagPattern:
+			nstringRulePattern(w, f, rule)
+		case tagRequired:
+			ruleNullableRequired(w, f)
+		case tagRequiredIfAny:
+			if err := ruleRequiredIfAny(w, fields, f, rule.value); err != nil {
+				return errors.Wrap(err, "nullableString")
+			}
+		default:
+			return errors.Wrap(errUnhandledTagRule(rule), "nullableString")
+		}
+	}
+	return nil
+}
+
+func nstringRuleEnum(w io.Writer, f structField, rule validationRule) {
+	fmt.Fprintf(w, `
+if val.%s.Val != ""{
+	var matchEnum bool
+	for _, s := range %s {
+		if val.%s.Val == s{
+			matchEnum = true
+			break
+		}
+	}
+	if !matchEnum{
+		return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+	}
+}
+`[1:], f.Name(), rule.value, f.Name(), jsonName(f), rule.name, rule.value)
+}
+
+func nstringRuleMinMax(w io.Writer, f structField, rule validationRule) {
+	fmt.Fprintf(w, `
+if val.%s.IsSet() && utf8.RuneCountInString(val.%s.Val) %s %s{
+	return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+}
+`[1:], f.Name(), f.Name(), ruleMinMaxOperator(rule.name), rule.value, jsonName(f), rule.name, rule.value)
+}
+
+func nstringRulePattern(w io.Writer, f structField, rule validationRule) {
+	fmt.Fprintf(w, `
+if val.%s.Val != "" && !%sRegexp.MatchString(val.%s.Val){
+	return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+}
+`[1:], f.Name(), rule.value, f.Name(), jsonName(f), rule.name, rule.value)
+}
diff --git a/model/modeldecoder/generator/nullable.go b/model/modeldecoder/generator/nullable.go
new file mode 100644
index 00000000000..620d7210be2
--- /dev/null
+++ b/model/modeldecoder/generator/nullable.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +import ( + "fmt" +) + +var ( + typPath = "github.com/elastic/apm-server/model/modeldecoder/nullable" + + nullableTypeBool = fmt.Sprintf("%s.Bool", typPath) + nullableTypeFloat64 = fmt.Sprintf("%s.Float64", typPath) + nullableTypeHTTPHeader = fmt.Sprintf("%s.HTTPHeader", typPath) + nullableTypeInt = fmt.Sprintf("%s.Int", typPath) + nullableTypeInterface = fmt.Sprintf("%s.Interface", typPath) + nullableTypeString = fmt.Sprintf("%s.String", typPath) + nullableTypeTimeMicrosUnix = fmt.Sprintf("%s.TimeMicrosUnix", typPath) +) diff --git a/model/modeldecoder/generator/parser.go b/model/modeldecoder/generator/parser.go new file mode 100644 index 00000000000..1eaf4711068 --- /dev/null +++ b/model/modeldecoder/generator/parser.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
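The fully qualified nullable type names above are what the generator compares parsed struct field types against. For orientation, here is a minimal sketch of the kind of input package the parser below is pointed at; the package, variable, and field names are illustrative, not taken from the real model definitions:

    package input // hypothetical package handed to Parse

    import "github.com/elastic/apm-server/model/modeldecoder/nullable"

    var (
    	// picked up by the parser because of the "pattern" name prefix
    	patternNoDot = "^[^.]*$"
    	// picked up by the parser because of the "enum" name prefix
    	enumOutcome = []string{"success", "failure", "unknown"}
    )

    type transaction struct {
    	// validated by nstringRuleEnum against the named enum variable
    	Outcome nullable.String `json:"outcome" validate:"enum=enumOutcome"`
    	// validated by nstringRuleMinMax and nstringRulePattern
    	Name nullable.String `json:"name" validate:"maxLength=1024,pattern=patternNoDot"`
    }
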
+
+package generator
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	"golang.org/x/tools/go/packages"
+)
+
+// Parsed contains information about a parsed package,
+// including the package name, type information
+// and some pre-defined variables.
+type Parsed struct {
+	// package name
+	pkgName string
+	// parsed structs from loading types from the provided package
+	structTypes map[string]structType
+	// parsed pattern variables
+	patternVariables map[string]string
+	// parsed enumeration values
+	enumVariables map[string][]string
+}
+
+type structType struct {
+	name    string
+	comment string
+	fields  []structField
+}
+
+type structField struct {
+	*types.Var
+	tag     reflect.StructTag
+	comment string
+}
+
+// Parse loads the Go package named by the given package pattern
+// and creates a parsed struct containing the package name,
+// type information and some pre-defined variables.
+// It returns an error if the package cannot be successfully
+// loaded or parsed.
+func Parse(pkgPattern string) (*Parsed, error) {
+	pkg, err := loadPackage(pkgPattern)
+	if err != nil {
+		return nil, err
+	}
+	parsed := Parsed{
+		structTypes:      make(map[string]structType),
+		patternVariables: make(map[string]string),
+		enumVariables:    make(map[string][]string),
+		pkgName:          pkg.Types.Name()}
+	err = parse(pkg, &parsed)
+	return &parsed, err
+}
+
+func loadPackage(pkg string) (*packages.Package, error) {
+	cfg := packages.Config{Mode: packages.NeedTypes | packages.NeedSyntax |
+		packages.NeedTypesInfo | packages.NeedImports}
+	pkgs, err := packages.Load(&cfg, pkg)
+	if err != nil {
+		return nil, err
+	}
+	if packages.PrintErrors(pkgs) > 0 {
+		return nil, errors.New("packages load error")
+	}
+	return pkgs[0], nil
+}
+
+func parse(pkg *packages.Package, parsed *Parsed) error {
+	for _, file := range pkg.Syntax {
+		// parse type comments
+		typeComments := make(map[int]string)
+		for _, c := range file.Comments {
+			typeComments[int(c.End())] = trimComment(c.Text())
+		}
+
+		for _, decl := range file.Decls {
+			genDecl, ok := decl.(*ast.GenDecl)
+			if !ok {
+				continue
+			}
+
+			switch genDecl.Tok {
+			case token.VAR:
+				for _, spec := range genDecl.Specs {
+					valueSpec, ok := spec.(*ast.ValueSpec)
+					if !ok {
+						continue
+					}
+					for i, expr := range valueSpec.Values {
+						name := valueSpec.Names[i].Name
+						var err error
+						switch v := expr.(type) {
+						case *ast.BasicLit:
+							if strings.HasPrefix(name, "pattern") {
+								parsed.patternVariables[name], err = strconv.Unquote(v.Value)
+							}
+						case *ast.CompositeLit:
+							if strings.HasPrefix(name, "enum") {
+								elems := make([]string, len(v.Elts))
+								for i := 0; i < len(v.Elts); i++ {
+									elems[i], err = strconv.Unquote(v.Elts[i].(*ast.BasicLit).Value)
+								}
+								parsed.enumVariables[name] = elems
+							}
+						}
+						if err != nil {
+							return err
+						}
+					}
+				}
+			case token.TYPE:
+				// find comments for the generic declaration node, in the format of
+				// //MyComment
+				// type MyComment struct {..}
+				var genDeclComment string
+				if c, ok := typeComments[int(genDecl.Pos()-1)]; ok {
+					genDeclComment = c
+				}
+				// iterate through the type declaration for this generic declaration node
+				for _, spec := range genDecl.Specs {
+					typeSpec, ok := spec.(*ast.TypeSpec)
+					if !ok {
+						continue
+					}
+					obj := pkg.TypesInfo.Defs[typeSpec.Name]
+					if obj == nil {
+						continue
+					}
+					var st structType
+					st.name = obj.Name()
+					// find comments for the specific type declaration, in the format of
+					// type (
+					// //MyComment
+					// MyComment struct {..}
+					// )
+					// fallback to
generic declaration comment otherwise if it starts with the type name + if c, ok := typeComments[int(genDecl.Pos()-1)]; ok && st.name == strings.Split(c, " ")[0] { + st.comment = c + } else if st.name == strings.Split(genDeclComment, " ")[0] { + st.comment = genDeclComment + } + // find field comments (ignoring line comments) + fieldComments := make(map[string]string) + if st, ok := typeSpec.Type.(*ast.StructType); ok { + for _, f := range st.Fields.List { + if f.Doc != nil && len(f.Doc.Text()) > 0 { + fieldComments[f.Names[0].Name] = trimComment(f.Doc.Text()) + } + } + } + named := obj.(*types.TypeName).Type().(*types.Named) + typesStruct, ok := named.Underlying().(*types.Struct) + if !ok { + return fmt.Errorf("unhandled type %T", named.Underlying()) + } + numFields := typesStruct.NumFields() + structFields := make([]structField, 0, numFields) + for i := 0; i < numFields; i++ { + structField := structField{ + Var: typesStruct.Field(i), + tag: reflect.StructTag(typesStruct.Tag(i)), + } + if c, ok := fieldComments[structField.Name()]; ok { + structField.comment = c + } + structFields = append(structFields, structField) + } + st.fields = structFields + parsed.structTypes[obj.Type().String()] = st + } + } + } + } + return nil +} + +func trimComment(c string) string { + c = strings.ReplaceAll(c, "\n", " ") + return strings.TrimSuffix(c, " ") +} diff --git a/model/modeldecoder/generator/propertytypename_string.go b/model/modeldecoder/generator/propertytypename_string.go new file mode 100644 index 00000000000..85dcc80e36a --- /dev/null +++ b/model/modeldecoder/generator/propertytypename_string.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by "stringer -linecomment -type propertyTypeName"; DO NOT EDIT. + +package generator + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[TypeNameObject-0]
+	_ = x[TypeNameString-1]
+	_ = x[TypeNameBool-2]
+	_ = x[TypeNameInteger-3]
+	_ = x[TypeNameNumber-4]
+	_ = x[TypeNameArray-5]
+}
+
+const _propertyTypeName_name = "objectstringbooleanintegernumberarray"
+
+var _propertyTypeName_index = [...]uint8{0, 6, 12, 19, 26, 32, 37}
+
+func (i propertyTypeName) String() string {
+	if i >= propertyTypeName(len(_propertyTypeName_index)-1) {
+		return "propertyTypeName(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _propertyTypeName_name[_propertyTypeName_index[i]:_propertyTypeName_index[i+1]]
+}
diff --git a/model/modeldecoder/generator/slice.go b/model/modeldecoder/generator/slice.go
new file mode 100644
index 00000000000..65659ac51f3
--- /dev/null
+++ b/model/modeldecoder/generator/slice.go
@@ -0,0 +1,144 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"encoding/json"
+	"fmt"
+	"go/types"
+	"io"
+
+	"github.com/pkg/errors"
+)
+
+func generateSliceValidation(w io.Writer, fields []structField, f structField, isCustomStruct bool) error {
+	// call validation on every slice element when elements are of custom type
+	if isCustomStruct {
+		fmt.Fprintf(w, `
+for _, elem := range val.%s{
+	if err := elem.validate(); err != nil{
+		return errors.Wrapf(err, "%s")
+	}
+}
+`[1:], f.Name(), jsonName(f))
+	}
+	// handle configured validation rules
+	rules, err := validationRules(f.tag)
+	if err != nil {
+		return errors.Wrap(err, "slice")
+	}
+	for _, rule := range rules {
+		switch rule.name {
+		case tagMinLength, tagMaxLength:
+			err = sliceRuleMinMaxLength(w, f, rule)
+		case tagMinVals:
+			err = sliceRuleMinVals(w, f, rule)
+		case tagRequired:
+			sliceRuleRequired(w, f, rule)
+		case tagRequiredAnyOf:
+			err = ruleRequiredOneOf(w, fields, rule.value)
+		case tagRequiredIfAny:
+			err = ruleRequiredIfAny(w, fields, f, rule.value)
+		default:
+			return errors.Wrap(errUnhandledTagRule(rule), "slice")
+		}
+		if err != nil {
+			return errors.Wrap(err, "slice")
+		}
+	}
+	return nil
+}
+
+func sliceRuleMinMaxLength(w io.Writer, f structField, rule validationRule) error {
+	sliceT, ok := f.Type().Underlying().(*types.Slice)
+	if !ok {
+		return fmt.Errorf("unexpected error handling %s for slice", rule.name)
+	}
+	if basic, ok := sliceT.Elem().Underlying().(*types.Basic); ok {
+		if basic.Kind() == types.String {
+			fmt.Fprintf(w, `
+for _, elem := range val.%s{
+	if utf8.RuneCountInString(elem) %s %s{
+		return fmt.Errorf("'%s': validation rule '%s(%s)' violated")
+	}
+}
+`[1:], f.Name(), ruleMinMaxOperator(rule.name), rule.value, jsonName(f), rule.name, rule.value)
+			return nil
+		}
+	}
+	return fmt.Errorf("unhandled tag rule %s for type %s", rule.name, f.Type().Underlying())
+}
+
+func sliceRuleMinVals(w io.Writer, f structField, rule validationRule) error
{ + fmt.Fprintf(w, ` +for _, elem := range val.%s{ + if elem %s %s{ + return fmt.Errorf("'%s': validation rule '%s(%s)' violated") + } +} +`[1:], f.Name(), ruleMinMaxOperator(rule.name), rule.value, jsonName(f), rule.name, rule.value) + return nil +} + +func sliceRuleRequired(w io.Writer, f structField, rule validationRule) { + fmt.Fprintf(w, ` +if len(val.%s) == 0{ + return fmt.Errorf("'%s' required") +} +`[1:], f.Name(), jsonName(f)) +} + +func generateJSONPropertySlice(info *fieldInfo, parent *property, child *property) error { + child.Type.add(TypeNameArray) + var minItems int + if child.Type.required { + minItems = 1 + } + child.MinItems = &minItems + parent.Properties[jsonSchemaName(info.field)] = child + + itemType := info.field.Type().Underlying().(*types.Slice).Elem() + if _, ok := info.parsed.structTypes[itemType.String()]; ok { + // parsed struct - no type specific tags will be handled + return nil + } + // non-parsed struct + // check if type is known, otherwise raise unhandled error + itemsType, ok := propertyTypes[itemType.String()] + if !ok { + return fmt.Errorf("unhandled type %T", itemType) + } + // NOTE(simi): set required=true to be aligned with previous JSON schema definitions + items := property{Type: &propertyType{names: []propertyTypeName{itemsType}, required: true}} + switch itemsType { + case TypeNameInteger: + setPropertyRulesInteger(info, &items) + case TypeNameNumber: + setPropertyRulesNumber(info, &items) + case TypeNameString: + setPropertyRulesString(info, &items) + default: + return fmt.Errorf("unhandled slice item type %s", itemsType) + } + if minVals, ok := info.tags[tagMinVals]; ok { + items.Min = json.Number(minVals) + delete(info.tags, tagMinVals) + } + child.Items = &items + return nil +} diff --git a/model/modeldecoder/generator/string.go b/model/modeldecoder/generator/string.go new file mode 100644 index 00000000000..58b940e9db5 --- /dev/null +++ b/model/modeldecoder/generator/string.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
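To make the slice templates above concrete before moving on to the string helpers below: for a hypothetical field declared as Tags []string with validate:"maxLength=1024", sliceRuleMinMaxLength emits validation code of roughly this shape (a sketch assembled from the template, not captured generator output):

    for _, elem := range val.Tags {
    	if utf8.RuneCountInString(elem) > 1024 {
    		return fmt.Errorf("'tags': validation rule 'maxLength(1024)' violated")
    	}
    }

sliceRuleMinVals differs in that it compares the element values themselves rather than their lengths.
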
+
+package generator
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func generateJSONPropertyString(info *fieldInfo, parent *property, child *property) error {
+	child.Type.add(TypeNameString)
+	parent.Properties[jsonSchemaName(info.field)] = child
+	return setPropertyRulesString(info, child)
+}
+
+func setPropertyRulesString(info *fieldInfo, p *property) error {
+	for tagName, tagValue := range info.tags {
+		switch tagName {
+		case tagEnum:
+			enumVars, ok := info.parsed.enumVariables[tagValue]
+			if !ok {
+				return fmt.Errorf("unhandled %s tag value %s", tagName, tagValue)
+			}
+			for _, val := range enumVars {
+				v := val
+				p.Enum = append(p.Enum, &v)
+			}
+			// allow null value if field is not required
+			if _, ok := info.tags[tagRequired]; !ok {
+				p.Enum = append(p.Enum, nil)
+			}
+			delete(info.tags, tagName)
+		case tagMaxLength:
+			p.MaxLength = json.Number(tagValue)
+			delete(info.tags, tagName)
+		case tagMinLength:
+			p.MinLength = json.Number(tagValue)
+			delete(info.tags, tagName)
+		case tagPattern:
+			val, ok := info.parsed.patternVariables[tagValue]
+			if !ok {
+				return fmt.Errorf("unhandled %s tag value %s", tagName, tagValue)
+			}
+			p.Pattern = val
+			delete(info.tags, tagName)
+		}
+	}
+	return nil
+}
diff --git a/model/modeldecoder/generator/struct.go b/model/modeldecoder/generator/struct.go
new file mode 100644
index 00000000000..ceb923b85b4
--- /dev/null
+++ b/model/modeldecoder/generator/struct.go
@@ -0,0 +1,59 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package generator
+
+import (
+	"fmt"
+	"io"
+)
+
+func generateStructValidation(w io.Writer, fields []structField, f structField, isCustomStruct bool) error {
+	// if field is a custom struct, call its validation function
+	if isCustomStruct {
+		fmt.Fprintf(w, `
+	if err := val.%s.validate(); err != nil{
+		return errors.Wrapf(err, "%s")
+	}
+	`[1:], f.Name(), jsonName(f))
+	}
+
+	// handle the rules that apply to structs
+	// (`required` and `requiredAnyOf`)
+	// and return an error for any other rule
+	rules, err := validationRules(f.tag)
+	if err != nil {
+		return err
+	}
+	for _, rule := range rules {
+		switch rule.name {
+		case tagRequired:
+			ruleNullableRequired(w, f)
+		case tagRequiredAnyOf:
+			if err := ruleRequiredOneOf(w, fields, rule.value); err != nil {
+				return err
+			}
+		default:
+			return errUnhandledTagRule(rule)
+		}
+	}
+	return nil
+}
+
+func generateJSONPropertyStruct(info *fieldInfo, parent *property, child *property) error {
+	child.Type.add(TypeNameObject)
+	parent.Properties[jsonSchemaName(info.field)] = child
+	return nil
+}
diff --git a/model/modeldecoder/generator/validation.go b/model/modeldecoder/generator/validation.go
new file mode 100644
index 00000000000..9a31104549f
--- /dev/null
+++ b/model/modeldecoder/generator/validation.go
@@ -0,0 +1,194 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package generator + +import ( + "fmt" + "io" + "reflect" + "sort" + "strings" +) + +const ( + tagEnum = "enum" + tagInputTypes = "inputTypes" + tagInputTypesVals = "inputTypesVals" + tagMax = "max" + tagMaxLength = "maxLength" + tagMaxLengthVals = "maxLengthVals" + tagMin = "min" + tagMinLength = "minLength" + tagMinVals = "minVals" + tagPattern = "pattern" + tagPatternKeys = "patternKeys" + tagRequired = "required" + tagRequiredAnyOf = "requiredAnyOf" + tagRequiredIfAny = "requiredIfAny" + tagTargetType = "targetType" +) + +type validationRule struct { + name string + value string +} + +func errUnhandledTagRule(rule validationRule) error { + return fmt.Errorf("unhandled tag rule '%s'", rule.name) +} + +func validationTag(structTag reflect.StructTag) (map[string]string, error) { + parts := parseTag(structTag, "validate") + m := make(map[string]string, len(parts)) + errPrefix := "parse validation tag:" + for _, rule := range parts { + parts := strings.Split(rule, "=") + switch len(parts) { + case 1: + // valueless rule e.g. 
required
+			if rule != parts[0] {
+				return nil, fmt.Errorf("%s malformed tag '%s'", errPrefix, rule)
+			}
+			switch rule {
+			case tagRequired:
+				m[rule] = ""
+			default:
+				return nil, fmt.Errorf("%s unhandled tag rule '%s'", errPrefix, rule)
+			}
+		case 2:
+			// rule=value
+			m[parts[0]] = parts[1]
+		default:
+			return nil, fmt.Errorf("%s malformed tag '%s'", errPrefix, rule)
+		}
+	}
+	return m, nil
+}
+
+func validationRules(structTag reflect.StructTag) ([]validationRule, error) {
+	tag, err := validationTag(structTag)
+	if err != nil {
+		return nil, err
+	}
+	var rules = make([]validationRule, 0, len(tag))
+	for k, v := range tag {
+		rules = append(rules, validationRule{name: k, value: v})
+	}
+	sort.Slice(rules, func(i, j int) bool {
+		return rules[i].name < rules[j].name
+	})
+	return rules, nil
+}
+
+func ruleMinMaxOperator(ruleName string) string {
+	switch ruleName {
+	case tagMin, tagMinLength, tagMinVals:
+		return "<"
+	case tagMax, tagMaxLength:
+		return ">"
+	default:
+		panic("unexpected rule: " + ruleName)
+	}
+}
+
+//
+// common validation rules independent of type
+//
+
+func ruleNullableRequired(w io.Writer, f structField) {
+	fmt.Fprintf(w, `
+if !val.%s.IsSet() {
+	return fmt.Errorf("'%s' required")
+}
+`[1:], f.Name(), jsonName(f))
+}
+
+func ruleRequiredOneOf(w io.Writer, fields []structField, tagValue string) error {
+	oneOf, err := filteredFields(fields, strings.Split(tagValue, ";"))
+	if err != nil {
+		return err
+	}
+	if len(oneOf) <= 1 {
+		return fmt.Errorf("invalid usage of rule 'requiredAnyOf' - try 'required' instead")
+	}
+
+	fmt.Fprintf(w, `if `)
+	for i, oneOfField := range oneOf {
+		if i > 0 {
+			fmt.Fprintf(w, " && ")
+		}
+		fmt.Fprint(w, "!")
+		if err := generateIsSet(w, oneOfField, "val."); err != nil {
+			return err
+		}
+	}
+	fmt.Fprintf(w, ` {
+	return fmt.Errorf("requires at least one of the fields '%v'")
+}
+`[1:], tagValue)
+	return nil
+}
+
+func ruleRequiredIfAny(w io.Writer, fields []structField, field structField, tagValue string) error {
+	ifAny, err := filteredFields(fields, strings.Split(tagValue, ";"))
+	if err != nil {
+		return err
+	}
+
+	// Only check ifAny fields if the field itself is not set
+	fmt.Fprint(w, "if !")
+	if err := generateIsSet(w, field, "val."); err != nil {
+		return err
+	}
+	fmt.Fprintln(w, " {")
+
+	// Check if any of the fields is set. We create a separate "if" block
+	// for each field so we can include its name in the error.
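+	// For a hypothetical field `name` tagged `validate:"requiredIfAny=first;last"`,
+	// the emitted code is expected to look roughly like:
+	//
+	//   if !val.Name.IsSet() {
+	//       if val.First.IsSet() {
+	//           return fmt.Errorf("'name' required when 'first' is set")
+	//       }
+	//       if val.Last.IsSet() {
+	//           return fmt.Errorf("'name' required when 'last' is set")
+	//       }
+	//   }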
+ for _, ifAnyField := range ifAny { + fmt.Fprint(w, "if ") + if err := generateIsSet(w, ifAnyField, "val."); err != nil { + return err + } + fmt.Fprintf(w, ` { + return fmt.Errorf("'%s' required when '%s' is set") +} +`, jsonName(field), jsonName(ifAnyField)) + } + + fmt.Fprintln(w, "}") + return nil +} + +func filteredFields(fields []structField, jsonNames []string) ([]structField, error) { + mapped := make(map[string]structField) + for _, field := range fields { + mapped[jsonName(field)] = field + } + filtered := make([]structField, len(jsonNames)) + for i, jsonName := range jsonNames { + field, ok := mapped[jsonName] + if !ok { + return nil, fmt.Errorf("unknown field name %q", jsonName) + } + filtered[i] = field + } + return filtered, nil +} diff --git a/model/modeldecoder/input.go b/model/modeldecoder/input.go index f1ad1c939be..3e9b18699ff 100644 --- a/model/modeldecoder/input.go +++ b/model/modeldecoder/input.go @@ -18,35 +18,11 @@ package modeldecoder import ( - "time" - "github.com/elastic/apm-server/model" ) // Input holds the input required for decoding an event. type Input struct { - // Raw holds the raw input, decoded by encoding/json. - Raw interface{} - - // RequestTime is the time at which the event was received - // by the server. This is used to set the timestamp for - // events sent by RUM. - RequestTime time.Time - - // Metadata holds metadata that may be added to the event. - Metadata model.Metadata - - // Config holds configuration for decoding. - // - // TODO(axw) define a Decoder type which encapsulates - // static configuration defined in one location, removing - // the possibility of inconsistent configuration. - Config Config -} - -// Config holds static configuration which applies to all decoding. -type Config struct { - Experimental bool - // RUM v3 support - HasShortFieldNames bool + // Base holds the base for decoding events. + Base model.APMEvent } diff --git a/model/modeldecoder/kubernetes.go b/model/modeldecoder/kubernetes.go deleted file mode 100644 index 84548d84008..00000000000 --- a/model/modeldecoder/kubernetes.go +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
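Stepping back before the long run of deletions that follows: the slimmed-down Input above is the pivot of this refactoring. Request time, metadata, and decoder configuration no longer travel alongside the raw payload; callers fold them into a base event up front. A hedged sketch of the new calling convention (only Input and its Base field are taken from the change; the package clause and helper are illustrative):

    package main // illustrative harness, not part of the change

    import (
    	"github.com/elastic/apm-server/model"
    	"github.com/elastic/apm-server/model/modeldecoder"
    )

    // newInput shows the new calling convention: everything that previously
    // travelled on Input (request time, metadata, config) is applied to the
    // base model.APMEvent by the caller before decoding starts.
    func newInput(base model.APMEvent) modeldecoder.Input {
    	return modeldecoder.Input{Base: base}
    }

Each generated decoder can then clone Base into every event it produces, which is what makes the hand-written decoders deleted below redundant.
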
- -package modeldecoder - -import "github.com/elastic/apm-server/model" - -func decodeKubernetes(input map[string]interface{}, out *model.Kubernetes) { - if input == nil { - return - } - decodeString(input, "namespace", &out.Namespace) - if node := getObject(input, "node"); node != nil { - decodeString(node, "name", &out.NodeName) - } - if pod := getObject(input, "pod"); pod != nil { - decodeString(pod, "name", &out.PodName) - decodeString(pod, "uid", &out.PodUID) - } -} diff --git a/model/modeldecoder/kubernetes_test.go b/model/modeldecoder/kubernetes_test.go deleted file mode 100644 index 89c24508d2b..00000000000 --- a/model/modeldecoder/kubernetes_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestKubernetesDecode(t *testing.T) { - namespace, podname, poduid, nodename := "namespace", "podname", "poduid", "podname" - for _, test := range []struct { - input map[string]interface{} - k model.Kubernetes - }{ - {input: nil}, - { - input: map[string]interface{}{ - "namespace": namespace, - "node": map[string]interface{}{"name": nodename}, - "pod": map[string]interface{}{ - "uid": poduid, - "name": podname, - }, - }, - k: model.Kubernetes{ - Namespace: namespace, - NodeName: nodename, - PodName: podname, - PodUID: poduid, - }, - }, - } { - var kubernetes model.Kubernetes - decodeKubernetes(test.input, &kubernetes) - assert.Equal(t, test.k, kubernetes) - } -} diff --git a/model/modeldecoder/labels.go b/model/modeldecoder/labels.go deleted file mode 100644 index aa08bcafa05..00000000000 --- a/model/modeldecoder/labels.go +++ /dev/null @@ -1,29 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "github.com/elastic/beats/v7/libbeat/common" -) - -func decodeLabels(input map[string]interface{}, out *common.MapStr) { - if len(input) == 0 { - return - } - *out = common.MapStr(input) -} diff --git a/model/modeldecoder/message.go b/model/modeldecoder/message.go deleted file mode 100644 index 979e0aa06f7..00000000000 --- a/model/modeldecoder/message.go +++ /dev/null @@ -1,51 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "errors" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/utility" -) - -func decodeMessage(input interface{}, err error) (*model.Message, error) { - if input == nil || err != nil { - return nil, err - } - raw, ok := input.(map[string]interface{}) - if !ok { - return nil, errors.New("invalid type for message") - } - decoder := utility.ManualDecoder{} - - messageInp := decoder.MapStr(raw, "message") - if decoder.Err != nil || messageInp == nil { - return nil, decoder.Err - } - m := model.Message{ - QueueName: decoder.StringPtr(messageInp, "name", "queue"), - Body: decoder.StringPtr(messageInp, "body"), - Headers: decoder.Headers(messageInp, "headers"), - AgeMillis: decoder.IntPtr(messageInp, "ms", "age"), - } - if decoder.Err != nil { - return nil, decoder.Err - } - return &m, nil -} diff --git a/model/modeldecoder/message_test.go b/model/modeldecoder/message_test.go deleted file mode 100644 index afa96740de8..00000000000 --- a/model/modeldecoder/message_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "encoding/json" - "errors" - "net/http" - "testing" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/tests" -) - -func TestDecodeMessage(t *testing.T) { - for _, tc := range []struct { - name string - inp interface{} - inpErr error - message *model.Message - outpErr error - }{ - {name: "empty"}, - {name: "error", - inpErr: errors.New("error foo")}, - {name: "invalid", - inp: "foo", outpErr: errors.New("invalid type for message")}, - {name: "valid", - inp: map[string]interface{}{ - "message": map[string]interface{}{ - "queue": map[string]interface{}{"name": "order"}, - "body": "user A ordered book B", - "headers": map[string]interface{}{"internal": "false", "services": []string{"user", "order"}}, - "age": map[string]interface{}{"ms": json.Number("1577958057123")}}}, - message: &model.Message{ - QueueName: tests.StringPtr("order"), - Body: tests.StringPtr("user A ordered book B"), - Headers: http.Header{"Internal": []string{"false"}, "Services": []string{"user", "order"}}, - AgeMillis: tests.IntPtr(1577958057123), - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - decoded, err := decodeMessage(tc.inp, tc.inpErr) - if tc.inpErr != nil { - require.Equal(t, tc.inpErr, err) - } else if tc.outpErr != nil { - require.Equal(t, tc.outpErr, err) - } else { - require.Nil(t, err) - } - assert.Equal(t, tc.message, decoded) - }) - } -} - -func TestMessaging_Fields(t *testing.T) { - var m *model.Message - require.Nil(t, m.Fields()) - - m = &model.Message{} - require.Equal(t, common.MapStr{}, m.Fields()) - - m = &model.Message{ - QueueName: tests.StringPtr("orders"), - Body: tests.StringPtr("order confirmed"), - Headers: http.Header{"Internal": []string{"false"}, "Services": []string{"user", "order"}}, - AgeMillis: tests.IntPtr(1577958057123), - } - outp := common.MapStr{ - "queue": common.MapStr{"name": "orders"}, - "body": "order confirmed", - "headers": http.Header{"Internal": []string{"false"}, "Services": []string{"user", "order"}}, - "age": common.MapStr{"ms": 1577958057123}} - assert.Equal(t, outp, m.Fields()) -} diff --git a/model/modeldecoder/metadata.go b/model/modeldecoder/metadata.go deleted file mode 100644 index 71468d304d7..00000000000 --- a/model/modeldecoder/metadata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/metadata/generated/schema" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/validation" -) - -var ( - metadataSchema = validation.CreateSchema(schema.ModelSchema, "metadata") - rumV3MetadataSchema = validation.CreateSchema(schema.RUMV3Schema, "metadata") -) - -// DecodeRUMV3Metadata decodes v3 RUM metadata. -func DecodeRUMV3Metadata(input interface{}, hasShortFieldNames bool, out *model.Metadata) error { - return decodeMetadata(input, hasShortFieldNames, rumV3MetadataSchema, out) -} - -// DecodeMetadata decodes v2 metadata. -func DecodeMetadata(input interface{}, hasShortFieldNames bool, out *model.Metadata) error { - return decodeMetadata(input, hasShortFieldNames, metadataSchema, out) -} - -func decodeMetadata(input interface{}, hasShortFieldNames bool, schema *jsonschema.Schema, out *model.Metadata) error { - raw, err := validation.ValidateObject(input, schema) - if err != nil { - return errors.Wrap(err, "failed to validate metadata") - } - fieldName := field.Mapper(hasShortFieldNames) - decodeService(getObject(raw, fieldName("service")), hasShortFieldNames, &out.Service) - decodeSystem(getObject(raw, "system"), &out.System) - decodeProcess(getObject(raw, "process"), &out.Process) - if userObj := getObject(raw, fieldName("user")); userObj != nil { - decodeUser(userObj, hasShortFieldNames, &out.User, &out.Client) - } - decodeCloud(getObject(raw, "cloud"), &out.Cloud) - decodeLabels(getObject(raw, fieldName("labels")), &out.Labels) - return nil -} diff --git a/model/modeldecoder/metadata_test.go b/model/modeldecoder/metadata_test.go deleted file mode 100644 index 98f394b3d40..00000000000 --- a/model/modeldecoder/metadata_test.go +++ /dev/null @@ -1,290 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/tests" - "github.com/elastic/beats/v7/libbeat/common" -) - -const ( - pid = 1234 - ppid = 4567 - processTitle = "bobsyouruncle" - - detectedHostname = "detected_hostname" - configuredHostname = "configured_hostname" - systemArchitecture = "x86_64" - systemPlatform = "linux" - - containerID = "container-123" - kubernetesNamespace = "k8s-namespace" - kubernetesNodeName = "k8s-node" - kubernetesPodName = "k8s-pod-name" - kubernetesPodUID = "k8s-pod-uid" - - uid = "12321" - mail = "user@email.com" - username = "user" - userAgent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1" -) - -var ( - systemIP = net.ParseIP("192.168.0.1") - userIP = net.ParseIP("192.168.0.1") -) - -var fullInput = map[string]interface{}{ - "service": map[string]interface{}{ - "name": serviceName, - "version": serviceVersion, - "environment": serviceEnvironment, - "node": map[string]interface{}{ - "configured_name": serviceNodeName, - }, - "language": map[string]interface{}{ - "name": langName, - "version": langVersion, - }, - "runtime": map[string]interface{}{ - "name": rtName, - "version": rtVersion, - }, - "framework": map[string]interface{}{ - "name": fwName, - "version": fwVersion, - }, - "agent": map[string]interface{}{ - "name": agentName, - "version": agentVersion, - }, - }, - "process": map[string]interface{}{ - "pid": float64(pid), - "ppid": float64(ppid), - "title": processTitle, - "argv": []interface{}{"apm-server"}, - }, - "system": map[string]interface{}{ - "detected_hostname": detectedHostname, - "configured_hostname": configuredHostname, - "architecture": systemArchitecture, - "platform": systemPlatform, - "container": map[string]interface{}{ - "id": containerID, - }, - "kubernetes": map[string]interface{}{ - "namespace": kubernetesNamespace, - "node": map[string]interface{}{ - "name": kubernetesNodeName, - }, - "pod": map[string]interface{}{ - "name": kubernetesPodName, - "uid": kubernetesPodUID, - }, - }, - }, - "user": map[string]interface{}{ - "id": uid, - "email": mail, - "username": username, - }, - "cloud": map[string]interface{}{ - "availability_zone": "australia-southeast1-a", - "account": map[string]interface{}{ - "id": "acct123", - "name": "my-dev-account", - }, - "instance": map[string]interface{}{ - "id": "inst-foo123xyz", - "name": "my-instance", - }, - "machine": map[string]interface{}{ - "type": "n1-highcpu-96", - }, - "project": map[string]interface{}{ - "id": "snazzy-bobsled-123", - "name": "Development", - }, - "provider": "gcp", - "region": "australia-southeast1", - }, - "labels": map[string]interface{}{ - "k": "v", "n": 1, "f": 1.5, "b": false, - }, -} - -func metadata() *model.Metadata { - return &model.Metadata{ - UserAgent: model.UserAgent{Original: userAgent}, - Client: model.Client{IP: userIP}, - System: model.System{IP: systemIP}} -} - -func TestDecodeMetadata(t *testing.T) { - output := metadata() - require.NoError(t, DecodeMetadata(fullInput, false, output)) - assert.Equal(t, &model.Metadata{ - Service: model.Service{ - Name: serviceName, - Version: serviceVersion, - Environment: serviceEnvironment, - Node: model.ServiceNode{Name: serviceNodeName}, - Language: model.Language{Name: langName, Version: langVersion}, - Runtime: model.Runtime{Name: rtName, Version: rtVersion}, - Framework: 
model.Framework{Name: fwName, Version: fwVersion}, - Agent: model.Agent{Name: agentName, Version: agentVersion}, - }, - Process: model.Process{ - Pid: pid, - Ppid: tests.IntPtr(ppid), - Title: processTitle, - Argv: []string{"apm-server"}, - }, - System: model.System{ - DetectedHostname: detectedHostname, - ConfiguredHostname: configuredHostname, - Architecture: systemArchitecture, - Platform: systemPlatform, - IP: systemIP, - Container: model.Container{ID: containerID}, - Kubernetes: model.Kubernetes{ - Namespace: kubernetesNamespace, - NodeName: kubernetesNodeName, - PodName: kubernetesPodName, - PodUID: kubernetesPodUID, - }, - }, - User: model.User{ - ID: uid, - Email: mail, - Name: username, - }, - UserAgent: model.UserAgent{ - Original: userAgent, - }, - Client: model.Client{ - IP: userIP, - }, - Cloud: model.Cloud{ - AccountID: "acct123", - AccountName: "my-dev-account", - AvailabilityZone: "australia-southeast1-a", - InstanceID: "inst-foo123xyz", - InstanceName: "my-instance", - MachineType: "n1-highcpu-96", - ProjectID: "snazzy-bobsled-123", - ProjectName: "Development", - Provider: "gcp", - Region: "australia-southeast1", - }, - Labels: common.MapStr{"k": "v", "n": 1, "f": 1.5, "b": false}, - }, output) -} - -func BenchmarkDecodeMetadata(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - if err := DecodeMetadata(fullInput, false, &model.Metadata{}); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeMetadataRecycled(b *testing.B) { - b.ReportAllocs() - var meta model.Metadata - for i := 0; i < b.N; i++ { - if err := DecodeMetadata(fullInput, false, &meta); err != nil { - b.Fatal(err) - } - for k := range meta.Labels { - delete(meta.Labels, k) - } - } -} - -func TestDecodeMetadataInvalid(t *testing.T) { - err := DecodeMetadata(nil, false, &model.Metadata{}) - require.EqualError(t, err, "failed to validate metadata: error validating JSON: input missing") - - err = DecodeMetadata("", false, &model.Metadata{}) - require.EqualError(t, err, "failed to validate metadata: error validating JSON: invalid input type") - - // baseInput holds the minimal valid input. Test-specific input is added to this. 
- baseInput := map[string]interface{}{ - "service": map[string]interface{}{ - "agent": map[string]interface{}{"name": "go", "version": "1.0.0"}, - "name": "name", - }, - } - require.NoError(t, DecodeMetadata(baseInput, false, &model.Metadata{})) - - for _, test := range []struct { - input map[string]interface{} - err string - }{ - { - input: map[string]interface{}{"service": 123}, - err: "service.*expected object, but got number", - }, - { - input: map[string]interface{}{"system": 123}, - err: "system.*expected object or null, but got number", - }, - { - input: map[string]interface{}{"process": 123}, - err: "process.*expected object or null, but got number", - }, - { - input: map[string]interface{}{"user": 123}, - err: "user.*expected object or null, but got number", - }, - { - input: map[string]interface{}{"cloud": 123}, - err: "cloud.*expected object or null, but got number", - }, - { - input: map[string]interface{}{"cloud": map[string]interface{}{}}, - err: `cloud.*missing properties: "provider"`, - }, - } { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - if v == nil { - delete(input, k) - } else { - input[k] = v - } - } - err = DecodeMetadata(input, false, &model.Metadata{}) - require.Error(t, err) - assert.Regexp(t, test.err, err.Error()) - } - -} diff --git a/model/modeldecoder/metricset.go b/model/modeldecoder/metricset.go deleted file mode 100644 index 07cb9bcaae4..00000000000 --- a/model/modeldecoder/metricset.go +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "github.com/santhosh-tekuri/jsonschema" - - "github.com/pkg/errors" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/metricset/generated/schema" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" -) - -const ( - transactionKey = "transaction" - spanKey = "span" -) - -var ( - metricsetSchema = validation.CreateSchema(schema.ModelSchema, "metricset") - rumV3Schema = validation.CreateSchema(schema.RUMV3Schema, "metricset") -) - -// DecodeRUMV2Metricset decodes a v2 RUM metricset. -func DecodeRUMV2Metricset(input Input, batch *model.Batch) error { - // Identical to backend agent metricsets. - return DecodeMetricset(input, batch) -} - -// DecodeMetricset decodes a v2 metricset. -func DecodeMetricset(input Input, batch *model.Batch) error { - metricset, err := decodeMetricset(input, metricsetSchema) - if err != nil { - return err - } - batch.Metricsets = append(batch.Metricsets, metricset) - return nil -} - -// DecodeRUMV3Metricset decodes a v3 RUM metricset. 
-func DecodeRUMV3Metricset(input Input, batch *model.Batch) error { - metricset, err := decodeMetricset(input, rumV3Schema) - if err != nil { - return err - } - batch.Metricsets = append(batch.Metricsets, metricset) - return nil -} - -func decodeMetricset(input Input, schema *jsonschema.Schema) (*model.Metricset, error) { - raw, err := validation.ValidateObject(input.Raw, schema) - if err != nil { - return nil, errors.Wrap(err, "failed to validate metricset") - } - - md := metricsetDecoder{&utility.ManualDecoder{}} - fieldName := field.Mapper(input.Config.HasShortFieldNames) - - e := model.Metricset{ - Timestamp: md.TimeEpochMicro(raw, "timestamp"), - Metadata: input.Metadata, - } - md.decodeSamples(getObject(raw, fieldName("samples")), input.Config.HasShortFieldNames, &e.Samples) - md.decodeTransaction(getObject(raw, fieldName(transactionKey)), input.Config.HasShortFieldNames, &e.Transaction) - md.decodeSpan(getObject(raw, fieldName(spanKey)), input.Config.HasShortFieldNames, &e.Span) - - if md.Err != nil { - return nil, md.Err - } - - if tags := utility.Prune(md.MapStr(raw, fieldName("tags"))); len(tags) > 0 { - e.Labels = tags - } - if e.Timestamp.IsZero() { - e.Timestamp = input.RequestTime - } - - return &e, nil -} - -type metricsetDecoder struct { - *utility.ManualDecoder -} - -func (md *metricsetDecoder) decodeSamples(input map[string]interface{}, hasShortFieldNames bool, out *[]model.Sample) { - fieldName := field.Mapper(hasShortFieldNames) - inverseFieldName := field.InverseMapper(hasShortFieldNames) - - valueFieldName := fieldName("value") - for name, s := range input { - sampleObj, _ := s.(map[string]interface{}) - sample := model.Sample{Name: inverseFieldName(name)} - // TODO(axw) add support for ingesting counts/values (histogram metrics) - decodeFloat64(sampleObj, valueFieldName, &sample.Value) - *out = append(*out, sample) - } -} - -func (md *metricsetDecoder) decodeSpan(input map[string]interface{}, hasShortFieldNames bool, out *model.MetricsetSpan) { - fieldName := field.Mapper(hasShortFieldNames) - decodeString(input, fieldName("type"), &out.Type) - decodeString(input, fieldName("subtype"), &out.Subtype) -} - -func (md *metricsetDecoder) decodeTransaction(input map[string]interface{}, hasShortFieldNames bool, out *model.MetricsetTransaction) { - fieldName := field.Mapper(hasShortFieldNames) - decodeString(input, fieldName("type"), &out.Type) - decodeString(input, fieldName("name"), &out.Name) - // TODO(axw) add support for ingesting transaction.result, transaction.root -} diff --git a/model/modeldecoder/metricset_test.go b/model/modeldecoder/metricset_test.go deleted file mode 100644 index 86413f0e9de..00000000000 --- a/model/modeldecoder/metricset_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -// assertMetricsMatch is an equality test for a metricset as sample order is not important -func assertMetricsetsMatch(t *testing.T, expected, actual *model.Metricset) bool { - samplesMatch := assert.ElementsMatch(t, expected.Samples, actual.Samples) - expected.Samples = nil - actual.Samples = nil - nonSamplesMatch := assert.Equal(t, expected, actual) - - return assert.True(t, samplesMatch && nonSamplesMatch, - fmt.Sprintf("metrics mismatch\nexpected:%#v\n actual:%#v", expected, actual)) -} - -func TestDecode(t *testing.T) { - tsFormat := func(ts time.Time) interface{} { - return json.Number(fmt.Sprintf("%d", ts.UnixNano()/1000)) - } - timestampParsed := time.Date(2017, 5, 30, 18, 53, 27, 154*1e6, time.UTC) - requestTime := time.Now() - spType, spSubtype, trType, trName := "db", "sql", "request", "GET /" - metadata := model.Metadata{ - Service: model.Service{Name: "myservice"}, - } - - for _, test := range []struct { - input map[string]interface{} - err bool - metricset *model.Metricset - }{ - {input: nil, metricset: nil}, - { - input: map[string]interface{}{}, - metricset: nil, - }, - { - input: map[string]interface{}{ - "timestamp": tsFormat(timestampParsed), - "samples": map[string]interface{}{}, - }, - metricset: &model.Metricset{ - Metadata: metadata, - Timestamp: timestampParsed, - }, - }, - { - input: map[string]interface{}{ - "timestamp": tsFormat(timestampParsed), - "samples": map[string]interface{}{ - "invalid.metric": map[string]interface{}{ - "value": "foo", - }, - }, - }, - err: true, - }, - { - input: map[string]interface{}{ - "samples": map[string]interface{}{}, - }, - metricset: &model.Metricset{ - Metadata: metadata, - Timestamp: requestTime, - }, - }, - { - input: map[string]interface{}{ - "tags": map[string]interface{}{ - "atag": true, - }, - "timestamp": tsFormat(timestampParsed), - "samples": map[string]interface{}{ - "a.counter": map[string]interface{}{ - "value": json.Number("612"), - }, - "some.gauge": map[string]interface{}{ - "value": json.Number("9.16"), - }, - }, - }, - metricset: &model.Metricset{ - Metadata: metadata, - Samples: []model.Sample{ - { - Name: "some.gauge", - Value: 9.16, - }, - { - Name: "a.counter", - Value: 612, - }, - }, - Labels: common.MapStr{ - "atag": true, - }, - Timestamp: timestampParsed, - }, - }, - { - input: map[string]interface{}{ - "tags": map[string]interface{}{ - "atag": true, - }, - "timestamp": tsFormat(timestampParsed), - "samples": map[string]interface{}{ - "a.counter": map[string]interface{}{ - "value": json.Number("612"), - }, - }, - "span": map[string]interface{}{ - "type": spType, - "subtype": spSubtype, - }, - "transaction": map[string]interface{}{ - "type": trType, - "name": trName, - }, - }, - metricset: &model.Metricset{ - Metadata: metadata, - Samples: []model.Sample{ - { - Name: "a.counter", - Value: 612, - }, - }, - Labels: common.MapStr{ - "atag": true, - }, - Span: model.MetricsetSpan{Type: spType, Subtype: spSubtype}, - Transaction: model.MetricsetTransaction{Type: trType, Name: trName}, - Timestamp: timestampParsed, - }, - }, - } { - batch := &model.Batch{} - err := DecodeMetricset(Input{ - Raw: test.input, - RequestTime: requestTime, - Metadata: metadata, - }, 
batch) - if test.err == true { - assert.Error(t, err) - } - if test.metricset != nil { - want := test.metricset - got := batch.Metricsets[0] - assertMetricsetsMatch(t, want, got) - } - } -} diff --git a/model/modeldecoder/modeldecoder.go b/model/modeldecoder/modeldecoder.go new file mode 100644 index 00000000000..0476c18a9a9 --- /dev/null +++ b/model/modeldecoder/modeldecoder.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecoder + +import ( + "regexp" + + "github.com/pkg/errors" +) + +// DecoderError represents an error due to JSON decoding. +type DecoderError struct { + err error +} + +func (e DecoderError) Error() string { + return errors.Wrap(e.err, "decode error").Error() +} + +func (e *DecoderError) Unwrap() error { + return e.err +} + +// ValidationError represents an error due to JSON validation. +type ValidationError struct { + err error +} + +func (e ValidationError) Error() string { + return errors.Wrap(e.err, "validation error").Error() +} + +func (e *ValidationError) Unwrap() error { + return e.err +} + +var jsoniterErrRegexp = regexp.MustCompile(` but found .*error found in .* bigger context.*`) + +// NewDecoderErrFromJSONIter returns a DecoderError where +// any text from the original input is stripped, +// when decoded via jsoniter. 
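+//
+// For example (a sketch, not a captured message), a jsoniter error like
+//
+//	readObjectStart: expect { or n, but found 1, error found in #1 byte of ...|1|..., bigger context: ...|1|...
+//
+// is reduced to "readObjectStart: expect { or n," so that fragments of the
+// raw payload never leak into the returned error.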
+func NewDecoderErrFromJSONIter(err error) DecoderError { + if jsoniterErrRegexp.MatchString(err.Error()) { + err = errors.New(jsoniterErrRegexp.ReplaceAllString(err.Error(), "")) + } + return DecoderError{err} +} + +// NewValidationErr returns a ValidationError +func NewValidationErr(err error) ValidationError { + return ValidationError{err} +} diff --git a/model/modeldecoder/modeldecodertest/populator.go b/model/modeldecoder/modeldecodertest/populator.go index b49fa864174..c74cb11e2d6 100644 --- a/model/modeldecoder/modeldecodertest/populator.go +++ b/model/modeldecoder/modeldecodertest/populator.go @@ -19,51 +19,213 @@ package modeldecodertest import ( "fmt" + "net" + "net/http" "reflect" "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" - "github.com/elastic/apm-server/model/modeldecoder/nullable" "github.com/elastic/beats/v7/libbeat/common" + + "github.com/elastic/apm-server/model/modeldecoder/nullable" ) +// Values used for populating the model structs +type Values struct { + Str string + Int int + Float float64 + Bool bool + Time time.Time + Duration time.Duration + IP net.IP + HTTPHeader http.Header + // N controls how many elements are added to a slice or a map + N int +} + +// DefaultValues returns a Values struct initialized with non-zero values +func DefaultValues() *Values { + initTime, _ := time.Parse(time.RFC3339, "2020-10-10T10:00:00Z") + return &Values{ + Str: "init", + Int: 1, + Float: 0.5, + Bool: true, + Time: initTime, + Duration: time.Second, + IP: net.ParseIP("127.0.0.1"), + HTTPHeader: http.Header{http.CanonicalHeaderKey("user-agent"): []string{"a", "b", "c"}}, + N: 3, + } +} + +// NonDefaultValues returns a Values struct initialized with non-zero values +func NonDefaultValues() *Values { + updatedTime, _ := time.Parse(time.RFC3339, "2020-12-10T10:00:00Z") + return &Values{ + Str: "overwritten", + Int: 12, + Float: 3.5, + Bool: false, + Time: updatedTime, + Duration: time.Minute, + IP: net.ParseIP("192.168.0.1"), + HTTPHeader: http.Header{http.CanonicalHeaderKey("user-agent"): []string{"d", "e"}}, + N: 2, + } +} + +// Update arbitrary values +func (v *Values) Update(args ...interface{}) { + for _, arg := range args { + switch a := arg.(type) { + case string: + v.Str = a + case int: + v.Int = a + case float64: + v.Float = a + case bool: + v.Bool = a + case time.Time: + v.Time = a + case net.IP: + v.IP = a + case http.Header: + v.HTTPHeader = a + default: + panic(fmt.Sprintf("Values Merge: value type for %v not implemented", a)) + } + } +} + // InitStructValues iterates through the struct fields represented by // the given reflect.Value and initializes all fields with // some arbitrary value. func InitStructValues(i interface{}) { - SetStructValues(i, "initialized", 1) + SetStructValues(i, DefaultValues()) } +// SetStructValuesOption is the type of an option which may be passed into +// SetStructValues to override the value to which a field is set. If the +// option returns false, then the field will not be updated and no more options +// will be invoked. +type SetStructValuesOption func(key string, field, value reflect.Value) bool + // SetStructValues iterates through the struct fields represented by -// the given reflect.Value and initializes all fields with -// the given values for strings and integers. 
-func SetStructValues(in interface{}, vStr string, vInt int) { +// the given reflect.Value and initializes all fields with the provided values +func SetStructValues(in interface{}, values *Values, opts ...SetStructValuesOption) { IterateStruct(in, func(f reflect.Value, key string) { - var newVal interface{} - switch v := f.Interface().(type) { - case map[string]interface{}: - newVal = map[string]interface{}{vStr: vStr} - case common.MapStr: - newVal = common.MapStr{vStr: vStr} - case []string: - newVal = []string{vStr} - case []int: - newVal = []int{vInt, vInt} - case nullable.String: - v.Set(vStr) - newVal = v - case nullable.Int: - v.Set(vInt) - newVal = v - case nullable.Interface: - v.Set(vStr) - newVal = v + fieldVal := f + switch fKind := f.Kind(); fKind { + case reflect.String: + fieldVal = reflect.ValueOf(values.Str) + case reflect.Int, reflect.Int64: + fieldVal = reflect.ValueOf(values.Int).Convert(f.Type()) + case reflect.Slice: + var elemVal reflect.Value + switch v := f.Interface().(type) { + case []string: + elemVal = reflect.ValueOf(values.Str) + case []int: + elemVal = reflect.ValueOf(values.Int) + case []int64: + elemVal = reflect.ValueOf(int64(values.Int)) + case []float64: + elemVal = reflect.ValueOf(values.Float) + case net.IP: + fieldVal = reflect.ValueOf(values.IP) + default: + if f.Type().Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("unhandled type %s for key %s", v, key)) + } + elemVal = reflect.Zero(f.Type().Elem()) + } + if elemVal.IsValid() { + fieldVal = fieldVal.Slice(0, 0) + for i := 0; i < values.N; i++ { + fieldVal = reflect.Append(fieldVal, elemVal) + } + } + case reflect.Map: + fieldVal = reflect.MakeMapWithSize(f.Type(), values.N) + var elemVal reflect.Value + switch v := f.Interface().(type) { + case map[string]interface{}, common.MapStr: + elemVal = reflect.ValueOf(values.Str) + case map[string]float64: + elemVal = reflect.ValueOf(values.Float) + default: + if f.Type().Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("unhandled type %s for key %s", v, key)) + } + elemVal = reflect.Zero(f.Type().Elem()) + } + for i := 0; i < values.N; i++ { + fieldVal.SetMapIndex(reflect.ValueOf(fmt.Sprintf("%s%v", values.Str, i)), elemVal) + } + case reflect.Struct: + switch v := f.Interface().(type) { + case nullable.String: + v.Set(values.Str) + fieldVal = reflect.ValueOf(v) + case nullable.Int: + v.Set(values.Int) + fieldVal = reflect.ValueOf(v) + case nullable.Interface: + if strings.Contains(key, "port") { + v.Set(values.Int) + } else { + v.Set(values.Str) + } + fieldVal = reflect.ValueOf(v) + case nullable.Bool: + v.Set(values.Bool) + fieldVal = reflect.ValueOf(v) + case nullable.Float64: + v.Set(values.Float) + fieldVal = reflect.ValueOf(v) + case nullable.TimeMicrosUnix: + v.Set(values.Time) + fieldVal = reflect.ValueOf(v) + case nullable.HTTPHeader: + v.Set(values.HTTPHeader.Clone()) + fieldVal = reflect.ValueOf(v) + default: + if f.IsZero() { + fieldVal = reflect.Zero(f.Type()) + } else { + return + } + } + case reflect.Ptr: + if f.IsNil() { + fieldVal = reflect.Zero(f.Type()) + } + return default: - if f.Type().Kind() == reflect.Struct { - return + fieldVal = reflect.Value{} + } + + // Run through options, giving an opportunity to disable + // setting the field, or change the value. 
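+	// For example (editor's illustration, hypothetical option): passing
+	//
+	//	func(key string, _, _ reflect.Value) bool { return !strings.HasPrefix(key, "experimental") }
+	//
+	// as an option would leave every field whose key starts with
+	// "experimental" untouched, while all other fields are populated.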
+ setField := true + for _, opt := range opts { + if !opt(key, f, fieldVal) { + setField = false + break } - panic(fmt.Sprintf("unhandled type %T for key %s", f.Type().Kind(), key)) } - f.Set(reflect.ValueOf(newVal)) + if setField { + if !fieldVal.IsValid() { + panic(fmt.Sprintf("unhandled type %s for key %s", f.Kind(), key)) + } + f.Set(fieldVal) + } }) } @@ -87,6 +249,84 @@ func SetZeroStructValue(i interface{}, callback func(string)) { }) } +// AssertStructValues recursively walks through the given struct and asserts +// that values are equal to expected values +func AssertStructValues(t *testing.T, i interface{}, isException func(string) bool, + values *Values) { + IterateStruct(i, func(f reflect.Value, key string) { + if isException(key) { + return + } + fVal := f.Interface() + var newVal interface{} + switch fVal.(type) { + case map[string]interface{}: + m := map[string]interface{}{} + for i := 0; i < values.N; i++ { + m[fmt.Sprintf("%s%v", values.Str, i)] = values.Str + } + newVal = m + case common.MapStr: + m := common.MapStr{} + for i := 0; i < values.N; i++ { + m.Put(fmt.Sprintf("%s%v", values.Str, i), values.Str) + } + newVal = m + case []string: + m := make([]string, values.N) + for i := 0; i < values.N; i++ { + m[i] = values.Str + } + newVal = m + case string: + newVal = values.Str + case *string: + newVal = &values.Str + case int: + newVal = values.Int + case int64: + newVal = int64(values.Int) + case *int: + newVal = &values.Int + case float64: + newVal = values.Float + case *float64: + val := values.Float + newVal = &val + case net.IP: + newVal = values.IP + case bool: + newVal = values.Bool + case *bool: + newVal = &values.Bool + case http.Header: + newVal = values.HTTPHeader + case time.Time: + newVal = values.Time + case time.Duration: + newVal = values.Duration + default: + // the populator recursively iterates over struct and structPtr + // calling this function for all fields; + // it is enough to only assert they are not zero here + if f.Type().Kind() == reflect.Struct { + assert.NotZero(t, fVal, key) + return + } + if f.Type().Kind() == reflect.Ptr && f.Type().Elem().Kind() == reflect.Struct { + assert.NotZero(t, fVal, key) + return + } + if f.Type().Kind() == reflect.Map || f.Type().Kind() == reflect.Slice { + assert.NotZero(t, fVal, key) + return + } + panic(fmt.Sprintf("unhandled type %s for key %s", f.Type(), key)) + } + assert.Equal(t, newVal, fVal, key) + }) +} + // IterateStruct iterates through the struct fields represented by // the given reflect.Value and calls the given function on every field. func IterateStruct(i interface{}, fn func(reflect.Value, string)) { @@ -100,7 +340,7 @@ func IterateStruct(i interface{}, fn func(reflect.Value, string)) { func iterateStruct(v reflect.Value, key string, fn func(f reflect.Value, fKey string)) { t := v.Type() if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("iterateStruct: invalid typ %T", t.Kind())) + panic(fmt.Sprintf("iterateStruct: invalid type %s", t.Kind())) } if key != "" { key += "." 
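(Editor's note: a minimal sketch, with illustrative names, of how the populator helpers above are typically paired in the decoder tests that follow in this change: populate a decoder input struct, run the mapping under test, then verify the mapped model.)

	var input transaction
	values := modeldecodertest.DefaultValues()
	modeldecodertest.SetStructValues(&input, values)
	var event model.APMEvent
	mapToTransactionModel(&input, &event) // mapping function under test
	noExceptions := func(key string) bool { return false }
	modeldecodertest.AssertStructValues(t, event.Transaction, noExceptions, values)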
@@ -111,6 +351,7 @@ func iterateStruct(v reflect.Value, key string, fn func(f reflect.Value, fKey st
 		if !f.CanSet() {
 			continue
 		}
+
 		stf := t.Field(i)
 		fTyp := stf.Type
 		name := jsonName(stf)
@@ -119,14 +360,50 @@ func iterateStruct(v reflect.Value, key string, fn func(f reflect.Value, fKey st
 		}
 		fKey = fmt.Sprintf("%s%s", key, name)
-		if fTyp.Kind() == reflect.Struct {
+		// call the given function with every field
+		fn(f, fKey)
+		// check field type for recursive iteration
+		switch f.Kind() {
+		case reflect.Ptr:
+			if !f.IsZero() && fTyp.Elem().Kind() == reflect.Struct {
+				iterateStruct(f.Elem(), fKey, fn)
+			}
+		case reflect.Struct:
 			switch f.Interface().(type) {
-			case nullable.String, nullable.Int, nullable.Interface:
+			case nullable.String, nullable.Int, nullable.Bool, nullable.Float64,
+				nullable.Interface, nullable.HTTPHeader, nullable.TimeMicrosUnix:
 			default:
 				iterateStruct(f, fKey, fn)
 			}
+		case reflect.Map:
+			if f.Type().Elem().Kind() != reflect.Struct {
+				continue
+			}
+			iter := f.MapRange()
+			for iter.Next() {
+				mKey := iter.Key()
+				mVal := iter.Value()
+				ptr := reflect.New(mVal.Type())
+				ptr.Elem().Set(mVal)
+				iterateStruct(ptr.Elem(), fmt.Sprintf("%s.[%s]", fKey, mKey), fn)
+				f.SetMapIndex(mKey, ptr.Elem())
+			}
+		case reflect.Slice, reflect.Array:
+			if v.Type() == f.Type().Elem() {
+				continue
+			}
+			for j := 0; j < f.Len(); j++ {
+				sliceField := f.Index(j)
+				switch sliceField.Kind() {
+				case reflect.Struct:
+					iterateStruct(sliceField, fmt.Sprintf("%s.[%v]", fKey, j), fn)
+				case reflect.Ptr:
+					if !sliceField.IsZero() && sliceField.Type().Elem().Kind() == reflect.Struct {
+						iterateStruct(sliceField.Elem(), fKey, fn)
+					}
+				}
+			}
 		}
-		fn(f, fKey)
 	}
 }
@@ -138,6 +415,7 @@ func jsonName(f reflect.StructField) string {
 	parts := strings.Split(tag, ",")
 	if len(parts) == 0 {
 		return ""
+	}
 	return parts[0]
 }
diff --git a/model/modeldecoder/modeldecodertest/strbuilder.go b/model/modeldecoder/modeldecodertest/strbuilder.go
new file mode 100644
index 00000000000..434c881433d
--- /dev/null
+++ b/model/modeldecoder/modeldecodertest/strbuilder.go
@@ -0,0 +1,30 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package modeldecodertest
+
+import "strings"
+
+// BuildString creates a string consisting of nRunes runes
+func BuildString(nRunes int) string {
+	return BuildStringWith(nRunes, '⌘')
+}
+
+// BuildStringWith creates a string consisting of nRunes of the given rune
+func BuildStringWith(nRunes int, r rune) string {
+	return strings.Repeat(string(r), nRunes)
+}
diff --git a/model/modeldecoder/modeldecodertest/testdata.go b/model/modeldecoder/modeldecodertest/testdata.go
new file mode 100644
index 00000000000..301b7eea321
--- /dev/null
+++ b/model/modeldecoder/modeldecodertest/testdata.go
@@ -0,0 +1,85 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecodertest + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" +) + +// DecodeData decodes input from the io.Reader into the given output +// it skips events with a different type than the given eventType +// and decodes the first matching event type +func DecodeData(t *testing.T, r io.Reader, eventType string, out interface{}) { + dec := decoder.NewNDJSONStreamDecoder(r, 300*1024) + var et string + var err error + for et != eventType { + if et, err = readEventType(dec); err != nil { + require.Equal(t, io.EOF, err) + } + } + // decode data + if err = dec.Decode(&out); err != nil { + require.Equal(t, io.EOF, err) + } +} + +// DecodeDataWithReplacement decodes input from the io.Reader and replaces data for the +// given key with the provided newData before decoding into the output +func DecodeDataWithReplacement(t *testing.T, r io.Reader, eventType string, newData string, out interface{}, keys ...string) { + var data map[string]interface{} + DecodeData(t, r, eventType, &data) + // replace data for given key with newData + d := data[eventType].(map[string]interface{}) + for i := 0; i < len(keys)-1; i++ { + key := keys[i] + if _, ok := d[key]; !ok { + d[key] = map[string]interface{}{} + } + d = d[key].(map[string]interface{}) + } + var keyData interface{} + require.NoError(t, json.Unmarshal([]byte(newData), &keyData)) + d[keys[len(keys)-1]] = keyData + + // unmarshal data into struct + b, err := json.Marshal(data[eventType]) + require.NoError(t, err) + require.NoError(t, decoder.NewJSONDecoder(bytes.NewReader(b)).Decode(out)) +} + +func readEventType(d *decoder.NDJSONStreamDecoder) (string, error) { + body, err := d.ReadAhead() + if err != nil && err != io.EOF { + return "", err + } + body = bytes.TrimLeft(body, `{ "`) + end := bytes.Index(body, []byte(`"`)) + if end == -1 { + return "", errors.New("invalid input: " + string(body)) + } + return string(body[0:end]), nil +} diff --git a/model/modeldecoder/modeldecoderutil/exception.go b/model/modeldecoder/modeldecoderutil/exception.go new file mode 100644 index 00000000000..ccc6e24ba24 --- /dev/null +++ b/model/modeldecoder/modeldecoderutil/exception.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecoderutil + +import ( + "encoding/json" + "strconv" +) + +// ExceptionCodeString formats the exception code v as a string. +func ExceptionCodeString(v interface{}) string { + switch v := v.(type) { + case int: + return strconv.Itoa(v) + case float64: + return strconv.Itoa(int(v)) + case string: + return v + case json.Number: + return v.String() + } + return "" +} diff --git a/model/modeldecoder/modeldecoderutil/http.go b/model/modeldecoder/modeldecoderutil/http.go new file mode 100644 index 00000000000..de76026a459 --- /dev/null +++ b/model/modeldecoder/modeldecoderutil/http.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecoderutil + +import ( + "encoding/json" + "net/http" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// HTTPHeadersToMap converts h to a common.MapStr, suitable for +// use in model.HTTP.{Request,Response}.Headers. +func HTTPHeadersToMap(h http.Header) common.MapStr { + if len(h) == 0 { + return nil + } + m := make(common.MapStr, len(h)) + for k, v := range h { + m[k] = v + } + return m +} + +// NormalizeHTTPRequestBody recurses through v, replacing any instance of +// a json.Number with float64. +// +// TODO(axw) define a more restrictive schema for context.request.body +// so this is unnecessary. Agents are unlikely to send numbers, but +// seeing as the schema does not prevent it we need this. +func NormalizeHTTPRequestBody(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + for i, elem := range v { + v[i] = NormalizeHTTPRequestBody(elem) + } + if len(v) == 0 { + return nil + } + case map[string]interface{}: + m := v + for k, v := range v { + v := NormalizeHTTPRequestBody(v) + if v != nil { + m[k] = v + } else { + delete(m, k) + } + } + if len(m) == 0 { + return nil + } + case json.Number: + if floatVal, err := v.Float64(); err == nil { + return common.Float(floatVal) + } + } + return v +} diff --git a/model/modeldecoder/modeldecoderutil/labels.go b/model/modeldecoder/modeldecoderutil/labels.go new file mode 100644 index 00000000000..b24efa45094 --- /dev/null +++ b/model/modeldecoder/modeldecoderutil/labels.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecoderutil + +import ( + "encoding/json" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// MergeLabels merges eventLabels onto commonLabels. This is used for +// combining event-specific labels onto (metadata) global labels. +// +// If commonLabels is non-nil, it is first cloned. If commonLabels +// is nil, then eventLabels is cloned. +func MergeLabels(commonLabels, eventLabels common.MapStr) common.MapStr { + if commonLabels == nil { + return eventLabels.Clone() + } + combinedLabels := commonLabels.Clone() + for k, v := range eventLabels { + combinedLabels[k] = v + } + return combinedLabels +} + +// NormalizeLabelValues transforms the values in labels, replacing any +// instance of json.Number with libbeat/common.Float, and returning +// labels. +func NormalizeLabelValues(labels common.MapStr) common.MapStr { + for k, v := range labels { + switch v := v.(type) { + case json.Number: + if floatVal, err := v.Float64(); err == nil { + labels[k] = common.Float(floatVal) + } + } + } + return labels +} diff --git a/model/modeldecoder/modeldecoderutil/metrics.go b/model/modeldecoder/modeldecoderutil/metrics.go new file mode 100644 index 00000000000..53a586aea3e --- /dev/null +++ b/model/modeldecoder/modeldecoderutil/metrics.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modeldecoderutil + +import ( + "time" + + "github.com/elastic/apm-server/model" +) + +// SetInternalMetrics extracts well-known internal metrics from event.Metricset.Samples, +// setting the appropriate field on event.Transaction and event.Span (if non-nil) and +// finally setting event.Metricset.Samples to nil. +// +// Any unknown metrics sent by agents in a metricset with transaction.* set will be +// silently discarded. +func SetInternalMetrics(event *model.APMEvent) { + if event.Transaction == nil { + // Not an internal metricset. 
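+		// (Editor's note: only metricsets carrying transaction.* samples, such
+		// as "transaction.duration.count" handled below, are treated as
+		// internal; all other metricsets keep their samples untouched.)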
+ return + } + for k, v := range event.Metricset.Samples { + switch k { + case "transaction.breakdown.count": + event.Transaction.BreakdownCount = int(v.Value) + case "transaction.duration.count": + event.Transaction.AggregatedDuration.Count = int(v.Value) + case "transaction.duration.sum.us": + event.Transaction.AggregatedDuration.Sum = time.Duration(v.Value * 1000) + case "span.self_time.count": + if event.Span != nil { + event.Span.SelfTime.Count = int(v.Value) + } + case "span.self_time.sum.us": + if event.Span != nil { + event.Span.SelfTime.Sum = time.Duration(v.Value * 1000) + } + } + } + event.Metricset.Samples = nil +} diff --git a/model/modeldecoder/nullable/nullable.go b/model/modeldecoder/nullable/nullable.go index b4c5daad3aa..130d172c065 100644 --- a/model/modeldecoder/nullable/nullable.go +++ b/model/modeldecoder/nullable/nullable.go @@ -18,6 +18,9 @@ package nullable import ( + "fmt" + "net/http" + "time" "unsafe" jsoniter "github.com/json-iterator/go" @@ -42,15 +45,78 @@ func init() { (*((*Int)(ptr))).isSet = true } }) + jsoniter.RegisterTypeDecoderFunc("nullable.Float64", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NilValue: + iter.ReadNil() + default: + (*((*Float64)(ptr))).Val = iter.ReadFloat64() + (*((*Float64)(ptr))).isSet = true + } + }) + jsoniter.RegisterTypeDecoderFunc("nullable.Bool", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NilValue: + iter.ReadNil() + default: + (*((*Bool)(ptr))).Val = iter.ReadBool() + (*((*Bool)(ptr))).isSet = true + } + }) jsoniter.RegisterTypeDecoderFunc("nullable.Interface", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.ReadNil() default: - (*((*Interface)(ptr))).Val = iter.Read() + v := iter.Read() + (*((*Interface)(ptr))).Val = v (*((*Interface)(ptr))).isSet = true } }) + jsoniter.RegisterTypeDecoderFunc("nullable.TimeMicrosUnix", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NilValue: + iter.ReadNil() + default: + us := iter.ReadInt64() + s := us / 1000000 + ns := (us - (s * 1000000)) * 1000 + (*((*TimeMicrosUnix)(ptr))).Val = time.Unix(s, ns).UTC() + (*((*TimeMicrosUnix)(ptr))).isSet = true + } + }) + jsoniter.RegisterTypeDecoderFunc("nullable.HTTPHeader", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NilValue: + iter.ReadNil() + default: + m, ok := iter.Read().(map[string]interface{}) + if !ok { + iter.Error = fmt.Errorf("invalid input for HTTPHeader: %v", m) + } + h := http.Header{} + for key, val := range m { + switch v := val.(type) { + case nil: + case string: + h.Add(key, v) + case []interface{}: + for _, entry := range v { + switch entry := entry.(type) { + case string: + h.Add(key, entry) + default: + iter.Error = fmt.Errorf("invalid input for HTTPHeader: %v", v) + } + } + default: + iter.Error = fmt.Errorf("invalid input for HTTPHeader: %v", v) + } + } + (*((*HTTPHeader)(ptr))).Val = h + (*((*HTTPHeader)(ptr))).isSet = true + } + }) } // String stores a string value and the @@ -103,6 +169,56 @@ func (v *Int) Reset() { v.isSet = false } +// Float64 stores a float64 value and the +// information if the value has been set +type Float64 struct { + Val float64 + isSet bool +} + +// Set sets the value +func (v *Float64) Set(val float64) { + v.Val = val + v.isSet = true +} + +// IsSet is true when decode was called +func (v *Float64) IsSet() bool { + return 
v.isSet
+}
+
+// Reset sets the Float64 to its initial state
+// where it is not set and has no value
+func (v *Float64) Reset() {
+	v.Val = 0.0
+	v.isSet = false
+}
+
+// Bool stores a bool value and the
+// information if the value has been set
+type Bool struct {
+	Val   bool
+	isSet bool
+}
+
+// Set sets the value
+func (v *Bool) Set(val bool) {
+	v.Val = val
+	v.isSet = true
+}
+
+// IsSet is true when decode was called
+func (v *Bool) IsSet() bool {
+	return v.isSet
+}
+
+// Reset sets the Bool to its initial state
+// where it is not set and has no value
+func (v *Bool) Reset() {
+	v.Val = false
+	v.isSet = false
+}
+
 // Interface stores an interface{} value and the
 // information if the value has been set
 //
@@ -129,3 +245,51 @@ func (v *Interface) Reset() {
 	v.Val = nil
 	v.isSet = false
 }
+
+// TimeMicrosUnix stores a time.Time value, decoded from a Unix
+// timestamp in microseconds, and the information if the value has been set
+type TimeMicrosUnix struct {
+	Val   time.Time
+	isSet bool
+}
+
+// Set sets the value
+func (v *TimeMicrosUnix) Set(val time.Time) {
+	v.Val = val
+	v.isSet = true
+}
+
+// IsSet is true when decode was called
+func (v *TimeMicrosUnix) IsSet() bool {
+	return v.isSet
+}
+
+// Reset sets the TimeMicrosUnix to its initial state
+// where it is not set and has no value
+func (v *TimeMicrosUnix) Reset() {
+	v.Val = time.Time{}
+	v.isSet = false
+}
+
+// HTTPHeader stores an http.Header value and the
+// information if the value has been set
+type HTTPHeader struct {
+	Val   http.Header
+	isSet bool
+}
+
+// Set sets the value
+func (v *HTTPHeader) Set(val http.Header) {
+	v.Val = val
+	v.isSet = true
+}
+
+// IsSet is true when decode was called
+func (v *HTTPHeader) IsSet() bool {
+	return v.isSet
+}
+
+// Reset sets the HTTPHeader to its initial state
+// where it is not set and has no value
+func (v *HTTPHeader) Reset() {
+	for k := range v.Val {
+		delete(v.Val, k)
+	}
+	v.isSet = false
+}
diff --git a/model/modeldecoder/nullable/nullable_test.go b/model/modeldecoder/nullable/nullable_test.go
index 3c27a8a938a..320b3f8689a 100644
--- a/model/modeldecoder/nullable/nullable_test.go
+++ b/model/modeldecoder/nullable/nullable_test.go
@@ -18,8 +18,10 @@ package nullable
 import (
+	"net/http"
 	"strings"
 	"testing"
+	"time"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/stretchr/testify/assert"
@@ -27,9 +29,13 @@ import (
 )
 type testType struct {
-	S String    `json:"s"`
-	I Int       `json:"i"`
-	V Interface `json:"v"`
+	S   String         `json:"s"`
+	I   Int            `json:"i"`
+	F   Float64        `json:"f"`
+	B   Bool           `json:"b"`
+	V   Interface      `json:"v"`
+	Tms TimeMicrosUnix `json:"tms"`
+	H   HTTPHeader     `json:"h"`
 }
 var json = jsoniter.ConfigCompatibleWithStandardLibrary
@@ -108,6 +114,82 @@ func TestInt(t *testing.T) {
 	}
 }
+func TestFloat64(t *testing.T) {
+	for _, tc := range []struct {
+		name  string
+		input string
+
+		val        float64
+		isSet, fail bool
+	}{
+		{name: "values", input: `{"f":44.89}`, val: 44.89, isSet: true},
+		{name: "integer", input: `{"f":44}`, val: 44.00, isSet: true},
+		{name: "zero", input: `{"f":0}`, isSet: true},
+		{name: "null", input: `{"f":null}`, isSet: false},
+		{name: "missing", input: `{}`},
+		{name: "invalid", input: `{"f":"1.0.1"}`, fail: true},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			dec := json.NewDecoder(strings.NewReader(tc.input))
+			var testStruct testType
+			err := dec.Decode(&testStruct)
+			if tc.fail {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tc.isSet, testStruct.F.IsSet())
+				assert.Equal(t, tc.val, testStruct.F.Val)
+			}
+
+			testStruct.F.Reset()
+			assert.False(t, testStruct.F.IsSet())
+			assert.Empty(t, testStruct.F.Val)
+
+			testStruct.F.Set(55.67)
+			assert.True(t, testStruct.F.IsSet())
+			assert.Equal(t, 55.67, testStruct.F.Val)
+		})
+	}
+}
+
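+// (Editor's aside, not part of the change: what the nullable types buy over
+// plain fields is the three-way distinction exercised by the cases above,
+// e.g. for Float64:
+//
+//	`{"f":0}`    -> IsSet() == true,  Val == 0   (explicit zero)
+//	`{"f":null}` -> IsSet() == false             (explicit null)
+//	`{}`         -> IsSet() == false             (key missing)
+//
+// A plain float64 field would conflate all three as 0.)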
+func TestBool(t *testing.T) { + for _, tc := range []struct { + name string + input string + + val bool + isSet, fail bool + }{ + {name: "true", input: `{"b":true}`, val: true, isSet: true}, + {name: "false", input: `{"b":false}`, val: false, isSet: true}, + {name: "null", input: `{"b":null}`, isSet: false}, + {name: "missing", input: `{}`}, + {name: "convert", input: `{"b":1}`, fail: true}, + {name: "invalid", input: `{"b":"1.0.1"}`, fail: true}, + } { + t.Run(tc.name, func(t *testing.T) { + dec := json.NewDecoder(strings.NewReader(tc.input)) + var testStruct testType + err := dec.Decode(&testStruct) + if tc.fail { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.isSet, testStruct.B.IsSet()) + assert.Equal(t, tc.val, testStruct.B.Val) + } + + testStruct.B.Reset() + assert.False(t, testStruct.B.IsSet()) + assert.Empty(t, testStruct.B.Val) + + testStruct.B.Set(true) + assert.True(t, testStruct.B.IsSet()) + assert.Equal(t, true, testStruct.B.Val) + }) + } +} + func TestInterface(t *testing.T) { for _, tc := range []struct { name string @@ -141,3 +223,87 @@ func TestInterface(t *testing.T) { }) } } + +func TestTimeMicrosUnix(t *testing.T) { + for _, tc := range []struct { + name string + input string + + val string + isSet, fail bool + }{ + {name: "valid", input: `{"tms":1599996822281000}`, isSet: true, + val: "2020-09-13 11:33:42.281 +0000 UTC"}, + {name: "null", input: `{"tms":null}`, val: time.Time{}.String()}, + {name: "invalid-type", input: `{"tms":""}`, fail: true, isSet: true}, + {name: "invalid-type", input: `{"tms":123.56}`, fail: true, isSet: true}, + {name: "missing", input: `{}`, val: time.Time{}.String()}, + } { + t.Run(tc.name, func(t *testing.T) { + dec := json.NewDecoder(strings.NewReader(tc.input)) + var testStruct testType + err := dec.Decode(&testStruct) + if tc.fail { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.isSet, testStruct.Tms.IsSet()) + assert.Equal(t, tc.val, testStruct.Tms.Val.String()) + } + + testStruct.Tms.Reset() + assert.False(t, testStruct.Tms.IsSet()) + assert.Zero(t, testStruct.Tms.Val) + + testStruct.Tms.Set(time.Now()) + assert.True(t, testStruct.Tms.IsSet()) + assert.NotZero(t, testStruct.Tms.Val) + }) + } +} +func TestHTTPHeader(t *testing.T) { + for _, tc := range []struct { + name string + input string + + val http.Header + isSet, fail bool + }{ + {name: "valid", isSet: true, input: ` +{"h":{"content-type":"application/x-ndjson","Authorization":"Bearer 123-token","authorization":"ApiKey 123-api-key","Accept":["text/html", "application/xhtml+xml"]}}`, + val: http.Header{ + "Content-Type": []string{"application/x-ndjson"}, + "Authorization": []string{"ApiKey 123-api-key", "Bearer 123-token"}, + "Accept": []string{"text/html", "application/xhtml+xml"}, + }}, + {name: "valid2", input: `{"h":{"k":["a","b"]}}`, isSet: true, val: http.Header{"K": []string{"a", "b"}}}, + {name: "null", input: `{"h":null}`}, + {name: "invalid-type", input: `{"h":""}`, fail: true, isSet: true}, + {name: "invalid-array", input: `{"h":{"k":["a",23]}}`, isSet: true, fail: true}, + {name: "missing", input: `{}`}, + } { + t.Run(tc.name, func(t *testing.T) { + dec := json.NewDecoder(strings.NewReader(tc.input)) + var testStruct testType + err := dec.Decode(&testStruct) + if tc.fail { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.isSet, testStruct.H.IsSet()) + assert.Equal(t, len(tc.val), len(testStruct.H.Val)) + for k, v := range tc.val { + assert.ElementsMatch(t, v, 
testStruct.H.Val.Values(k)) + } + } + + testStruct.H.Reset() + assert.False(t, testStruct.H.IsSet()) + assert.Empty(t, testStruct.H.Val) + + testStruct.H.Set(http.Header{"Accept": []string{"*/*"}}) + assert.True(t, testStruct.H.IsSet()) + assert.NotEmpty(t, testStruct.H.Val) + }) + } +} diff --git a/model/modeldecoder/process.go b/model/modeldecoder/process.go deleted file mode 100644 index 535be3a7d44..00000000000 --- a/model/modeldecoder/process.go +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import "github.com/elastic/apm-server/model" - -func decodeProcess(input map[string]interface{}, out *model.Process) { - if input == nil { - return - } - - decodeString(input, "title", &out.Title) - decodeInt(input, "pid", &out.Pid) - - var ppid int - if decodeInt(input, "ppid", &ppid) { - // TODO(axw) consider using a negative value as a sentinel - // value for unset ppid, since pids cannot be negative. - out.Ppid = &ppid - } - - if argv, ok := input["argv"].([]interface{}); ok { - out.Argv = out.Argv[:0] - for _, arg := range argv { - if strval, ok := arg.(string); ok { - out.Argv = append(out.Argv, strval) - } - } - } -} diff --git a/model/modeldecoder/process_test.go b/model/modeldecoder/process_test.go deleted file mode 100644 index ab13d3295cd..00000000000 --- a/model/modeldecoder/process_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestProcessDecode(t *testing.T) { - pid, ppid, title, argv := 123, 456, "foo", []string{"a", "b"} - for _, test := range []struct { - input map[string]interface{} - p model.Process - }{ - {input: nil}, - { - input: map[string]interface{}{ - "pid": 123.0, "ppid": 456.0, "title": title, "argv": []interface{}{"a", "b"}, - }, - p: model.Process{Pid: pid, Ppid: &ppid, Title: title, Argv: argv}, - }, - } { - var proc model.Process - decodeProcess(test.input, &proc) - assert.Equal(t, test.p, proc) - } -} diff --git a/model/modeldecoder/rumv3/decoder.go b/model/modeldecoder/rumv3/decoder.go index 440d5c66eb4..c27236a7dfe 100644 --- a/model/modeldecoder/rumv3/decoder.go +++ b/model/modeldecoder/rumv3/decoder.go @@ -19,18 +19,45 @@ package rumv3 import ( "fmt" + "io" + "net/http" + "net/textproto" + "strings" "sync" - - "github.com/elastic/beats/v7/libbeat/common" + "time" "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecoderutil" + "github.com/elastic/apm-server/model/modeldecoder/nullable" ) -var metadataRootPool = sync.Pool{ - New: func() interface{} { - return &metadataRoot{} - }, +var ( + errorRootPool = sync.Pool{ + New: func() interface{} { + return &errorRoot{} + }, + } + metadataRootPool = sync.Pool{ + New: func() interface{} { + return &metadataRoot{} + }, + } + transactionRootPool = sync.Pool{ + New: func() interface{} { + return &transactionRoot{} + }, + } +) + +func fetchErrorRoot() *errorRoot { + return errorRootPool.Get().(*errorRoot) +} + +func releaseErrorRoot(root *errorRoot) { + root.Reset() + errorRootPool.Put(root) } func fetchMetadataRoot() *metadataRoot { @@ -42,35 +69,242 @@ func releaseMetadataRoot(m *metadataRoot) { metadataRootPool.Put(m) } -// DecodeNestedMetadata uses the given decoder to create the input models, -// then runs the defined validations on the input models -// and finally maps the values fom the input model to the given *model.Metadata instance -func DecodeNestedMetadata(d decoder.Decoder, out *model.Metadata) error { - m := fetchMetadataRoot() - defer releaseMetadataRoot(m) - if err := d.Decode(&m); err != nil { - return fmt.Errorf("decode error %w", err) +func fetchTransactionRoot() *transactionRoot { + return transactionRootPool.Get().(*transactionRoot) +} + +func releaseTransactionRoot(m *transactionRoot) { + m.Reset() + transactionRootPool.Put(m) +} + +// DecodeNestedMetadata decodes metadata from d, updating out. +func DecodeNestedMetadata(d decoder.Decoder, out *model.APMEvent) error { + root := fetchMetadataRoot() + defer releaseMetadataRoot(root) + if err := d.Decode(root); err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) + } + if err := root.validate(); err != nil { + return modeldecoder.NewValidationErr(err) + } + mapToMetadataModel(&root.Metadata, out) + return nil +} + +// DecodeNestedError decodes an error from d, appending it to batch. 
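+// The decoded event starts as a copy of input.Base, so metadata decoded
+// earlier in the stream is inherited before error-specific fields are
+// mapped onto it (editor's note).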
+//
+// DecodeNestedError should be used when the stream in the decoder contains the `error` key
+func DecodeNestedError(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error {
+	root := fetchErrorRoot()
+	defer releaseErrorRoot(root)
+	if err := d.Decode(root); err != nil && err != io.EOF {
+		return modeldecoder.NewDecoderErrFromJSONIter(err)
+	}
+	if err := root.validate(); err != nil {
+		return modeldecoder.NewValidationErr(err)
+	}
+	event := input.Base
+	mapToErrorModel(&root.Error, &event)
+	*batch = append(*batch, event)
+	return nil
+}
+
+// DecodeNestedTransaction decodes a transaction and zero or more nested spans and
+// metricsets, appending them to batch.
+//
+// DecodeNestedTransaction should be used when the decoder contains the `transaction` key
+func DecodeNestedTransaction(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error {
+	root := fetchTransactionRoot()
+	defer releaseTransactionRoot(root)
+	if err := d.Decode(root); err != nil && err != io.EOF {
+		return modeldecoder.NewDecoderErrFromJSONIter(err)
+	}
+	if err := root.validate(); err != nil {
+		return modeldecoder.NewValidationErr(err)
+	}
+
+	transaction := input.Base
+	mapToTransactionModel(&root.Transaction, &transaction)
+	*batch = append(*batch, transaction)
+
+	for _, m := range root.Transaction.Metricsets {
+		event := input.Base
+		event.Transaction = &model.Transaction{
+			Name: transaction.Transaction.Name,
+			Type: transaction.Transaction.Type,
+		}
+		mapToTransactionMetricsetModel(&m, &event)
+		*batch = append(*batch, event)
 	}
-	if err := m.validate(); err != nil {
-		return fmt.Errorf("validation error %w", err)
+
+	offset := len(*batch)
+	for _, s := range root.Transaction.Spans {
+		event := input.Base
+		mapToSpanModel(&s, &event)
+		event.Transaction = &model.Transaction{ID: transaction.Transaction.ID}
+		event.Parent.ID = transaction.Transaction.ID // may be overridden later
+		event.Trace = transaction.Trace
+		*batch = append(*batch, event)
+	}
+	spans := (*batch)[offset:]
+	for i, s := range root.Transaction.Spans {
+		if s.ParentIndex.IsSet() && s.ParentIndex.Val >= 0 && s.ParentIndex.Val < len(spans) {
+			spans[i].Parent.ID = spans[s.ParentIndex.Val].Span.ID
+		}
 	}
-	mapToMetadataModel(&m.Metadata, out)
 	return nil
 }

+func mapToErrorModel(from *errorEvent, event *model.APMEvent) {
+	out := &model.Error{}
+	event.Error = out
+	event.Processor = model.ErrorProcessor
+
+	// overwrite metadata with event specific information
+	mapToServiceModel(from.Context.Service, &event.Service)
+	mapToAgentModel(from.Context.Service.Agent, &event.Agent)
+	overwriteUserInMetadataModel(from.Context.User, event)
+	mapToUserAgentModel(from.Context.Request.Headers, &event.UserAgent)
+
+	// map errorEvent specific data
+	if from.Context.IsSet() {
+		if len(from.Context.Tags) > 0 {
+			event.Labels = modeldecoderutil.MergeLabels(
+				event.Labels,
+				modeldecoderutil.NormalizeLabelValues(from.Context.Tags),
+			)
+		}
+		if from.Context.Request.IsSet() {
+			event.HTTP.Request = &model.HTTPRequest{}
+			mapToRequestModel(from.Context.Request, event.HTTP.Request)
+			if from.Context.Request.HTTPVersion.IsSet() {
+				event.HTTP.Version = from.Context.Request.HTTPVersion.Val
+			}
+		}
+		if from.Context.Response.IsSet() {
+			event.HTTP.Response = &model.HTTPResponse{}
+			mapToResponseModel(from.Context.Response, event.HTTP.Response)
+		}
+		if from.Context.Page.IsSet() {
+			if from.Context.Page.URL.IsSet() {
+				event.URL = model.ParseURL(from.Context.Page.URL.Val, "", "")
+ } + if from.Context.Page.Referer.IsSet() { + if event.HTTP.Request == nil { + event.HTTP.Request = &model.HTTPRequest{} + } + event.HTTP.Request.Referrer = from.Context.Page.Referer.Val + } + } + if len(from.Context.Custom) > 0 { + out.Custom = modeldecoderutil.NormalizeLabelValues(from.Context.Custom.Clone()) + } + } + if from.Culprit.IsSet() { + out.Culprit = from.Culprit.Val + } + if from.Exception.IsSet() { + out.Exception = &model.Exception{} + mapToExceptionModel(from.Exception, out.Exception) + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Log.IsSet() { + log := model.Log{} + if from.Log.Level.IsSet() { + log.Level = from.Log.Level.Val + } + loggerName := "default" + if from.Log.LoggerName.IsSet() { + loggerName = from.Log.LoggerName.Val + + } + log.LoggerName = loggerName + if from.Log.Message.IsSet() { + log.Message = from.Log.Message.Val + } + if from.Log.ParamMessage.IsSet() { + log.ParamMessage = from.Log.ParamMessage.Val + } + if len(from.Log.Stacktrace) > 0 { + log.Stacktrace = make(model.Stacktrace, len(from.Log.Stacktrace)) + mapToStracktraceModel(from.Log.Stacktrace, log.Stacktrace) + } + out.Log = &log + } + if from.ParentID.IsSet() { + event.Parent.ID = from.ParentID.Val + } + if !from.Timestamp.Val.IsZero() { + event.Timestamp = from.Timestamp.Val + } + if from.TraceID.IsSet() { + event.Trace.ID = from.TraceID.Val + } + if from.Transaction.IsSet() { + event.Transaction = &model.Transaction{} + if from.Transaction.Sampled.IsSet() { + event.Transaction.Sampled = from.Transaction.Sampled.Val + } + if from.Transaction.Type.IsSet() { + event.Transaction.Type = from.Transaction.Type.Val + } + if from.TransactionID.IsSet() { + event.Transaction.ID = from.TransactionID.Val + } + } +} + +func mapToExceptionModel(from errorException, out *model.Exception) { + if !from.IsSet() { + return + } + if len(from.Attributes) > 0 { + out.Attributes = from.Attributes.Clone() + } + if from.Code.IsSet() { + out.Code = modeldecoderutil.ExceptionCodeString(from.Code.Val) + } + if len(from.Cause) > 0 { + out.Cause = make([]model.Exception, len(from.Cause)) + for i := 0; i < len(from.Cause); i++ { + var ex model.Exception + mapToExceptionModel(from.Cause[i], &ex) + out.Cause[i] = ex + } + } + if from.Handled.IsSet() { + out.Handled = &from.Handled.Val + } + if from.Message.IsSet() { + out.Message = from.Message.Val + } + if from.Module.IsSet() { + out.Module = from.Module.Val + } + if len(from.Stacktrace) > 0 { + out.Stacktrace = make(model.Stacktrace, len(from.Stacktrace)) + mapToStracktraceModel(from.Stacktrace, out.Stacktrace) + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } +} + +func mapToMetadataModel(m *metadata, out *model.APMEvent) { // Labels if len(m.Labels) > 0 { - out.Labels = common.MapStr{} - out.Labels.Update(m.Labels) + out.Labels = modeldecoderutil.NormalizeLabelValues(m.Labels.Clone()) } // Service if m.Service.Agent.Name.IsSet() { - out.Service.Agent.Name = m.Service.Agent.Name.Val + out.Agent.Name = m.Service.Agent.Name.Val } if m.Service.Agent.Version.IsSet() { - out.Service.Agent.Version = m.Service.Agent.Version.Val + out.Agent.Version = m.Service.Agent.Version.Val } if m.Service.Environment.IsSet() { out.Service.Environment = m.Service.Environment.Val @@ -101,6 +335,9 @@ func mapToMetadataModel(m *metadata, out *model.Metadata) { } // User + if m.User.Domain.IsSet() { + out.User.Domain = fmt.Sprint(m.User.Domain.Val) + } if m.User.ID.IsSet() { out.User.ID = fmt.Sprint(m.User.ID.Val) } @@ -110,4 +347,467 @@ func mapToMetadataModel(m *metadata, 
out *model.Metadata) { if m.User.Name.IsSet() { out.User.Name = m.User.Name.Val } + + // Network + if m.Network.Connection.Type.IsSet() { + out.Network.Connection.Type = m.Network.Connection.Type.Val + } +} + +func mapToTransactionMetricsetModel(from *transactionMetricset, event *model.APMEvent) { + event.Metricset = &model.Metricset{} + event.Processor = model.MetricsetProcessor + + if from.Span.IsSet() { + event.Span = &model.Span{} + if from.Span.Subtype.IsSet() { + event.Span.Subtype = from.Span.Subtype.Val + } + if from.Span.Type.IsSet() { + event.Span.Type = from.Span.Type.Val + } + } + + if from.Samples.IsSet() { + if event.Transaction != nil { + if value := from.Samples.TransactionDurationCount.Value; value.IsSet() { + event.Transaction.AggregatedDuration.Count = int(value.Val) + } + if value := from.Samples.TransactionDurationSum.Value; value.IsSet() { + event.Transaction.AggregatedDuration.Sum = time.Duration(value.Val * 1000) + } + if value := from.Samples.TransactionBreakdownCount.Value; value.IsSet() { + event.Transaction.BreakdownCount = int(value.Val) + } + } + if event.Span != nil { + if value := from.Samples.SpanSelfTimeCount.Value; value.IsSet() { + event.Span.SelfTime.Count = int(value.Val) + } + if value := from.Samples.SpanSelfTimeSum.Value; value.IsSet() { + event.Span.SelfTime.Sum = time.Duration(value.Val * 1000) + } + } + } +} + +func mapToResponseModel(from contextResponse, out *model.HTTPResponse) { + if from.Headers.IsSet() { + out.Headers = modeldecoderutil.HTTPHeadersToMap(from.Headers.Val.Clone()) + } + if from.StatusCode.IsSet() { + out.StatusCode = from.StatusCode.Val + } + if from.TransferSize.IsSet() { + val := from.TransferSize.Val + out.TransferSize = &val + } + if from.EncodedBodySize.IsSet() { + val := from.EncodedBodySize.Val + out.EncodedBodySize = &val + } + if from.DecodedBodySize.IsSet() { + val := from.DecodedBodySize.Val + out.DecodedBodySize = &val + } +} + +func mapToRequestModel(from contextRequest, out *model.HTTPRequest) { + if from.Method.IsSet() { + out.Method = from.Method.Val + } + if len(from.Env) > 0 { + out.Env = from.Env.Clone() + } + if from.Headers.IsSet() { + out.Headers = modeldecoderutil.HTTPHeadersToMap(from.Headers.Val.Clone()) + } +} + +func mapToServiceModel(from contextService, out *model.Service) { + if from.Environment.IsSet() { + out.Environment = from.Environment.Val + } + if from.Framework.Name.IsSet() { + out.Framework.Name = from.Framework.Name.Val + } + if from.Framework.Version.IsSet() { + out.Framework.Version = from.Framework.Version.Val + } + if from.Language.Name.IsSet() { + out.Language.Name = from.Language.Name.Val + } + if from.Language.Version.IsSet() { + out.Language.Version = from.Language.Version.Val + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Runtime.Name.IsSet() { + out.Runtime.Name = from.Runtime.Name.Val + } + if from.Runtime.Version.IsSet() { + out.Runtime.Version = from.Runtime.Version.Val + } + if from.Version.IsSet() { + out.Version = from.Version.Val + } +} + +func mapToAgentModel(from contextServiceAgent, out *model.Agent) { + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Version.IsSet() { + out.Version = from.Version.Val + } +} + +func mapToSpanModel(from *span, event *model.APMEvent) { + out := &model.Span{} + event.Span = out + event.Processor = model.SpanProcessor + + // map span specific data + if !from.Action.IsSet() && !from.Subtype.IsSet() { + sep := "." 
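+		// e.g. (editor's illustration) a span type of "external.http.get"
+		// splits into type "external", subtype "http" and action "get"; any
+		// further dot-separated segments are folded back into the action.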
+ typ := strings.Split(from.Type.Val, sep) + out.Type = typ[0] + if len(typ) > 1 { + out.Subtype = typ[1] + if len(typ) > 2 { + out.Action = strings.Join(typ[2:], sep) + } + } + } else { + if from.Action.IsSet() { + out.Action = from.Action.Val + } + if from.Subtype.IsSet() { + out.Subtype = from.Subtype.Val + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } + } + if from.Context.Destination.Address.IsSet() || from.Context.Destination.Port.IsSet() { + if from.Context.Destination.Address.IsSet() { + event.Destination.Address = from.Context.Destination.Address.Val + } + if from.Context.Destination.Port.IsSet() { + event.Destination.Port = from.Context.Destination.Port.Val + } + } + if from.Context.Destination.Service.IsSet() { + service := model.DestinationService{} + if from.Context.Destination.Service.Name.IsSet() { + service.Name = from.Context.Destination.Service.Name.Val + } + if from.Context.Destination.Service.Resource.IsSet() { + service.Resource = from.Context.Destination.Service.Resource.Val + } + if from.Context.Destination.Service.Type.IsSet() { + service.Type = from.Context.Destination.Service.Type.Val + } + out.DestinationService = &service + } + if from.Context.HTTP.IsSet() { + var response model.HTTPResponse + if from.Context.HTTP.Method.IsSet() { + event.HTTP.Request = &model.HTTPRequest{} + event.HTTP.Request.Method = from.Context.HTTP.Method.Val + } + if from.Context.HTTP.StatusCode.IsSet() { + event.HTTP.Response = &response + event.HTTP.Response.StatusCode = from.Context.HTTP.StatusCode.Val + } + if from.Context.HTTP.URL.IsSet() { + event.URL.Original = from.Context.HTTP.URL.Val + } + if from.Context.HTTP.Response.IsSet() { + event.HTTP.Response = &response + if from.Context.HTTP.Response.DecodedBodySize.IsSet() { + val := from.Context.HTTP.Response.DecodedBodySize.Val + event.HTTP.Response.DecodedBodySize = &val + } + if from.Context.HTTP.Response.EncodedBodySize.IsSet() { + val := from.Context.HTTP.Response.EncodedBodySize.Val + event.HTTP.Response.EncodedBodySize = &val + } + if from.Context.HTTP.Response.TransferSize.IsSet() { + val := from.Context.HTTP.Response.TransferSize.Val + event.HTTP.Response.TransferSize = &val + } + } + } + if from.Context.Service.IsSet() { + if from.Context.Service.Name.IsSet() { + event.Service.Name = from.Context.Service.Name.Val + } + } + if len(from.Context.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Context.Tags), + ) + } + if from.Duration.IsSet() { + duration := time.Duration(from.Duration.Val * float64(time.Millisecond)) + event.Event.Duration = duration + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Outcome.IsSet() { + event.Event.Outcome = from.Outcome.Val + } else { + if from.Context.HTTP.StatusCode.IsSet() { + statusCode := from.Context.HTTP.StatusCode.Val + if statusCode >= http.StatusBadRequest { + event.Event.Outcome = "failure" + } else { + event.Event.Outcome = "success" + } + } else { + event.Event.Outcome = "unknown" + } + } + if from.SampleRate.IsSet() && from.SampleRate.Val > 0 { + out.RepresentativeCount = 1 / from.SampleRate.Val + } + if len(from.Stacktrace) > 0 { + out.Stacktrace = make(model.Stacktrace, len(from.Stacktrace)) + mapToStracktraceModel(from.Stacktrace, out.Stacktrace) + } + if from.Start.IsSet() { + val := from.Start.Val + out.Start = &val + } + if from.Sync.IsSet() { + val := from.Sync.Val + out.Sync = &val + } + if from.Start.IsSet() { + // 
event.Timestamp is initialized to the time the payload was + // received by apm-server; offset that by "start" milliseconds + // for RUM. + event.Timestamp = event.Timestamp.Add( + time.Duration(float64(time.Millisecond) * from.Start.Val), + ) + } +} + +func mapToStracktraceModel(from []stacktraceFrame, out model.Stacktrace) { + for idx, eventFrame := range from { + fr := model.StacktraceFrame{} + if eventFrame.AbsPath.IsSet() { + fr.AbsPath = eventFrame.AbsPath.Val + } + if eventFrame.Classname.IsSet() { + fr.Classname = eventFrame.Classname.Val + } + if eventFrame.ColumnNumber.IsSet() { + val := eventFrame.ColumnNumber.Val + fr.Colno = &val + } + if eventFrame.ContextLine.IsSet() { + fr.ContextLine = eventFrame.ContextLine.Val + } + if eventFrame.Filename.IsSet() { + fr.Filename = eventFrame.Filename.Val + } + if eventFrame.Function.IsSet() { + fr.Function = eventFrame.Function.Val + } + if eventFrame.LineNumber.IsSet() { + val := eventFrame.LineNumber.Val + fr.Lineno = &val + } + if eventFrame.Module.IsSet() { + fr.Module = eventFrame.Module.Val + } + if len(eventFrame.PostContext) > 0 { + fr.PostContext = make([]string, len(eventFrame.PostContext)) + copy(fr.PostContext, eventFrame.PostContext) + } + if len(eventFrame.PreContext) > 0 { + fr.PreContext = make([]string, len(eventFrame.PreContext)) + copy(fr.PreContext, eventFrame.PreContext) + } + out[idx] = &fr + } +} + +func mapToTransactionModel(from *transaction, event *model.APMEvent) { + out := &model.Transaction{} + event.Transaction = out + event.Processor = model.TransactionProcessor + + // overwrite metadata with event specific information + mapToServiceModel(from.Context.Service, &event.Service) + mapToAgentModel(from.Context.Service.Agent, &event.Agent) + overwriteUserInMetadataModel(from.Context.User, event) + mapToUserAgentModel(from.Context.Request.Headers, &event.UserAgent) + + // map transaction specific data + if from.Context.IsSet() { + if len(from.Context.Custom) > 0 { + out.Custom = modeldecoderutil.NormalizeLabelValues(from.Context.Custom.Clone()) + } + if len(from.Context.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Context.Tags), + ) + } + if from.Context.Request.IsSet() { + event.HTTP.Request = &model.HTTPRequest{} + mapToRequestModel(from.Context.Request, event.HTTP.Request) + if from.Context.Request.HTTPVersion.IsSet() { + event.HTTP.Version = from.Context.Request.HTTPVersion.Val + } + } + if from.Context.Response.IsSet() { + event.HTTP.Response = &model.HTTPResponse{} + mapToResponseModel(from.Context.Response, event.HTTP.Response) + } + if from.Context.Page.IsSet() { + if from.Context.Page.URL.IsSet() { + event.URL = model.ParseURL(from.Context.Page.URL.Val, "", "") + } + if from.Context.Page.Referer.IsSet() { + if event.HTTP.Request == nil { + event.HTTP.Request = &model.HTTPRequest{} + } + event.HTTP.Request.Referrer = from.Context.Page.Referer.Val + } + } + } + if from.Duration.IsSet() { + duration := time.Duration(from.Duration.Val * float64(time.Millisecond)) + event.Event.Duration = duration + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Marks.IsSet() { + out.Marks = make(model.TransactionMarks, len(from.Marks.Events)) + for event, val := range from.Marks.Events { + if len(val.Measurements) > 0 { + out.Marks[event] = model.TransactionMark(val.Measurements) + } + } + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Outcome.IsSet() { + event.Event.Outcome = from.Outcome.Val + } else { + if 
from.Context.Response.StatusCode.IsSet() { + statusCode := from.Context.Response.StatusCode.Val + if statusCode >= http.StatusInternalServerError { + event.Event.Outcome = "failure" + } else { + event.Event.Outcome = "success" + } + } else { + event.Event.Outcome = "unknown" + } + } + if from.ParentID.IsSet() { + event.Parent.ID = from.ParentID.Val + } + if from.Result.IsSet() { + out.Result = from.Result.Val + } + + sampled := true + if from.Sampled.IsSet() { + sampled = from.Sampled.Val + } + out.Sampled = sampled + if from.SampleRate.IsSet() { + if from.SampleRate.Val > 0 { + out.RepresentativeCount = 1 / from.SampleRate.Val + } + } else { + out.RepresentativeCount = 1 + } + if from.Session.ID.IsSet() { + event.Session.ID = from.Session.ID.Val + event.Session.Sequence = from.Session.Sequence.Val + } + if from.SpanCount.Dropped.IsSet() { + dropped := from.SpanCount.Dropped.Val + out.SpanCount.Dropped = &dropped + } + if from.SpanCount.Started.IsSet() { + started := from.SpanCount.Started.Val + out.SpanCount.Started = &started + } + if from.TraceID.IsSet() { + event.Trace.ID = from.TraceID.Val + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } + if from.UserExperience.IsSet() { + out.UserExperience = &model.UserExperience{ + CumulativeLayoutShift: -1, + FirstInputDelay: -1, + TotalBlockingTime: -1, + Longtask: model.LongtaskMetrics{Count: -1}, + } + if from.UserExperience.CumulativeLayoutShift.IsSet() { + out.UserExperience.CumulativeLayoutShift = from.UserExperience.CumulativeLayoutShift.Val + } + if from.UserExperience.FirstInputDelay.IsSet() { + out.UserExperience.FirstInputDelay = from.UserExperience.FirstInputDelay.Val + } + if from.UserExperience.TotalBlockingTime.IsSet() { + out.UserExperience.TotalBlockingTime = from.UserExperience.TotalBlockingTime.Val + } + if from.UserExperience.Longtask.IsSet() { + out.UserExperience.Longtask = model.LongtaskMetrics{ + Count: from.UserExperience.Longtask.Count.Val, + Sum: from.UserExperience.Longtask.Sum.Val, + Max: from.UserExperience.Longtask.Max.Val, + } + } + } +} + +func mapToUserAgentModel(from nullable.HTTPHeader, out *model.UserAgent) { + // overwrite userAgent information if available + if from.IsSet() { + if h := from.Val.Values(textproto.CanonicalMIMEHeaderKey("User-Agent")); len(h) > 0 { + out.Original = strings.Join(h, ", ") + } + } +} + +func overwriteUserInMetadataModel(from user, out *model.APMEvent) { + // overwrite User specific values if set + // either populate all User fields or none to avoid mixing + // different user data + if !from.Domain.IsSet() && !from.ID.IsSet() && !from.Email.IsSet() && !from.Name.IsSet() { + return + } + out.User = model.User{} + if from.Domain.IsSet() { + out.User.Domain = fmt.Sprint(from.Domain.Val) + } + if from.ID.IsSet() { + out.User.ID = fmt.Sprint(from.ID.Val) + } + if from.Email.IsSet() { + out.User.Email = from.Email.Val + } + if from.Name.IsSet() { + out.User.Name = from.Name.Val + } } diff --git a/model/modeldecoder/rumv3/decoder_test.go b/model/modeldecoder/rumv3/decoder_test.go deleted file mode 100644 index a9384334396..00000000000 --- a/model/modeldecoder/rumv3/decoder_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package rumv3 - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/decoder" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" -) - -func TestResetModelOnRelease(t *testing.T) { - inp := `{"m":{"se":{"n":"service-a"}}}` - m := fetchMetadataRoot() - require.NoError(t, decoder.NewJSONIteratorDecoder(strings.NewReader(inp)).Decode(m)) - require.True(t, m.IsSet()) - releaseMetadataRoot(m) - assert.False(t, m.IsSet()) -} - -func TestDecodeNestedMetadata(t *testing.T) { - t.Run("decode", func(t *testing.T) { - var out model.Metadata - testMinValidMetadata := `{"m":{"se":{"n":"name","a":{"n":"go","ve":"1.0.0"}}}}` - dec := decoder.NewJSONIteratorDecoder(strings.NewReader(testMinValidMetadata)) - require.NoError(t, DecodeNestedMetadata(dec, &out)) - assert.Equal(t, model.Metadata{Service: model.Service{ - Name: "name", - Agent: model.Agent{Name: "go", Version: "1.0.0"}}}, out) - - err := DecodeNestedMetadata(decoder.NewJSONIteratorDecoder(strings.NewReader(`malformed`)), &out) - require.Error(t, err) - assert.Contains(t, err.Error(), "decode") - }) - - t.Run("validate", func(t *testing.T) { - inp := `{}` - var out model.Metadata - err := DecodeNestedMetadata(decoder.NewJSONIteratorDecoder(strings.NewReader(inp)), &out) - require.Error(t, err) - assert.Contains(t, err.Error(), "validation") - }) - -} - -func TestMappingToModel(t *testing.T) { - expected := func(s string) model.Metadata { - return model.Metadata{ - Service: model.Service{Name: s, Version: s, Environment: s, - Agent: model.Agent{Name: s, Version: s}, - Language: model.Language{Name: s, Version: s}, - Runtime: model.Runtime{Name: s, Version: s}, - Framework: model.Framework{Name: s, Version: s}}, - User: model.User{Name: s, Email: s, ID: s}, - Labels: common.MapStr{s: s}, - } - } - - // setup: - // create initialized modeldecoder and empty model metadata - // map modeldecoder to model metadata and manually set - // enhanced data that are never set by the modeldecoder - var m metadata - modeldecodertest.SetStructValues(&m, "init", 5000) - var modelM model.Metadata - mapToMetadataModel(&m, &modelM) - // iterate through model and assert values are set - assert.Equal(t, expected("init"), modelM) - - // overwrite model metadata with specified Values - // then iterate through model and assert values are overwritten - modeldecodertest.SetStructValues(&m, "overwritten", 12) - mapToMetadataModel(&m, &modelM) - assert.Equal(t, expected("overwritten"), modelM) - - // map an empty modeldecoder metadata to the model - // and assert values are unchanged - modeldecodertest.SetZeroStructValues(&m) - mapToMetadataModel(&m, &modelM) - assert.Equal(t, expected("overwritten"), modelM) - -} diff --git 
a/model/modeldecoder/rumv3/error_test.go b/model/modeldecoder/rumv3/error_test.go new file mode 100644 index 00000000000..50d2145f11c --- /dev/null +++ b/model/modeldecoder/rumv3/error_test.go @@ -0,0 +1,204 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package rumv3 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestResetErrorOnRelease(t *testing.T) { + inp := `{"e":{"id":"tr-a"}}` + root := fetchErrorRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(root)) + require.True(t, root.IsSet()) + releaseErrorRoot(root) + assert.False(t, root.IsSet()) +} + +func TestDecodeNestedError(t *testing.T) { + t.Run("decode", func(t *testing.T) { + now := time.Now() + eventBase := initializedMetadata() + eventBase.Timestamp = now + input := modeldecoder.Input{Base: eventBase} + str := `{"e":{"id":"a-b-c","timestamp":1599996822281000,"log":{"mg":"abc"}}}` + dec := decoder.NewJSONDecoder(strings.NewReader(str)) + var batch model.Batch + require.NoError(t, DecodeNestedError(dec, &input, &batch)) + require.Len(t, batch, 1) + require.NotNil(t, batch[0].Error) + defaultValues := modeldecodertest.DefaultValues() + defaultValues.Update(time.Unix(1599996822, 281000000).UTC()) + modeldecodertest.AssertStructValues(t, &batch[0], metadataExceptions(), defaultValues) + + // if no timestamp is provided, leave base event timestamp unmodified + input = modeldecoder.Input{Base: eventBase} + str = `{"e":{"id":"a-b-c","log":{"mg":"abc"}}}` + dec = decoder.NewJSONDecoder(strings.NewReader(str)) + batch = model.Batch{} + require.NoError(t, DecodeNestedError(dec, &input, &batch)) + assert.Equal(t, now, batch[0].Timestamp) + + // test decode + err := DecodeNestedError(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + var batch model.Batch + err := DecodeNestedError(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) +} + +func TestDecodeMapToErrorModel(t *testing.T) { + t.Run("metadata-overwrite", func(t *testing.T) { + // overwrite defined metadata with event metadata values + var input errorEvent + out := initializedMetadata() + otherVal := modeldecodertest.NonDefaultValues() + 
modeldecodertest.SetStructValues(&input, otherVal) + mapToErrorModel(&input, &out) + input.Reset() + + // ensure event Metadata are updated where expected + otherVal = modeldecodertest.NonDefaultValues() + userAgent := strings.Join(otherVal.HTTPHeader.Values("User-Agent"), ", ") + assert.Equal(t, userAgent, out.UserAgent.Original) + // do not overwrite client.ip if already set in metadata + ip := modeldecodertest.DefaultValues().IP + assert.Equal(t, ip, out.Client.IP, out.Client.IP.String()) + assert.Equal(t, common.MapStr{ + "init0": "init", "init1": "init", "init2": "init", + "overwritten0": "overwritten", "overwritten1": "overwritten", + }, out.Labels) + // service and user values should be set + modeldecodertest.AssertStructValues(t, &out.Service, metadataExceptions("Node", "Agent.EphemeralID"), otherVal) + modeldecodertest.AssertStructValues(t, &out.User, metadataExceptions(), otherVal) + }) + + t.Run("error-values", func(t *testing.T) { + exceptions := func(key string) bool { + for _, s := range []string{ + // GroupingKey is set by a model processor + "GroupingKey", + // stacktrace original and sourcemap values are set when sourcemapping is applied + "Exception.Stacktrace.Original", + "Exception.Stacktrace.Sourcemap", + "Log.Stacktrace.Original", + "Log.Stacktrace.Sourcemap", + // not set by rumv3 + "Exception.Stacktrace.Vars", + "Log.Stacktrace.Vars", + "Exception.Stacktrace.LibraryFrame", + "Log.Stacktrace.LibraryFrame", + // ExcludeFromGrouping is set when processing the event + "Exception.Stacktrace.ExcludeFromGrouping", + "Log.Stacktrace.ExcludeFromGrouping"} { + if strings.HasPrefix(key, s) { + return true + } + } + return false + } + var input errorEvent + var out1, out2 model.APMEvent + reqTime := time.Now().Add(time.Second) + out1.Timestamp = reqTime + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToErrorModel(&input, &out1) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Error, exceptions, defaultVal) + + // leave event timestamp unmodified if eventTime is zero + defaultVal.Update(time.Time{}) + out1.Timestamp = reqTime + modeldecodertest.SetStructValues(&input, defaultVal) + mapToErrorModel(&input, &out1) + defaultVal.Update(reqTime) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Error, exceptions, defaultVal) + + // reuse input model for different event + // ensure memory is not shared by reusing input model + out2.Timestamp = reqTime + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToErrorModel(&input, &out2) + modeldecodertest.AssertStructValues(t, out2.Error, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Error, exceptions, defaultVal) + }) + + t.Run("page.URL", func(t *testing.T) { + var input errorEvent + input.Context.Page.URL.Set("https://my.site.test:9201") + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.URL.Full) + }) + + t.Run("page.referer", func(t *testing.T) { + var input errorEvent + input.Context.Page.Referer.Set("https://my.site.test:9201") + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.HTTP.Request.Referrer) + }) + + t.Run("loggerName", func(t *testing.T) { + var input errorEvent + input.Log.Message.Set("log message") + var out model.APMEvent + mapToErrorModel(&input, &out) + require.NotNil(t, out.Error.Log.LoggerName) + assert.Equal(t, "default", 
out.Error.Log.LoggerName) + }) + + t.Run("http-headers", func(t *testing.T) { + var input errorEvent + input.Context.Request.Headers.Set(http.Header{"a": []string{"b"}, "c": []string{"d", "e"}}) + input.Context.Response.Headers.Set(http.Header{"f": []string{"g"}}) + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, common.MapStr{"a": []string{"b"}, "c": []string{"d", "e"}}, out.HTTP.Request.Headers) + assert.Equal(t, common.MapStr{"f": []string{"g"}}, out.HTTP.Response.Headers) + }) + + t.Run("exception-code", func(t *testing.T) { + var input errorEvent + var out model.APMEvent + input.Exception.Code.Set(123.456) + mapToErrorModel(&input, &out) + assert.Equal(t, "123", out.Error.Exception.Code) + }) +} diff --git a/model/modeldecoder/rumv3/metadata_test.go b/model/modeldecoder/rumv3/metadata_test.go new file mode 100644 index 00000000000..56ba96214c3 --- /dev/null +++ b/model/modeldecoder/rumv3/metadata_test.go @@ -0,0 +1,222 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package rumv3 + +import ( + "fmt" + "net" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" + "github.com/elastic/beats/v7/libbeat/common" +) + +// initializedMetadata returns a model.APMEvent populated with default values +// in the metadata-derived fields. +func initializedMetadata() model.APMEvent { + var input metadata + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues(), func(key string, field, value reflect.Value) bool { + return key != "Experimental" + }) + mapToMetadataModel(&input, &out) + // initialize values that are not set by input + out.UserAgent = model.UserAgent{Name: "init", Original: "init"} + out.Client.Domain = "init" + out.Client.IP = net.ParseIP("127.0.0.1") + out.Client.Port = 1 + out.Source = model.Source(out.Client) + return out +} + +func metadataExceptions(keys ...string) func(key string) bool { + missing := []string{ + "Agent", + "Child", + "Cloud", + "Container", + "DataStream", + "Destination", + "ECSVersion", + "Experimental", + "HTTP", + "Kubernetes", + "Message", + "Network", + "Observer", + "Parent", + "Process", + "Processor", + "Service.Node", + "Service.Agent.EphemeralID", + "Host", + "Event", + "Session", + "Trace", + "URL", + + // event-specific fields + "Error", + "Metricset", + "ProfileSample", + "Span", + "Transaction", + } + exceptions := append(missing, keys...) 
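+ // a key is excluded when it starts with any of the exception prefixes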
+ return func(key string) bool { + for _, k := range exceptions { + if strings.HasPrefix(key, k) { + return true + } + } + return false + } +} + +func TestMetadataResetModelOnRelease(t *testing.T) { + inp := `{"m":{"se":{"n":"service-a"}}}` + m := fetchMetadataRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(m)) + require.True(t, m.IsSet()) + releaseMetadataRoot(m) + assert.False(t, m.IsSet()) +} + +func TestDecodeNestedMetadata(t *testing.T) { + t.Run("decode", func(t *testing.T) { + var out model.APMEvent + testMinValidMetadata := `{"m":{"se":{"n":"name","a":{"n":"go","ve":"1.0.0"}}}}` + dec := decoder.NewJSONDecoder(strings.NewReader(testMinValidMetadata)) + require.NoError(t, DecodeNestedMetadata(dec, &out)) + assert.Equal(t, model.APMEvent{ + Service: model.Service{Name: "name"}, + Agent: model.Agent{Name: "go", Version: "1.0.0"}, + }, out) + + err := DecodeNestedMetadata(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &out) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + inp := `{}` + var out model.APMEvent + err := DecodeNestedMetadata(decoder.NewJSONDecoder(strings.NewReader(inp)), &out) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) + +} + +func TestDecodeMetadataMappingToModel(t *testing.T) { + expected := func(s string, ip net.IP, n int) model.APMEvent { + labels := common.MapStr{} + for i := 0; i < n; i++ { + labels.Put(fmt.Sprintf("%s%v", s, i), s) + } + return model.APMEvent{ + Agent: model.Agent{Name: s, Version: s}, + Service: model.Service{Name: s, Version: s, Environment: s, + Language: model.Language{Name: s, Version: s}, + Runtime: model.Runtime{Name: s, Version: s}, + Framework: model.Framework{Name: s, Version: s}}, + User: model.User{Name: s, Email: s, Domain: s, ID: s}, + Labels: labels, + Network: model.Network{ + Connection: model.NetworkConnection{ + Type: s, + }, + }, + // these values are not set from http headers and + // are not expected change with updated input data + UserAgent: model.UserAgent{Original: "init", Name: "init"}, + Client: model.Client{ + Domain: "init", + IP: net.ParseIP("127.0.0.1"), + Port: 1, + }, + Source: model.Source{ + Domain: "init", + IP: net.ParseIP("127.0.0.1"), + Port: 1, + }, + } + } + + t.Run("overwrite", func(t *testing.T) { + // setup: + // create initialized modeldecoder and empty model metadata + // map modeldecoder to model metadata and manually set + // enhanced data that are never set by the modeldecoder + out := initializedMetadata() + // iterate through model and assert values are set + defaultVal := modeldecodertest.DefaultValues() + assert.Equal(t, expected(defaultVal.Str, defaultVal.IP, defaultVal.N), out) + + // overwrite model metadata with specified Values + // then iterate through model and assert values are overwritten + var input metadata + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToMetadataModel(&input, &out) + assert.Equal(t, expected(otherVal.Str, otherVal.IP, otherVal.N), out) + + // map an empty modeldecoder metadata to the model + // and assert values are unchanged + input.Reset() + modeldecodertest.SetZeroStructValues(&input) + mapToMetadataModel(&input, &out) + assert.Equal(t, expected(otherVal.Str, otherVal.IP, otherVal.N), out) + }) + + t.Run("reused-memory", func(t *testing.T) { + var input metadata + var out1, out2 model.APMEvent + defaultVal := modeldecodertest.DefaultValues() + 
modeldecodertest.SetStructValues(&input, defaultVal) + mapToMetadataModel(&input, &out1) + // initialize values that are not set by input + out1.UserAgent = model.UserAgent{Name: "init", Original: "init"} + out1.Client.Domain = "init" + out1.Client.IP = net.ParseIP("127.0.0.1") + out1.Client.Port = 1 + out1.Source = model.Source(out1.Client) + assert.Equal(t, expected(defaultVal.Str, defaultVal.IP, defaultVal.N), out1) + + // overwrite model metadata with specified Values + // then iterate through model and assert values are overwritten + otherVal := modeldecodertest.NonDefaultValues() + input.Reset() + modeldecodertest.SetStructValues(&input, otherVal) + mapToMetadataModel(&input, &out2) + out2.UserAgent = model.UserAgent{Name: "init", Original: "init"} + out2.Client.Domain = "init" + out2.Client.IP = net.ParseIP("127.0.0.1") + out2.Client.Port = 1 + out2.Source = model.Source(out2.Client) + assert.Equal(t, expected(otherVal.Str, otherVal.IP, otherVal.N), out2) + assert.Equal(t, expected(defaultVal.Str, defaultVal.IP, defaultVal.N), out1) + }) +} diff --git a/model/modeldecoder/rumv3/model.go b/model/modeldecoder/rumv3/model.go index 346a10204f3..6f194b89b65 100644 --- a/model/modeldecoder/rumv3/model.go +++ b/model/modeldecoder/rumv3/model.go @@ -18,7 +18,7 @@ package rumv3 import ( - "regexp" + "encoding/json" "github.com/elastic/beats/v7/libbeat/common" @@ -26,52 +26,624 @@ import ( ) var ( - alphaNumericExtRegex = regexp.MustCompile("^[a-zA-Z0-9 _-]+$") - labelsRegex = regexp.MustCompile("^[^.*\"]*$") //do not allow '.' '*' '"' + patternAlphaNumericExt = `^[a-zA-Z0-9 _-]+$` + + enumOutcome = []string{"success", "failure", "unknown"} ) +// entry points + +// errorRoot requires an error event to be present +type errorRoot struct { + Error errorEvent `json:"e" validate:"required"` +} + +// metadataRoot requires a metadata event to be present type metadataRoot struct { Metadata metadata `json:"m" validate:"required"` } +// transactionRoot requires a transaction event to be present +type transactionRoot struct { + Transaction transaction `json:"x" validate:"required"` +} + +// other structs + +type context struct { + // Custom can contain additional metadata to be stored with the event. + // The format is unspecified and can be deeply nested objects. + // The information will not be indexed or searchable in Elasticsearch. + Custom common.MapStr `json:"cu"` + // Page holds information related to the current page and page referers. + // It is only sent from RUM agents. + Page contextPage `json:"p"` + // Response describes the HTTP response information in case the event was + // created as a result of an HTTP request. + Response contextResponse `json:"r"` + // Request describes the HTTP request information in case the event was + // created as a result of an HTTP request. + Request contextRequest `json:"q"` + // Service related information can be sent per event. Information provided + // here will override the more generic information retrieved from metadata, + // missing service fields will be retrieved from the metadata information. + Service contextService `json:"se"` + // Tags are a flat mapping of user-defined tags. Allowed value types are + // string, boolean and number values. Tags are indexed and searchable. + Tags common.MapStr `json:"g" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"` + // User holds information about the correlated user for this event. 
If + // user data are provided here, all user related information from metadata + // is ignored, otherwise the metadata's user information will be stored + // with the event. + User user `json:"u"` +} + +type contextPage struct { + // Referer holds the URL of the page that 'linked' to the current page. + Referer nullable.String `json:"rf"` + // URL of the current page + URL nullable.String `json:"url"` +} + +type contextRequest struct { + // Env holds environment variable information passed to the monitored service. + Env common.MapStr `json:"en"` + // Headers includes any HTTP headers sent by the requester. Cookies will + // be taken by headers if supplied. + Headers nullable.HTTPHeader `json:"he"` + // HTTPVersion holds information about the used HTTP version. + HTTPVersion nullable.String `json:"hve" validate:"maxLength=1024"` + // Method holds information about the method of the HTTP request. + Method nullable.String `json:"mt" validate:"required,maxLength=1024"` +} + +type contextResponse struct { + // DecodedBodySize holds the size of the decoded payload. + DecodedBodySize nullable.Float64 `json:"dbs"` + // EncodedBodySize holds the size of the encoded payload. + EncodedBodySize nullable.Float64 `json:"ebs"` + // Headers holds the http headers sent in the http response. + Headers nullable.HTTPHeader `json:"he"` + // StatusCode sent in the http response. + StatusCode nullable.Int `json:"sc"` + // TransferSize holds the total size of the payload. + TransferSize nullable.Float64 `json:"ts"` +} + +type contextService struct { + // Agent holds information about the APM agent capturing the event. + Agent contextServiceAgent `json:"a"` + // Environment in which the monitored service is running, + // e.g. `production` or `staging`. + Environment nullable.String `json:"en" validate:"maxLength=1024"` + // Framework holds information about the framework used in the + // monitored service. + Framework contextServiceFramework `json:"fw"` + // Language holds information about the programming language of the + // monitored service. + Language contextServiceLanguage `json:"la"` + // Name of the monitored service. + Name nullable.String `json:"n" validate:"maxLength=1024,pattern=patternAlphaNumericExt"` + // Runtime holds information about the language runtime running the + // monitored service + Runtime contextServiceRuntime `json:"ru"` + // Version of the monitored service. + Version nullable.String `json:"ve" validate:"maxLength=1024"` +} + +type contextServiceAgent struct { + // Name of the APM agent capturing information. + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Version of the APM agent capturing information. 
+ Version nullable.String `json:"ve" validate:"maxLength=1024"` +} + +type contextServiceFramework struct { + // Name of the used framework + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Version of the used framework + Version nullable.String `json:"ve" validate:"maxLength=1024"` +} + +type contextServiceLanguage struct { + // Name of the used programming language + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Version of the used programming language + Version nullable.String `json:"ve" validate:"maxLength=1024"` +} + +type contextServiceRuntime struct { + // Name of the language runtime + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Version of the language runtime + Version nullable.String `json:"ve" validate:"maxLength=1024"` +} + +type errorEvent struct { + // Context holds arbitrary contextual information for the event. + Context context `json:"c"` + // Culprit identifies the function call which was the primary perpetrator + // of this event. + Culprit nullable.String `json:"cl" validate:"maxLength=1024"` + // Exception holds information about the original error. + // The information is language specific. + Exception errorException `json:"ex"` + // ID holds the hex encoded 128 random bits ID of the event. + ID nullable.String `json:"id" validate:"required,maxLength=1024"` + // Log holds additional information added when the error is logged. + Log errorLog `json:"log"` + // ParentID holds the hex encoded 64 random bits ID of the parent + // transaction or span. + ParentID nullable.String `json:"pid" validate:"requiredIfAny=xid;tid,maxLength=1024"` + // Timestamp holds the recorded time of the event, UTC based and formatted + // as microseconds since Unix epoch. + Timestamp nullable.TimeMicrosUnix `json:"timestamp"` + // TraceID holds the hex encoded 128 random bits ID of the correlated trace. + TraceID nullable.String `json:"tid" validate:"requiredIfAny=xid;pid,maxLength=1024"` + // Transaction holds information about the correlated transaction. + Transaction errorTransactionRef `json:"x"` + // TransactionID holds the hex encoded 64 random bits ID of the correlated + // transaction. + TransactionID nullable.String `json:"xid" validate:"maxLength=1024"` + _ struct{} `validate:"requiredAnyOf=ex;log"` +} + +type errorException struct { + // Attributes of the exception. + Attributes common.MapStr `json:"at"` + // Code that is set when the error happened, e.g. database error code. + Code nullable.Interface `json:"cd" validate:"inputTypes=string;int,maxLength=1024"` + // Cause can hold a collection of error exceptions representing chained + // exceptions. The chain starts with the outermost exception, followed + // by its cause, and so on. + Cause []errorException `json:"ca"` + // Handled indicates whether the error was caught in the code or not. + Handled nullable.Bool `json:"hd"` + // Message contains the originally captured error message. + Message nullable.String `json:"mg"` + // Module describes the exception type's module namespace. + Module nullable.String `json:"mo" validate:"maxLength=1024"` + // Stacktrace information of the captured exception. + Stacktrace []stacktraceFrame `json:"st"` + // Type of the exception. + Type nullable.String `json:"t" validate:"maxLength=1024"` + _ struct{} `validate:"requiredAnyOf=mg;t"` +} + +type errorLog struct { + // Level represents the severity of the recorded log. + Level nullable.String `json:"lv" validate:"maxLength=1024"` + // LoggerName holds the name of the used logger instance. 
+ LoggerName nullable.String `json:"ln" validate:"maxLength=1024"` + // Message of the logged error. In case a parameterized message is captured, + // Message should contain the same information, but with any placeholders + // being replaced. + Message nullable.String `json:"mg" validate:"required"` + // ParamMessage should contain the same information as Message, but with + // placeholders where parameters were logged, e.g. 'error connecting to %s'. + // The string is not interpreted, allowing different placeholders per client + // language. The information might be used to group errors together. + ParamMessage nullable.String `json:"pmg" validate:"maxLength=1024"` + // Stacktrace information of the captured error. + Stacktrace []stacktraceFrame `json:"st"` +} + +type errorTransactionRef struct { + // Sampled indicates whether or not the full information for a transaction + // is captured. If a transaction is unsampled, no spans and less context + // information will be reported. + Sampled nullable.Bool `json:"sm"` + // Type expresses the correlated transaction's type as keyword that has + // specific relevance within the service's domain, + // eg: 'request', 'backgroundjob'. + Type nullable.String `json:"t" validate:"maxLength=1024"` +} + type metadata struct { - Labels common.MapStr `json:"l" validate:"patternKeys=labelsRegex,typesVals=string;bool;number,maxVals=1024"` + // Labels are a flat mapping of user-defined tags. Allowed value types are + // string, boolean and number values. Labels are indexed and searchable. + Labels common.MapStr `json:"l" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"` + // Service metadata about the monitored service. Service metadataService `json:"se" validate:"required"` - User metadataUser `json:"u"` + // User metadata, which can be overwritten on a per event basis. + User user `json:"u"` + // Network holds information about the network over which the + // monitored service is communicating. + Network network `json:"n"` } type metadataService struct { - Agent metadataServiceAgent `json:"a" validate:"required"` - Environment nullable.String `json:"en" validate:"max=1024"` - Framework MetadataServiceFramework `json:"fw"` - Language metadataServiceLanguage `json:"la"` - Name nullable.String `json:"n" validate:"required,max=1024,pattern=alphaNumericExtRegex"` - Runtime metadataServiceRuntime `json:"ru"` - Version nullable.String `json:"ve" validate:"max=1024"` + // Agent holds information about the APM agent capturing the event. + Agent metadataServiceAgent `json:"a" validate:"required"` + // Environment in which the monitored service is running, + // e.g. `production` or `staging`. + Environment nullable.String `json:"en" validate:"maxLength=1024"` + // Framework holds information about the framework used in the + // monitored service. + Framework metadataServiceFramework `json:"fw"` + // Language holds information about the programming language of the + // monitored service. + Language metadataServiceLanguage `json:"la"` + // Name of the monitored service. + Name nullable.String `json:"n" validate:"required,minLength=1,maxLength=1024,pattern=patternAlphaNumericExt"` + // Runtime holds information about the language runtime running the + // monitored service + Runtime metadataServiceRuntime `json:"ru"` + // Version of the monitored service. 
+ Version nullable.String `json:"ve" validate:"maxLength=1024"` } type metadataServiceAgent struct { - Name nullable.String `json:"n" validate:"required,max=1024"` - Version nullable.String `json:"ve" validate:"required,max=1024"` + // Name of the APM agent capturing information. + Name nullable.String `json:"n" validate:"required,minLength=1,maxLength=1024"` + // Version of the APM agent capturing information. + Version nullable.String `json:"ve" validate:"required,maxLength=1024"` } -type MetadataServiceFramework struct { - Name nullable.String `json:"n" validate:"max=1024"` - Version nullable.String `json:"ve" validate:"max=1024"` +type metadataServiceFramework struct { + // Name of the used framework + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Version of the used framework + Version nullable.String `json:"ve" validate:"maxLength=1024"` } type metadataServiceLanguage struct { - Name nullable.String `json:"n" validate:"required,max=1024"` - Version nullable.String `json:"ve" validate:"max=1024"` + // Name of the used programming language + Name nullable.String `json:"n" validate:"required,maxLength=1024"` + // Version of the used programming language + Version nullable.String `json:"ve" validate:"maxLength=1024"` } type metadataServiceRuntime struct { - Name nullable.String `json:"n" validate:"required,max=1024"` - Version nullable.String `json:"ve" validate:"required,max=1024"` + // Name of the language runtime + Name nullable.String `json:"n" validate:"required,maxLength=1024"` + // Version of the language runtime + Version nullable.String `json:"ve" validate:"required,maxLength=1024"` +} + +type network struct { + Connection networkConnection `json:"c"` +} + +type networkConnection struct { + Type nullable.String `json:"t" validate:"maxLength=1024"` +} + +type transactionMetricset struct { + // Samples hold application metrics collected from the agent. + Samples transactionMetricsetSamples `json:"sa" validate:"required"` + // Span holds selected information about the correlated transaction. + Span metricsetSpanRef `json:"y"` +} + +type transactionMetricsetSamples struct { + // TransactionDurationCount is the number of transactions since the last + // report (the delta). The duration of transactions is tracked, which + // allows for the creation of graphs displaying a weighted average. + TransactionDurationCount metricsetSampleValue `json:"xdc"` + // TransactionDurationSum is the sum of all transaction durations in ms + // since the last report (the delta). The duration of transactions is tracked, + // which allows for the creation of graphs displaying a weighted average. + TransactionDurationSum metricsetSampleValue `json:"xds"` + // TransactionBreakdownCount is the number of transactions for which breakdown metrics (span.self_time) have been created. As the Java agent tracks the breakdown for both sampled and non-sampled transactions, this metric is equivalent to transaction.duration.count + TransactionBreakdownCount metricsetSampleValue `json:"xbc"` + // SpanSelfTimeCount holds the count of the related spans' self_time. + SpanSelfTimeCount metricsetSampleValue `json:"ysc"` + // SpanSelfTimeSum holds the sum of the related spans' self_time. + SpanSelfTimeSum metricsetSampleValue `json:"yss"` +} + +type metricsetSampleValue struct { + // Value holds the value of a single metric sample. + Value nullable.Float64 `json:"v" validate:"required"` +} + +type metricsetSpanRef struct { + // Subtype is a further sub-division of the type (e.g. 
postgresql, elasticsearch) + Subtype nullable.String `json:"su" validate:"maxLength=1024"` + // Type expresses the correlated span's type as keyword that has specific + // relevance within the service's domain, eg: 'request', 'backgroundjob'. + Type nullable.String `json:"t" validate:"maxLength=1024"` +} + +type span struct { + // Action holds the specific kind of event within the sub-type represented + // by the span (e.g. query, connect) + Action nullable.String `json:"ac" validate:"maxLength=1024"` + // Context holds arbitrary contextual information for the event. + Context spanContext `json:"c"` + // Duration of the span in milliseconds + Duration nullable.Float64 `json:"d" validate:"required,min=0"` + // ID holds the hex encoded 64 random bits ID of the event. + ID nullable.String `json:"id" validate:"required,maxLength=1024"` + // Name is the generic designation of a span in the scope of a transaction. + Name nullable.String `json:"n" validate:"required,maxLength=1024"` + // Outcome of the span: success, failure, or unknown. Outcome may be one of + // a limited set of permitted values describing the success or failure of + // the span. It can be used for calculating error rates for outgoing requests. + Outcome nullable.String `json:"o" validate:"enum=enumOutcome"` + // ParentIndex is the index of the parent span in the list. Absent when + // the parent is a transaction. + ParentIndex nullable.Int `json:"pi"` + // SampleRate applied to the monitored service at the time where this span + // was recorded. + SampleRate nullable.Float64 `json:"sr"` + // Stacktrace connected to this span event. + Stacktrace []stacktraceFrame `json:"st"` + // Start is the offset relative to the transaction's timestamp identifying + // the start of the span, in milliseconds. + Start nullable.Float64 `json:"s" validate:"required"` + // Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch) + Subtype nullable.String `json:"su" validate:"maxLength=1024"` + // Sync indicates whether the span was executed synchronously or asynchronously. + Sync nullable.Bool `json:"sy"` + // Type holds the span's type, and can have specific keywords + // within the service's domain (eg: 'request', 'backgroundjob', etc) + Type nullable.String `json:"t" validate:"required,maxLength=1024"` +} + +type spanContext struct { + // Destination contains contextual data about the destination of spans + Destination spanContextDestination `json:"dt"` + // HTTP contains contextual information when the span concerns an HTTP request. + HTTP spanContextHTTP `json:"h"` + // Service related information can be sent per span. Information provided + // here will override the more generic information retrieved from metadata, + // missing service fields will be retrieved from the metadata information. + Service spanContextService `json:"se"` + // Tags are a flat mapping of user-defined tags. Allowed value types are + // string, boolean and number values. Tags are indexed and searchable. + Tags common.MapStr `json:"g" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"` +} + +type spanContextDestination struct { + // Address is the destination network address: + // hostname (e.g. 'localhost'), + // FQDN (e.g. 'elastic.co'), + // IPv4 (e.g. '127.0.0.1') + // IPv6 (e.g. '::1') + Address nullable.String `json:"ad" validate:"maxLength=1024"` + // Port is the destination network port (e.g. 
443) + Port nullable.Int `json:"po"` + // Service describes the destination service + Service spanContextDestinationService `json:"se"` +} + +type spanContextDestinationService struct { + // Name is the identifier for the destination service, + // e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq' + // DEPRECATED: this field will be removed in a future release + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Resource identifies the destination service resource being operated on + // e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name' + Resource nullable.String `json:"rc" validate:"required,maxLength=1024"` + // Type of the destination service, e.g. db, elasticsearch. Should + // typically be the same as span.type. + // DEPRECATED: this field will be removed in a future release + Type nullable.String `json:"t" validate:"maxLength=1024"` +} + +type spanContextHTTP struct { + // Method holds information about the method of the HTTP request. + Method nullable.String `json:"mt" validate:"maxLength=1024"` + // Response describes the HTTP response information in case the event was + // created as a result of an HTTP request. + Response spanContextHTTPResponse `json:"r"` + // Deprecated: Use Response.StatusCode instead. + // StatusCode sent in the http response. + StatusCode nullable.Int `json:"sc"` + // URL is the raw url of the correlating HTTP request. + URL nullable.String `json:"url"` +} + +type spanContextHTTPResponse struct { + // DecodedBodySize holds the size of the decoded payload. + DecodedBodySize nullable.Float64 `json:"dbs"` + // EncodedBodySize holds the size of the encoded payload. + EncodedBodySize nullable.Float64 `json:"ebs"` + // TransferSize holds the total size of the payload. + TransferSize nullable.Float64 `json:"ts"` +} + +type spanContextService struct { + // Agent holds information about the APM agent capturing the event. + Agent contextServiceAgent `json:"a"` + // Name of the monitored service. + Name nullable.String `json:"n" validate:"maxLength=1024,pattern=patternAlphaNumericExt"` +} + +type stacktraceFrame struct { + // AbsPath is the absolute path of the frame's file. + AbsPath nullable.String `json:"ap"` + // Classname of the frame. + Classname nullable.String `json:"cn"` + // ColumnNumber of the frame. + ColumnNumber nullable.Int `json:"co"` + // ContextLine is the line from the frame's file. + ContextLine nullable.String `json:"cli"` + // Filename is the relative name of the frame's file. + Filename nullable.String `json:"f" validate:"required"` + // Function represented by the frame. + Function nullable.String `json:"fn"` + // LineNumber of the frame. + LineNumber nullable.Int `json:"li"` + // Module to which the frame belongs. + Module nullable.String `json:"mo"` + // PostContext is a slice of code lines immediately after the line + // from the frame's file. + PostContext []string `json:"poc"` + // PreContext is a slice of code lines immediately before the line + // from the frame's file. + PreContext []string `json:"prc"` +} + +type transaction struct { + // Context holds arbitrary contextual information for the event. + Context context `json:"c"` + // Duration is how long the transaction took to complete, in milliseconds + // with 3 decimal points. + Duration nullable.Float64 `json:"d" validate:"required,min=0"` + // ID holds the hex encoded 64 random bits ID of the event. 
+ ID nullable.String `json:"id" validate:"required,maxLength=1024"` + // Marks capture the timing of a significant event during the lifetime of + // a transaction. Marks are organized into groups and can be set by the + // user or the agent. Marks are only reported by RUM agents. + Marks transactionMarks `json:"k"` + // Metricsets is a collection of metrics related to this transaction. + Metricsets []transactionMetricset `json:"me"` + // Name is the generic designation of a transaction in the scope of a + // single service, eg: 'GET /users/:id'. + Name nullable.String `json:"n" validate:"maxLength=1024"` + // Outcome of the transaction with a limited set of permitted values, + // describing the success or failure of the transaction from the service's + // perspective. It is used for calculating error rates for incoming requests. + // Permitted values: success, failure, unknown. + Outcome nullable.String `json:"o" validate:"enum=enumOutcome"` + // ParentID holds the hex encoded 64 random bits ID of the parent + // transaction or span. + ParentID nullable.String `json:"pid" validate:"maxLength=1024"` + // Result of the transaction. For HTTP-related transactions, this should + // be the status code formatted like 'HTTP 2xx'. + Result nullable.String `json:"rt" validate:"maxLength=1024"` + // Sampled indicates whether or not the full information for a transaction + // is captured. If a transaction is unsampled, no spans and less context + // information will be reported. + Sampled nullable.Bool `json:"sm"` + // SampleRate applied to the monitored service at the time where this transaction + // was recorded. Allowed values are [0..1]. A SampleRate <1 indicates that + // not all spans are recorded. + SampleRate nullable.Float64 `json:"sr"` + // Session holds optional transaction session information for RUM. + Session transactionSession `json:"ses"` + // SpanCount counts correlated spans. + SpanCount transactionSpanCount `json:"yc" validate:"required"` + // Spans is a collection of spans related to this transaction. + Spans []span `json:"y"` + // TraceID holds the hex encoded 128 random bits ID of the correlated trace. + TraceID nullable.String `json:"tid" validate:"required,maxLength=1024"` + // Type expresses the transaction's type as keyword that has specific + // relevance within the service's domain, eg: 'request', 'backgroundjob'. + Type nullable.String `json:"t" validate:"required,maxLength=1024"` + // UserExperience holds metrics for measuring real user experience. + // This information is only sent by RUM agents. + UserExperience transactionUserExperience `json:"exp"` +} + +type transactionSession struct { + // ID holds a session ID for grouping a set of related transactions. + ID nullable.String `json:"id" validate:"required"` + + // Sequence holds an optional sequence number for a transaction within + // a session. It is not meaningful to compare sequences across two + // different sessions. 
+ Sequence nullable.Int `json:"seq" validate:"min=1"` +} + +type transactionMarks struct { + Events map[string]transactionMarkEvents `json:"-"` +} + +var markEventsLongNames = map[string]string{ + "a": "agent", + "nt": "navigationTiming", +} + +// UnmarshalJSON decodes the mark groups and rewrites known abbreviated group +// names to their long form (e.g. "nt" becomes "navigationTiming"). +func (m *transactionMarks) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &m.Events); err != nil { + return err + } + for name, val := range m.Events { + nameLong, ok := markEventsLongNames[name] + if !ok { + // there is no long name defined for this event + continue + } + delete(m.Events, name) + m.Events[nameLong] = val + } + return nil +} + +type transactionMarkEvents struct { + Measurements map[string]float64 `json:"-"` +} + +// UnmarshalJSON decodes the measurements of a mark group and rewrites known +// abbreviated measurement names to their long form (e.g. "fp" becomes +// "firstContentfulPaint"). +func (m *transactionMarkEvents) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &m.Measurements); err != nil { + return err + } + for name, val := range m.Measurements { + nameLong, ok := markMeasurementsLongNames[name] + if !ok { + // there is no long name defined for this measurement + continue + } + delete(m.Measurements, name) + m.Measurements[nameLong] = val + } + return nil +} + +var markMeasurementsLongNames = map[string]string{ + "ce": "connectEnd", + "cs": "connectStart", + "dc": "domComplete", + "de": "domContentLoadedEventEnd", + "di": "domInteractive", + "dl": "domLoading", + "ds": "domContentLoadedEventStart", + "ee": "loadEventEnd", + "es": "loadEventStart", + "fb": "timeToFirstByte", + "fp": "firstContentfulPaint", + "fs": "fetchStart", + "le": "domainLookupEnd", + "lp": "largestContentfulPaint", + "ls": "domainLookupStart", + "re": "responseEnd", + "rs": "responseStart", + "qs": "requestStart", +} + +type transactionSpanCount struct { + // Dropped is the number of correlated spans that have been dropped by + // the APM agent recording the transaction. + Dropped nullable.Int `json:"dd"` + // Started is the number of correlated spans that are recorded. + Started nullable.Int `json:"sd" validate:"required"` +} + +// transactionUserExperience holds real user (browser) experience metrics. +type transactionUserExperience struct { + // CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value, + // or a negative value if CLS is unknown. See https://web.dev/cls/ + CumulativeLayoutShift nullable.Float64 `json:"cls" validate:"min=0"` + // FirstInputDelay holds the First Input Delay (FID) metric value, + // or a negative value if FID is unknown. See https://web.dev/fid/ + FirstInputDelay nullable.Float64 `json:"fid" validate:"min=0"` + // TotalBlockingTime holds the Total Blocking Time (TBT) metric value, + // or a negative value if TBT is unknown. See https://web.dev/tbt/ + TotalBlockingTime nullable.Float64 `json:"tbt" validate:"min=0"` + // Longtask holds longtask duration/count metrics. + Longtask longtaskMetrics `json:"lt"` +} + +type longtaskMetrics struct { + // Count is the total number of longtasks. + Count nullable.Int `json:"count" validate:"required,min=0"` + // Max longtask duration + Max nullable.Float64 `json:"max" validate:"required,min=0"` + // Sum of longtask durations + Sum nullable.Float64 `json:"sum" validate:"required,min=0"` } -type metadataUser struct { - ID nullable.Interface `json:"id" validate:"max=1024,types=string;int"` - Email nullable.String `json:"em" validate:"max=1024"` - Name nullable.String `json:"un" validate:"max=1024"` +type user struct { + // Domain of the user + Domain nullable.String `json:"ud" validate:"maxLength=1024"` + // ID identifies the logged in user, e.g. 
can be the primary key of the user + ID nullable.Interface `json:"id" validate:"maxLength=1024,inputTypes=string;int"` + // Email of the user. + Email nullable.String `json:"em" validate:"maxLength=1024"` + // Name of the user. + Name nullable.String `json:"un" validate:"maxLength=1024"` } diff --git a/model/modeldecoder/rumv3/model_generated.go b/model/modeldecoder/rumv3/model_generated.go index b5cf96e5625..9321e5d11c0 100644 --- a/model/modeldecoder/rumv3/model_generated.go +++ b/model/modeldecoder/rumv3/model_generated.go @@ -22,258 +22,1411 @@ package rumv3 import ( "encoding/json" "fmt" + "regexp" "unicode/utf8" + + "github.com/pkg/errors" +) + +var ( + patternAlphaNumericExtRegexp = regexp.MustCompile(patternAlphaNumericExt) ) -func (m *metadataRoot) IsSet() bool { - return m.Metadata.IsSet() +func (val *metadataRoot) IsSet() bool { + return val.Metadata.IsSet() } -func (m *metadataRoot) Reset() { - m.Metadata.Reset() +func (val *metadataRoot) Reset() { + val.Metadata.Reset() } -func (m *metadataRoot) validate() error { - if err := m.Metadata.validate(); err != nil { - return err +func (val *metadataRoot) validate() error { + if err := val.Metadata.validate(); err != nil { + return errors.Wrapf(err, "m") } - if !m.Metadata.IsSet() { + if !val.Metadata.IsSet() { return fmt.Errorf("'m' required") } return nil } -func (m *metadata) IsSet() bool { - return len(m.Labels) > 0 || m.Service.IsSet() || m.User.IsSet() +func (val *metadata) IsSet() bool { + return (len(val.Labels) > 0) || val.Service.IsSet() || val.User.IsSet() || val.Network.IsSet() } -func (m *metadata) Reset() { - for k := range m.Labels { - delete(m.Labels, k) +func (val *metadata) Reset() { + for k := range val.Labels { + delete(val.Labels, k) } - m.Service.Reset() - m.User.Reset() + val.Service.Reset() + val.User.Reset() + val.Network.Reset() } -func (m *metadata) validate() error { - if !m.IsSet() { +func (val *metadata) validate() error { + if !val.IsSet() { return nil } - for k, v := range m.Labels { - if !labelsRegex.MatchString(k) { - return fmt.Errorf("validation rule 'patternKeys(labelsRegex)' violated for 'm.l'") - } + for k, v := range val.Labels { switch t := v.(type) { case nil: case string: if utf8.RuneCountInString(t) > 1024 { - return fmt.Errorf("validation rule 'maxVals(1024)' violated for 'm.l'") + return fmt.Errorf("'l': validation rule 'maxLengthVals(1024)' violated") } case bool: case json.Number: default: - return fmt.Errorf("validation rule 'typesVals(string;bool;number)' violated for 'm.l' for key %s", k) + return fmt.Errorf("'l': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k) } } - if err := m.Service.validate(); err != nil { - return err + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "se") } - if !m.Service.IsSet() { - return fmt.Errorf("'m.se' required") + if !val.Service.IsSet() { + return fmt.Errorf("'se' required") } - if err := m.User.validate(); err != nil { - return err + if err := val.User.validate(); err != nil { + return errors.Wrapf(err, "u") + } + if err := val.Network.validate(); err != nil { + return errors.Wrapf(err, "n") } return nil } -func (m *metadataService) IsSet() bool { - return m.Agent.IsSet() || m.Environment.IsSet() || m.Framework.IsSet() || m.Language.IsSet() || m.Name.IsSet() || m.Runtime.IsSet() || m.Version.IsSet() +func (val *metadataService) IsSet() bool { + return val.Agent.IsSet() || val.Environment.IsSet() || val.Framework.IsSet() || val.Language.IsSet() || val.Name.IsSet() || val.Runtime.IsSet() 
|| val.Version.IsSet() } -func (m *metadataService) Reset() { - m.Agent.Reset() - m.Environment.Reset() - m.Framework.Reset() - m.Language.Reset() - m.Name.Reset() - m.Runtime.Reset() - m.Version.Reset() +func (val *metadataService) Reset() { + val.Agent.Reset() + val.Environment.Reset() + val.Framework.Reset() + val.Language.Reset() + val.Name.Reset() + val.Runtime.Reset() + val.Version.Reset() } -func (m *metadataService) validate() error { - if !m.IsSet() { +func (val *metadataService) validate() error { + if !val.IsSet() { return nil } - if err := m.Agent.validate(); err != nil { - return err + if err := val.Agent.validate(); err != nil { + return errors.Wrapf(err, "a") + } + if !val.Agent.IsSet() { + return fmt.Errorf("'a' required") } - if !m.Agent.IsSet() { - return fmt.Errorf("'m.se.a' required") + if val.Environment.IsSet() && utf8.RuneCountInString(val.Environment.Val) > 1024 { + return fmt.Errorf("'en': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Environment.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.en'") + if err := val.Framework.validate(); err != nil { + return errors.Wrapf(err, "fw") } - if err := m.Framework.validate(); err != nil { - return err + if err := val.Language.validate(); err != nil { + return errors.Wrapf(err, "la") } - if err := m.Language.validate(); err != nil { - return err + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.n'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) < 1 { + return fmt.Errorf("'n': validation rule 'minLength(1)' violated") } - if !alphaNumericExtRegex.MatchString(m.Name.Val) { - return fmt.Errorf("validation rule 'pattern(alphaNumericExtRegex)' violated for 'm.se.n'") + if val.Name.Val != "" && !patternAlphaNumericExtRegexp.MatchString(val.Name.Val) { + return fmt.Errorf("'n': validation rule 'pattern(patternAlphaNumericExt)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'m.se.n' required") + if !val.Name.IsSet() { + return fmt.Errorf("'n' required") } - if err := m.Runtime.validate(); err != nil { - return err + if err := val.Runtime.validate(); err != nil { + return errors.Wrapf(err, "ru") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.ve'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceAgent) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceAgent) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceAgent) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceAgent) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceAgent) validate() error { - if !m.IsSet() { +func (val *metadataServiceAgent) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.a.n'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'m.se.a.n' required") + if val.Name.IsSet() 
&& utf8.RuneCountInString(val.Name.Val) < 1 { + return fmt.Errorf("'n': validation rule 'minLength(1)' violated") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.a.ve'") + if !val.Name.IsSet() { + return fmt.Errorf("'n' required") } - if !m.Version.IsSet() { - return fmt.Errorf("'m.se.a.ve' required") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") + } + if !val.Version.IsSet() { + return fmt.Errorf("'ve' required") } return nil } -func (m *MetadataServiceFramework) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceFramework) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *MetadataServiceFramework) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceFramework) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *MetadataServiceFramework) validate() error { - if !m.IsSet() { +func (val *metadataServiceFramework) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.fw.n'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.fw.ve'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceLanguage) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceLanguage) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceLanguage) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceLanguage) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceLanguage) validate() error { - if !m.IsSet() { +func (val *metadataServiceLanguage) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.la.n'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'m.se.la.n' required") + if !val.Name.IsSet() { + return fmt.Errorf("'n' required") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.la.ve'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceRuntime) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceRuntime) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceRuntime) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceRuntime) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceRuntime) validate() error { - if !m.IsSet() { +func (val *metadataServiceRuntime) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' 
violated for 'm.se.ru.n'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'m.se.ru.n' required") + if !val.Name.IsSet() { + return fmt.Errorf("'n' required") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.se.ru.ve'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") } - if !m.Version.IsSet() { - return fmt.Errorf("'m.se.ru.ve' required") + if !val.Version.IsSet() { + return fmt.Errorf("'ve' required") } return nil } -func (m *metadataUser) IsSet() bool { - return m.ID.IsSet() || m.Email.IsSet() || m.Name.IsSet() +func (val *user) IsSet() bool { + return val.Domain.IsSet() || val.ID.IsSet() || val.Email.IsSet() || val.Name.IsSet() } -func (m *metadataUser) Reset() { - m.ID.Reset() - m.Email.Reset() - m.Name.Reset() +func (val *user) Reset() { + val.Domain.Reset() + val.ID.Reset() + val.Email.Reset() + val.Name.Reset() } -func (m *metadataUser) validate() error { - if !m.IsSet() { +func (val *user) validate() error { + if !val.IsSet() { return nil } - switch t := m.ID.Val.(type) { + if val.Domain.IsSet() && utf8.RuneCountInString(val.Domain.Val) > 1024 { + return fmt.Errorf("'ud': validation rule 'maxLength(1024)' violated") + } + switch t := val.ID.Val.(type) { case string: if utf8.RuneCountInString(t) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'm.u.id'") + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") } + case int: case json.Number: if _, err := t.Int64(); err != nil { - return fmt.Errorf("validation rule 'types(string;int)' violated for 'm.u.id'") + return fmt.Errorf("'id': validation rule 'inputTypes(string;int)' violated") + } + case nil: + default: + return fmt.Errorf("'id': validation rule 'inputTypes(string;int)' violated ") + } + if val.Email.IsSet() && utf8.RuneCountInString(val.Email.Val) > 1024 { + return fmt.Errorf("'em': validation rule 'maxLength(1024)' violated") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'un': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *network) IsSet() bool { + return val.Connection.IsSet() +} + +func (val *network) Reset() { + val.Connection.Reset() +} + +func (val *network) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Connection.validate(); err != nil { + return errors.Wrapf(err, "c") + } + return nil +} + +func (val *networkConnection) IsSet() bool { + return val.Type.IsSet() +} + +func (val *networkConnection) Reset() { + val.Type.Reset() +} + +func (val *networkConnection) validate() error { + if !val.IsSet() { + return nil + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *errorRoot) IsSet() bool { + return val.Error.IsSet() +} + +func (val *errorRoot) Reset() { + val.Error.Reset() +} + +func (val *errorRoot) validate() error { + if err := val.Error.validate(); err != nil { + return errors.Wrapf(err, "e") + } + if !val.Error.IsSet() { + return fmt.Errorf("'e' required") + } + return nil +} + +func (val *errorEvent) IsSet() bool { + return val.Context.IsSet() || val.Culprit.IsSet() || val.Exception.IsSet() || val.ID.IsSet() || val.Log.IsSet() || 
+func (val *errorEvent) IsSet() bool {
+	return val.Context.IsSet() || val.Culprit.IsSet() || val.Exception.IsSet() || val.ID.IsSet() || val.Log.IsSet() || val.ParentID.IsSet() || val.Timestamp.IsSet() || val.TraceID.IsSet() || val.Transaction.IsSet() || val.TransactionID.IsSet()
+}
+
+func (val *errorEvent) Reset() {
+	val.Context.Reset()
+	val.Culprit.Reset()
+	val.Exception.Reset()
+	val.ID.Reset()
+	val.Log.Reset()
+	val.ParentID.Reset()
+	val.Timestamp.Reset()
+	val.TraceID.Reset()
+	val.Transaction.Reset()
+	val.TransactionID.Reset()
+}
+
+func (val *errorEvent) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Context.validate(); err != nil {
+		return errors.Wrapf(err, "c")
+	}
+	if val.Culprit.IsSet() && utf8.RuneCountInString(val.Culprit.Val) > 1024 {
+		return fmt.Errorf("'cl': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Exception.validate(); err != nil {
+		return errors.Wrapf(err, "ex")
+	}
+	if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 {
+		return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.ID.IsSet() {
+		return fmt.Errorf("'id' required")
+	}
+	if err := val.Log.validate(); err != nil {
+		return errors.Wrapf(err, "log")
+	}
+	if val.ParentID.IsSet() && utf8.RuneCountInString(val.ParentID.Val) > 1024 {
+		return fmt.Errorf("'pid': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.ParentID.IsSet() {
+		if val.TransactionID.IsSet() {
+			return fmt.Errorf("'pid' required when 'xid' is set")
+		}
+		if val.TraceID.IsSet() {
+			return fmt.Errorf("'pid' required when 'tid' is set")
+		}
+	}
+	if val.TraceID.IsSet() && utf8.RuneCountInString(val.TraceID.Val) > 1024 {
+		return fmt.Errorf("'tid': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.TraceID.IsSet() {
+		if val.TransactionID.IsSet() {
+			return fmt.Errorf("'tid' required when 'xid' is set")
+		}
+		if val.ParentID.IsSet() {
+			return fmt.Errorf("'tid' required when 'pid' is set")
+		}
+	}
+	if err := val.Transaction.validate(); err != nil {
+		return errors.Wrapf(err, "x")
+	}
+	if val.TransactionID.IsSet() && utf8.RuneCountInString(val.TransactionID.Val) > 1024 {
+		return fmt.Errorf("'xid': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Exception.IsSet() && !val.Log.IsSet() {
+		return fmt.Errorf("requires at least one of the fields 'ex;log'")
+	}
+	return nil
+}
+
+func (val *context) IsSet() bool {
+	return (len(val.Custom) > 0) || val.Page.IsSet() || val.Response.IsSet() || val.Request.IsSet() || val.Service.IsSet() || (len(val.Tags) > 0) || val.User.IsSet()
+}
+
+func (val *context) Reset() {
+	for k := range val.Custom {
+		delete(val.Custom, k)
+	}
+	val.Page.Reset()
+	val.Response.Reset()
+	val.Request.Reset()
+	val.Service.Reset()
+	for k := range val.Tags {
+		delete(val.Tags, k)
+	}
+	val.User.Reset()
+}
+
+func (val *context) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Page.validate(); err != nil {
+		return errors.Wrapf(err, "p")
+	}
+	if err := val.Response.validate(); err != nil {
+		return errors.Wrapf(err, "r")
+	}
+	if err := val.Request.validate(); err != nil {
+		return errors.Wrapf(err, "q")
+	}
+	if err := val.Service.validate(); err != nil {
+		return errors.Wrapf(err, "se")
+	}
+	for k, v := range val.Tags {
+		switch t := v.(type) {
+		case nil:
+		case string:
+			if utf8.RuneCountInString(t) > 1024 {
+				return fmt.Errorf("'g': validation rule 'maxLengthVals(1024)' violated")
+			}
+		case bool:
+		case json.Number:
+		default:
+			return fmt.Errorf("'g': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k)
+		}
+	}
+	if err := val.User.validate(); err != nil {
+		return errors.Wrapf(err, "u")
+	}
+	return nil
+}
+
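The `Tags` loop just above is the per-value map validation used for labels and tags throughout these models: values may be nil, bounded strings, bools, or JSON numbers; anything else violates `inputTypesVals(string;bool;number)`. A small self-contained sketch of that rule, under the same assumption that numbers decode as `json.Number`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"unicode/utf8"
)

// validateTags applies per-value rules across a map, mirroring the
// 'maxLengthVals(1024)' and 'inputTypesVals(...)' checks above.
func validateTags(tags map[string]interface{}) error {
	for k, v := range tags {
		switch t := v.(type) {
		case nil, bool, json.Number:
			// allowed as-is
		case string:
			if utf8.RuneCountInString(t) > 1024 {
				return fmt.Errorf("value for key %q exceeds 1024 runes", k)
			}
		default:
			return fmt.Errorf("key %q has unsupported value type %T", k, v)
		}
	}
	return nil
}

func main() {
	ok := map[string]interface{}{"k1": "v1", "k2": true, "k3": nil}
	bad := map[string]interface{}{"k1": []int{1, 2, 3}} // nested values are rejected
	fmt.Println(validateTags(ok), validateTags(bad))
}
```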
+func (val *contextPage) IsSet() bool {
+	return val.Referer.IsSet() || val.URL.IsSet()
+}
+
+func (val *contextPage) Reset() {
+	val.Referer.Reset()
+	val.URL.Reset()
+}
+
+func (val *contextPage) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	return nil
+}
+
+func (val *contextResponse) IsSet() bool {
+	return val.DecodedBodySize.IsSet() || val.EncodedBodySize.IsSet() || val.Headers.IsSet() || val.StatusCode.IsSet() || val.TransferSize.IsSet()
+}
+
+func (val *contextResponse) Reset() {
+	val.DecodedBodySize.Reset()
+	val.EncodedBodySize.Reset()
+	val.Headers.Reset()
+	val.StatusCode.Reset()
+	val.TransferSize.Reset()
+}
+
+func (val *contextResponse) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	return nil
+}
+
+func (val *contextRequest) IsSet() bool {
+	return (len(val.Env) > 0) || val.Headers.IsSet() || val.HTTPVersion.IsSet() || val.Method.IsSet()
+}
+
+func (val *contextRequest) Reset() {
+	for k := range val.Env {
+		delete(val.Env, k)
+	}
+	val.Headers.Reset()
+	val.HTTPVersion.Reset()
+	val.Method.Reset()
+}
+
+func (val *contextRequest) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.HTTPVersion.IsSet() && utf8.RuneCountInString(val.HTTPVersion.Val) > 1024 {
+		return fmt.Errorf("'hve': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Method.IsSet() && utf8.RuneCountInString(val.Method.Val) > 1024 {
+		return fmt.Errorf("'mt': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Method.IsSet() {
+		return fmt.Errorf("'mt' required")
+	}
+	return nil
+}
+
+func (val *contextService) IsSet() bool {
+	return val.Agent.IsSet() || val.Environment.IsSet() || val.Framework.IsSet() || val.Language.IsSet() || val.Name.IsSet() || val.Runtime.IsSet() || val.Version.IsSet()
+}
+
+func (val *contextService) Reset() {
+	val.Agent.Reset()
+	val.Environment.Reset()
+	val.Framework.Reset()
+	val.Language.Reset()
+	val.Name.Reset()
+	val.Runtime.Reset()
+	val.Version.Reset()
+}
+
+func (val *contextService) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Agent.validate(); err != nil {
+		return errors.Wrapf(err, "a")
+	}
+	if val.Environment.IsSet() && utf8.RuneCountInString(val.Environment.Val) > 1024 {
+		return fmt.Errorf("'en': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Framework.validate(); err != nil {
+		return errors.Wrapf(err, "fw")
+	}
+	if err := val.Language.validate(); err != nil {
+		return errors.Wrapf(err, "la")
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Name.Val != "" && !patternAlphaNumericExtRegexp.MatchString(val.Name.Val) {
+		return fmt.Errorf("'n': validation rule 'pattern(patternAlphaNumericExt)' violated")
+	}
+	if err := val.Runtime.validate(); err != nil {
+		return errors.Wrapf(err, "ru")
+	}
+	if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 {
+		return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated")
+	}
+	return nil
+}
+
+func (val *contextServiceAgent) IsSet() bool {
+	return val.Name.IsSet() || val.Version.IsSet()
+}
+
+func (val *contextServiceAgent) Reset() {
+	val.Name.Reset()
+	val.Version.Reset()
+}
+
+func (val *contextServiceAgent) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 {
fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceFramework) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceFramework) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceFramework) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceLanguage) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceLanguage) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceLanguage) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceRuntime) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceRuntime) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceRuntime) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'ve': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *errorException) IsSet() bool { + return (len(val.Attributes) > 0) || val.Code.IsSet() || (len(val.Cause) > 0) || val.Handled.IsSet() || val.Message.IsSet() || val.Module.IsSet() || (len(val.Stacktrace) > 0) || val.Type.IsSet() +} + +func (val *errorException) Reset() { + for k := range val.Attributes { + delete(val.Attributes, k) + } + val.Code.Reset() + for i := range val.Cause { + val.Cause[i].Reset() + } + val.Cause = val.Cause[:0] + val.Handled.Reset() + val.Message.Reset() + val.Module.Reset() + for i := range val.Stacktrace { + val.Stacktrace[i].Reset() + } + val.Stacktrace = val.Stacktrace[:0] + val.Type.Reset() +} + +func (val *errorException) validate() error { + if !val.IsSet() { + return nil + } + switch t := val.Code.Val.(type) { + case string: + if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'cd': validation rule 'maxLength(1024)' violated") } case int: + case json.Number: + if _, err := t.Int64(); err != nil { + return fmt.Errorf("'cd': validation rule 'inputTypes(string;int)' violated") + } case nil: default: - return fmt.Errorf("validation rule 'types(string;int)' violated for 'm.u.id'") + return fmt.Errorf("'cd': validation rule 'inputTypes(string;int)' violated ") + } + for _, elem := range val.Cause { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "ca") + } + } + if val.Module.IsSet() && utf8.RuneCountInString(val.Module.Val) > 1024 { + return fmt.Errorf("'mo': validation rule 'maxLength(1024)' violated") + } + for _, elem := range val.Stacktrace { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "st") + } + } + if val.Type.IsSet() && 
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Message.IsSet() && !val.Type.IsSet() {
+		return fmt.Errorf("requires at least one of the fields 'mg;t'")
+	}
+	return nil
+}
+
+func (val *stacktraceFrame) IsSet() bool {
+	return val.AbsPath.IsSet() || val.Classname.IsSet() || val.ColumnNumber.IsSet() || val.ContextLine.IsSet() || val.Filename.IsSet() || val.Function.IsSet() || val.LineNumber.IsSet() || val.Module.IsSet() || (len(val.PostContext) > 0) || (len(val.PreContext) > 0)
+}
+
+func (val *stacktraceFrame) Reset() {
+	val.AbsPath.Reset()
+	val.Classname.Reset()
+	val.ColumnNumber.Reset()
+	val.ContextLine.Reset()
+	val.Filename.Reset()
+	val.Function.Reset()
+	val.LineNumber.Reset()
+	val.Module.Reset()
+	val.PostContext = val.PostContext[:0]
+	val.PreContext = val.PreContext[:0]
+}
+
+func (val *stacktraceFrame) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if !val.Filename.IsSet() {
+		return fmt.Errorf("'f' required")
+	}
+	return nil
+}
+
+func (val *errorLog) IsSet() bool {
+	return val.Level.IsSet() || val.LoggerName.IsSet() || val.Message.IsSet() || val.ParamMessage.IsSet() || (len(val.Stacktrace) > 0)
+}
+
+func (val *errorLog) Reset() {
+	val.Level.Reset()
+	val.LoggerName.Reset()
+	val.Message.Reset()
+	val.ParamMessage.Reset()
+	for i := range val.Stacktrace {
+		val.Stacktrace[i].Reset()
+	}
+	val.Stacktrace = val.Stacktrace[:0]
+}
+
+func (val *errorLog) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Level.IsSet() && utf8.RuneCountInString(val.Level.Val) > 1024 {
+		return fmt.Errorf("'lv': validation rule 'maxLength(1024)' violated")
+	}
+	if val.LoggerName.IsSet() && utf8.RuneCountInString(val.LoggerName.Val) > 1024 {
+		return fmt.Errorf("'ln': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Message.IsSet() {
+		return fmt.Errorf("'mg' required")
+	}
+	if val.ParamMessage.IsSet() && utf8.RuneCountInString(val.ParamMessage.Val) > 1024 {
+		return fmt.Errorf("'pmg': validation rule 'maxLength(1024)' violated")
+	}
+	for _, elem := range val.Stacktrace {
+		if err := elem.validate(); err != nil {
+			return errors.Wrapf(err, "st")
+		}
+	}
+	return nil
+}
+
+func (val *errorTransactionRef) IsSet() bool {
+	return val.Sampled.IsSet() || val.Type.IsSet()
+}
+
+func (val *errorTransactionRef) Reset() {
+	val.Sampled.Reset()
+	val.Type.Reset()
+}
+
+func (val *errorTransactionRef) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	return nil
+}
+
+func (val *transactionRoot) IsSet() bool {
+	return val.Transaction.IsSet()
+}
+
+func (val *transactionRoot) Reset() {
+	val.Transaction.Reset()
+}
+
+func (val *transactionRoot) validate() error {
+	if err := val.Transaction.validate(); err != nil {
+		return errors.Wrapf(err, "x")
+	}
+	if !val.Transaction.IsSet() {
+		return fmt.Errorf("'x' required")
+	}
+	return nil
+}
+
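The `IsSet`/`Reset`/`validate` triple on every model exists so decoded structs can be pooled and reused across requests: `Reset` clears values but deliberately keeps slice capacity (`val.Stacktrace[:0]`), avoiding reallocation on the next event. The `fetchTransactionRoot`/`releaseTransactionRoot` helpers exercised by the tests later in this diff follow this shape; a simplified sketch of the idea (pool details assumed, not the package's actual implementation):

```go
package main

import (
	"fmt"
	"sync"
)

type root struct{ frames []int }

// Reset drops contents but retains capacity for reuse.
func (r *root) Reset() { r.frames = r.frames[:0] }

var pool = sync.Pool{New: func() interface{} { return &root{} }}

func fetchRoot() *root { return pool.Get().(*root) }

func releaseRoot(r *root) {
	r.Reset() // always reset before returning to the pool
	pool.Put(r)
}

func main() {
	r := fetchRoot()
	r.frames = append(r.frames, 1, 2, 3)
	releaseRoot(r)
	r2 := fetchRoot()
	// r2 may be the same object: length 0, but capacity possibly retained
	fmt.Println(len(r2.frames), cap(r2.frames))
}
```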
+func (val *transaction) IsSet() bool {
+	return val.Context.IsSet() || val.Duration.IsSet() || val.ID.IsSet() || val.Marks.IsSet() || (len(val.Metricsets) > 0) || val.Name.IsSet() || val.Outcome.IsSet() || val.ParentID.IsSet() || val.Result.IsSet() || val.Sampled.IsSet() || val.SampleRate.IsSet() || val.Session.IsSet() || val.SpanCount.IsSet() || (len(val.Spans) > 0) || val.TraceID.IsSet() || val.Type.IsSet() || val.UserExperience.IsSet()
+}
+
+func (val *transaction) Reset() {
+	val.Context.Reset()
+	val.Duration.Reset()
+	val.ID.Reset()
+	val.Marks.Reset()
+	for i := range val.Metricsets {
+		val.Metricsets[i].Reset()
+	}
+	val.Metricsets = val.Metricsets[:0]
+	val.Name.Reset()
+	val.Outcome.Reset()
+	val.ParentID.Reset()
+	val.Result.Reset()
+	val.Sampled.Reset()
+	val.SampleRate.Reset()
+	val.Session.Reset()
+	val.SpanCount.Reset()
+	for i := range val.Spans {
+		val.Spans[i].Reset()
+	}
+	val.Spans = val.Spans[:0]
+	val.TraceID.Reset()
+	val.Type.Reset()
+	val.UserExperience.Reset()
+}
+
+func (val *transaction) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Context.validate(); err != nil {
+		return errors.Wrapf(err, "c")
+	}
+	if val.Duration.IsSet() && val.Duration.Val < 0 {
+		return fmt.Errorf("'d': validation rule 'min(0)' violated")
+	}
+	if !val.Duration.IsSet() {
+		return fmt.Errorf("'d' required")
+	}
+	if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 {
+		return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.ID.IsSet() {
+		return fmt.Errorf("'id' required")
+	}
+	if err := val.Marks.validate(); err != nil {
+		return errors.Wrapf(err, "k")
+	}
+	for _, elem := range val.Metricsets {
+		if err := elem.validate(); err != nil {
+			return errors.Wrapf(err, "me")
+		}
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Outcome.Val != "" {
+		var matchEnum bool
+		for _, s := range enumOutcome {
+			if val.Outcome.Val == s {
+				matchEnum = true
+				break
+			}
+		}
+		if !matchEnum {
+			return fmt.Errorf("'o': validation rule 'enum(enumOutcome)' violated")
+		}
+	}
+	if val.ParentID.IsSet() && utf8.RuneCountInString(val.ParentID.Val) > 1024 {
+		return fmt.Errorf("'pid': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Result.IsSet() && utf8.RuneCountInString(val.Result.Val) > 1024 {
+		return fmt.Errorf("'rt': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Session.validate(); err != nil {
+		return errors.Wrapf(err, "ses")
+	}
+	if err := val.SpanCount.validate(); err != nil {
+		return errors.Wrapf(err, "yc")
+	}
+	if !val.SpanCount.IsSet() {
+		return fmt.Errorf("'yc' required")
+	}
+	for _, elem := range val.Spans {
+		if err := elem.validate(); err != nil {
+			return errors.Wrapf(err, "y")
+		}
+	}
+	if val.TraceID.IsSet() && utf8.RuneCountInString(val.TraceID.Val) > 1024 {
+		return fmt.Errorf("'tid': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.TraceID.IsSet() {
+		return fmt.Errorf("'tid' required")
+	}
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Type.IsSet() {
+		return fmt.Errorf("'t' required")
+	}
+	if err := val.UserExperience.validate(); err != nil {
+		return errors.Wrapf(err, "exp")
+	}
+	return nil
+}
+
+func (val *transactionMarks) IsSet() bool {
+	return (len(val.Events) > 0)
+}
+
+func (val *transactionMarks) Reset() {
+	for k := range val.Events {
+		delete(val.Events, k)
+	}
+}
+
+func (val *transactionMarks) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	return nil
+}
+
+func (val *transactionMarkEvents) IsSet() bool {
+	return (len(val.Measurements) > 0)
+}
+
+func (val *transactionMarkEvents) Reset() {
+	for k := range val.Measurements {
+		delete(val.Measurements, k)
+	}
+}
+
+func (val *transactionMarkEvents) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	return nil
+}
+
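The `enum(enumOutcome)` rule in `transaction.validate` above scans a package-level slice; the outcome test cases later in this diff show the accepted values are "success", "failure", and "unknown", with the empty string skipping the check. A minimal sketch of that rule (slice contents taken from those tests):

```go
package main

import "fmt"

var enumOutcome = []string{"success", "failure", "unknown"}

// validOutcome mirrors the generated enum check: unset values pass,
// set values must match one of the enum entries exactly.
func validOutcome(v string) bool {
	if v == "" {
		return true
	}
	for _, s := range enumOutcome {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validOutcome("failure"), validOutcome("anything")) // true false
}
```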
+func (val *transactionMetricset) IsSet() bool {
+	return val.Samples.IsSet() || val.Span.IsSet()
+}
+
+func (val *transactionMetricset) Reset() {
+	val.Samples.Reset()
+	val.Span.Reset()
+}
+
+func (val *transactionMetricset) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Samples.validate(); err != nil {
+		return errors.Wrapf(err, "sa")
+	}
+	if !val.Samples.IsSet() {
+		return fmt.Errorf("'sa' required")
+	}
+	if err := val.Span.validate(); err != nil {
+		return errors.Wrapf(err, "y")
+	}
+	return nil
+}
+
+func (val *transactionMetricsetSamples) IsSet() bool {
+	return val.TransactionDurationCount.IsSet() || val.TransactionDurationSum.IsSet() || val.TransactionBreakdownCount.IsSet() || val.SpanSelfTimeCount.IsSet() || val.SpanSelfTimeSum.IsSet()
+}
+
+func (val *transactionMetricsetSamples) Reset() {
+	val.TransactionDurationCount.Reset()
+	val.TransactionDurationSum.Reset()
+	val.TransactionBreakdownCount.Reset()
+	val.SpanSelfTimeCount.Reset()
+	val.SpanSelfTimeSum.Reset()
+}
+
+func (val *transactionMetricsetSamples) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.TransactionDurationCount.validate(); err != nil {
+		return errors.Wrapf(err, "xdc")
+	}
+	if err := val.TransactionDurationSum.validate(); err != nil {
+		return errors.Wrapf(err, "xds")
+	}
+	if err := val.TransactionBreakdownCount.validate(); err != nil {
+		return errors.Wrapf(err, "xbc")
+	}
+	if err := val.SpanSelfTimeCount.validate(); err != nil {
+		return errors.Wrapf(err, "ysc")
+	}
+	if err := val.SpanSelfTimeSum.validate(); err != nil {
+		return errors.Wrapf(err, "yss")
+	}
+	return nil
+}
+
+func (val *metricsetSampleValue) IsSet() bool {
+	return val.Value.IsSet()
+}
+
+func (val *metricsetSampleValue) Reset() {
+	val.Value.Reset()
+}
+
+func (val *metricsetSampleValue) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if !val.Value.IsSet() {
+		return fmt.Errorf("'v' required")
+	}
+	return nil
+}
+
+func (val *metricsetSpanRef) IsSet() bool {
+	return val.Subtype.IsSet() || val.Type.IsSet()
+}
+
+func (val *metricsetSpanRef) Reset() {
+	val.Subtype.Reset()
+	val.Type.Reset()
+}
+
+func (val *metricsetSpanRef) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Subtype.IsSet() && utf8.RuneCountInString(val.Subtype.Val) > 1024 {
+		return fmt.Errorf("'su': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	return nil
+}
+
+func (val *transactionSession) IsSet() bool {
+	return val.ID.IsSet() || val.Sequence.IsSet()
+}
+
+func (val *transactionSession) Reset() {
+	val.ID.Reset()
+	val.Sequence.Reset()
+}
+
+func (val *transactionSession) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if !val.ID.IsSet() {
+		return fmt.Errorf("'id' required")
+	}
+	if val.Sequence.IsSet() && val.Sequence.Val < 1 {
+		return fmt.Errorf("'seq': validation rule 'min(1)' violated")
+	}
+	return nil
+}
+
+func (val *transactionSpanCount) IsSet() bool {
+	return val.Dropped.IsSet() || val.Started.IsSet()
+}
+
+func (val *transactionSpanCount) Reset() {
+	val.Dropped.Reset()
+	val.Started.Reset()
+}
+
+func (val *transactionSpanCount) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if !val.Started.IsSet() {
+		return fmt.Errorf("'sd' required")
+	}
+	return nil
+}
+
+func (val *span) IsSet() bool {
+	return val.Action.IsSet() || val.Context.IsSet() || val.Duration.IsSet() || val.ID.IsSet() || val.Name.IsSet() || val.Outcome.IsSet() || val.ParentIndex.IsSet() || val.SampleRate.IsSet() || (len(val.Stacktrace) > 0) || val.Start.IsSet() || val.Subtype.IsSet() || val.Sync.IsSet() || val.Type.IsSet()
+}
+
+func (val *span) Reset() {
+	val.Action.Reset()
+	val.Context.Reset()
+	val.Duration.Reset()
+	val.ID.Reset()
+	val.Name.Reset()
+	val.Outcome.Reset()
+	val.ParentIndex.Reset()
+	val.SampleRate.Reset()
+	for i := range val.Stacktrace {
+		val.Stacktrace[i].Reset()
+	}
+	val.Stacktrace = val.Stacktrace[:0]
+	val.Start.Reset()
+	val.Subtype.Reset()
+	val.Sync.Reset()
+	val.Type.Reset()
+}
+
+func (val *span) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Action.IsSet() && utf8.RuneCountInString(val.Action.Val) > 1024 {
+		return fmt.Errorf("'ac': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Context.validate(); err != nil {
+		return errors.Wrapf(err, "c")
+	}
+	if val.Duration.IsSet() && val.Duration.Val < 0 {
+		return fmt.Errorf("'d': validation rule 'min(0)' violated")
+	}
+	if !val.Duration.IsSet() {
+		return fmt.Errorf("'d' required")
+	}
+	if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 {
+		return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.ID.IsSet() {
+		return fmt.Errorf("'id' required")
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Name.IsSet() {
+		return fmt.Errorf("'n' required")
+	}
+	if val.Outcome.Val != "" {
+		var matchEnum bool
+		for _, s := range enumOutcome {
+			if val.Outcome.Val == s {
+				matchEnum = true
+				break
+			}
+		}
+		if !matchEnum {
+			return fmt.Errorf("'o': validation rule 'enum(enumOutcome)' violated")
+		}
+	}
+	for _, elem := range val.Stacktrace {
+		if err := elem.validate(); err != nil {
+			return errors.Wrapf(err, "st")
+		}
+	}
+	if !val.Start.IsSet() {
+		return fmt.Errorf("'s' required")
+	}
+	if val.Subtype.IsSet() && utf8.RuneCountInString(val.Subtype.Val) > 1024 {
+		return fmt.Errorf("'su': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Type.IsSet() {
+		return fmt.Errorf("'t' required")
+	}
+	return nil
+}
+
+func (val *spanContext) IsSet() bool {
+	return val.Destination.IsSet() || val.HTTP.IsSet() || val.Service.IsSet() || (len(val.Tags) > 0)
+}
+
+func (val *spanContext) Reset() {
+	val.Destination.Reset()
+	val.HTTP.Reset()
+	val.Service.Reset()
+	for k := range val.Tags {
+		delete(val.Tags, k)
+	}
+}
+
+func (val *spanContext) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Destination.validate(); err != nil {
+		return errors.Wrapf(err, "dt")
+	}
+	if err := val.HTTP.validate(); err != nil {
+		return errors.Wrapf(err, "h")
+	}
+	if err := val.Service.validate(); err != nil {
+		return errors.Wrapf(err, "se")
+	}
+	for k, v := range val.Tags {
+		switch t := v.(type) {
+		case nil:
+		case string:
+			if utf8.RuneCountInString(t) > 1024 {
+				return fmt.Errorf("'g': validation rule 'maxLengthVals(1024)' violated")
+			}
+		case bool:
+		case json.Number:
+		default:
+			return fmt.Errorf("'g': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k)
+		}
+	}
+	return nil
+}
+
+func (val *spanContextDestination) IsSet() bool {
+	return val.Address.IsSet() || val.Port.IsSet() || val.Service.IsSet()
+}
+
+func (val *spanContextDestination) Reset() {
+	val.Address.Reset()
+	val.Port.Reset()
+	val.Service.Reset()
+}
+
+func (val *spanContextDestination) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Address.IsSet() && utf8.RuneCountInString(val.Address.Val) > 1024 {
+		return fmt.Errorf("'ad': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Service.validate(); err != nil {
+		return errors.Wrapf(err, "se")
+	}
+	return nil
+}
+
+func (val *spanContextDestinationService) IsSet() bool {
+	return val.Name.IsSet() || val.Resource.IsSet() || val.Type.IsSet()
+}
+
+func (val *spanContextDestinationService) Reset() {
+	val.Name.Reset()
+	val.Resource.Reset()
+	val.Type.Reset()
+}
+
+func (val *spanContextDestinationService) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Resource.IsSet() && utf8.RuneCountInString(val.Resource.Val) > 1024 {
+		return fmt.Errorf("'rc': validation rule 'maxLength(1024)' violated")
+	}
+	if !val.Resource.IsSet() {
+		return fmt.Errorf("'rc' required")
+	}
+	if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 {
+		return fmt.Errorf("'t': validation rule 'maxLength(1024)' violated")
+	}
+	return nil
+}
+
+func (val *spanContextHTTP) IsSet() bool {
+	return val.Method.IsSet() || val.Response.IsSet() || val.StatusCode.IsSet() || val.URL.IsSet()
+}
+
+func (val *spanContextHTTP) Reset() {
+	val.Method.Reset()
+	val.Response.Reset()
+	val.StatusCode.Reset()
+	val.URL.Reset()
+}
+
+func (val *spanContextHTTP) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Method.IsSet() && utf8.RuneCountInString(val.Method.Val) > 1024 {
+		return fmt.Errorf("'mt': validation rule 'maxLength(1024)' violated")
+	}
+	if err := val.Response.validate(); err != nil {
+		return errors.Wrapf(err, "r")
+	}
+	return nil
+}
+
+func (val *spanContextHTTPResponse) IsSet() bool {
+	return val.DecodedBodySize.IsSet() || val.EncodedBodySize.IsSet() || val.TransferSize.IsSet()
+}
+
+func (val *spanContextHTTPResponse) Reset() {
+	val.DecodedBodySize.Reset()
+	val.EncodedBodySize.Reset()
+	val.TransferSize.Reset()
+}
+
+func (val *spanContextHTTPResponse) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	return nil
+}
+
+func (val *spanContextService) IsSet() bool {
+	return val.Agent.IsSet() || val.Name.IsSet()
+}
+
+func (val *spanContextService) Reset() {
+	val.Agent.Reset()
+	val.Name.Reset()
+}
+
+func (val *spanContextService) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if err := val.Agent.validate(); err != nil {
+		return errors.Wrapf(err, "a")
+	}
+	if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+		return fmt.Errorf("'n': validation rule 'maxLength(1024)' violated")
+	}
+	if val.Name.Val != "" && !patternAlphaNumericExtRegexp.MatchString(val.Name.Val) {
+		return fmt.Errorf("'n': validation rule 'pattern(patternAlphaNumericExt)' violated")
+	}
+	return nil
+}
+
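`patternAlphaNumericExtRegexp` is defined elsewhere in this package, so its exact definition is an assumption here; the service-name test cases later in this diff (letters, digits, and "_ -" are valid; "*" and "." are rejected) imply a character class along these lines:

```go
package main

import (
	"fmt"
	"regexp"
)

// Assumed shape of patternAlphaNumericExt, consistent with the
// service-name cases in TestServiceValidationRules below.
var patternAlphaNumericExtRegexp = regexp.MustCompile(`^[a-zA-Z0-9 _-]+$`)

func main() {
	for _, name := range []string{"my-service", "abc*", "abc."} {
		// prints: my-service true / abc* false / abc. false
		fmt.Println(name, patternAlphaNumericExtRegexp.MatchString(name))
	}
}
```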
+func (val *transactionUserExperience) IsSet() bool {
+	return val.CumulativeLayoutShift.IsSet() || val.FirstInputDelay.IsSet() || val.TotalBlockingTime.IsSet() || val.Longtask.IsSet()
+}
+
+func (val *transactionUserExperience) Reset() {
+	val.CumulativeLayoutShift.Reset()
+	val.FirstInputDelay.Reset()
+	val.TotalBlockingTime.Reset()
+	val.Longtask.Reset()
+}
+
+func (val *transactionUserExperience) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.CumulativeLayoutShift.IsSet() && val.CumulativeLayoutShift.Val < 0 {
+		return fmt.Errorf("'cls': validation rule 'min(0)' violated")
+	}
+	if val.FirstInputDelay.IsSet() && val.FirstInputDelay.Val < 0 {
+		return fmt.Errorf("'fid': validation rule 'min(0)' violated")
+	}
+	if val.TotalBlockingTime.IsSet() && val.TotalBlockingTime.Val < 0 {
+		return fmt.Errorf("'tbt': validation rule 'min(0)' violated")
+	}
+	if err := val.Longtask.validate(); err != nil {
+		return errors.Wrapf(err, "lt")
+	}
+	return nil
+}
+
+func (val *longtaskMetrics) IsSet() bool {
+	return val.Count.IsSet() || val.Max.IsSet() || val.Sum.IsSet()
+}
+
+func (val *longtaskMetrics) Reset() {
+	val.Count.Reset()
+	val.Max.Reset()
+	val.Sum.Reset()
+}
+
+func (val *longtaskMetrics) validate() error {
+	if !val.IsSet() {
+		return nil
+	}
+	if val.Count.IsSet() && val.Count.Val < 0 {
+		return fmt.Errorf("'count': validation rule 'min(0)' violated")
+	}
+	if !val.Count.IsSet() {
+		return fmt.Errorf("'count' required")
+	}
+	if val.Max.IsSet() && val.Max.Val < 0 {
+		return fmt.Errorf("'max': validation rule 'min(0)' violated")
+	}
+	if !val.Max.IsSet() {
+		return fmt.Errorf("'max' required")
 	}
-	if utf8.RuneCountInString(m.Email.Val) > 1024 {
-		return fmt.Errorf("validation rule 'max(1024)' violated for 'm.u.em'")
+	if val.Sum.IsSet() && val.Sum.Val < 0 {
+		return fmt.Errorf("'sum': validation rule 'min(0)' violated")
 	}
-	if utf8.RuneCountInString(m.Name.Val) > 1024 {
-		return fmt.Errorf("validation rule 'max(1024)' violated for 'm.u.un'")
+	if !val.Sum.IsSet() {
+		return fmt.Errorf("'sum' required")
 	}
 	return nil
 }
diff --git a/model/modeldecoder/rumv3/model_test.go b/model/modeldecoder/rumv3/model_test.go
index b04866494ce..963e0e7fce9 100644
--- a/model/modeldecoder/rumv3/model_test.go
+++ b/model/modeldecoder/rumv3/model_test.go
@@ -18,178 +18,449 @@ package rumv3
 
 import (
-	"bytes"
-	"encoding/json"
+	"fmt"
 	"io"
 	"os"
+	"path/filepath"
+	"regexp"
 	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/elastic/apm-server/decoder"
 	"github.com/elastic/apm-server/model/modeldecoder/modeldecodertest"
 )
 
-func testdata(t *testing.T) io.Reader {
-	r, err := os.Open("../../../testdata/intake-v3/metadata.ndjson")
-	require.NoError(t, err)
-	return r
+func TestUserValidationRules(t *testing.T) {
+	testcases := []testcase{
+		{name: "id-string", data: `{"id":"user123"}`},
+		{name: "id-int", data: `{"id":44}`},
+		{name: "id-float", errorKey: "inputTypes", data: `{"id":45.6}`},
+		{name: "id-bool", errorKey: "inputTypes", data: `{"id":true}`},
+		{name: "id-string-max-len", data: `{"id":"` + modeldecodertest.BuildString(1024) + `"}`},
+		{name: "id-string-max-len", errorKey: "max", data: `{"id":"` + modeldecodertest.BuildString(1025) + `"}`},
+	}
+	testValidation(t, "m", testcases, "u")
+	testValidation(t, "x", testcases, "c", "u")
+	testValidation(t, "e", testcases, "c", "u")
 }
 
-func TestIsSet(t *testing.T) {
-	data := `{"se":{"n":"user-service"}}`
-	var m metadata
-	require.NoError(t, decoder.NewJSONIteratorDecoder(strings.NewReader(data)).Decode(&m))
-	assert.True(t, m.IsSet())
-	assert.True(t, m.Service.IsSet())
-	assert.True(t, m.Service.Name.IsSet())
-	assert.False(t, m.Service.Language.IsSet())
-}
-
-func TestSetReset(t *testing.T) {
-	var m metadataRoot
-	require.NoError(t, decoder.NewJSONIteratorDecoder(testdata(t)).Decode(&m))
-	require.True(t, m.IsSet())
-	require.NotEmpty(t, m.Metadata.Labels)
-	require.True(t, m.Metadata.Service.IsSet())
-	require.True(t, m.Metadata.User.IsSet())
-	// call Reset and ensure initial state, except for array capacity
-	m.Reset()
-	assert.False(t, m.IsSet())
-	assert.Equal(t, metadataService{}, m.Metadata.Service)
-	assert.Equal(t, metadataUser{}, m.Metadata.User)
-	assert.Empty(t, m.Metadata.Labels)
-}
-
-func TestValidationRules(t *testing.T) {
-	type testcase struct {
-		name     string
-		errorKey string
-		data     string
-	}
-
-	strBuilder := func(n int) string {
-		b := make([]rune, n)
-		for i := range b {
-			b[i] = '⌘'
-		}
-		return string(b)
-	}
-
-	testMetadata := func(t *testing.T, key string, tc testcase) {
-		// load data
-		// set testcase data for given key
-		var data map[string]interface{}
-		require.NoError(t, decoder.NewJSONIteratorDecoder(testdata(t)).Decode(&data))
-		meta := data["m"].(map[string]interface{})
-		var keyData map[string]interface{}
-		require.NoError(t, json.Unmarshal([]byte(tc.data), &keyData))
-		meta[key] = keyData
-
-		// unmarshal data into metadata struct
-		var m metadata
-		b, err := json.Marshal(meta)
-		require.NoError(t, err)
-		require.NoError(t, decoder.NewJSONIteratorDecoder(bytes.NewReader(b)).Decode(&m))
-		// run validation and checks
-		err = m.validate()
-		if tc.errorKey == "" {
-			assert.NoError(t, err)
-		} else {
-			require.Error(t, err)
-			assert.Contains(t, err.Error(), tc.errorKey)
-		}
+func TestServiceValidationRules(t *testing.T) {
+	testcases := []testcase{
+		{name: "name-valid-lower", data: `{"a":{"n":"go","ve":"1.0"},"n":"abcdefghijklmnopqrstuvwxyz"}`},
+		{name: "name-valid-upper", data: `{"a":{"n":"go","ve":"1.0"},"n":"ABCDEFGHIJKLMNOPQRSTUVWXYZ"}`},
+		{name: "name-valid-digits", data: `{"a":{"n":"go","ve":"1.0"},"n":"0123456789"}`},
+		{name: "name-valid-special", data: `{"a":{"n":"go","ve":"1.0"},"n":"_ -"}`},
+		{name: "name-asterisk", errorKey: "n", data: `{"a":{"n":"go","ve":"1.0"},"n":"abc*"}`},
+		{name: "name-dot", errorKey: "n", data: `{"a":{"n":"go","ve":"1.0"},"n":"abc."}`},
 	}
+	testValidation(t, "m", testcases, "se")
+	testValidation(t, "x", testcases, "c", "se")
+	testValidation(t, "e", testcases, "c", "se")
+}
 
+func TestLabelValidationRules(t *testing.T) {
+	testcases := []testcase{
+		{name: "valid", data: `{"k.*\"\\1":"v1\\.s*\"","k2":2.3,"k3":3,"k4":true,"k5":null}`},
+		{name: "restricted-type", errorKey: "inputTypesVals", data: `{"k1":{"k2":"v1"}}`},
+		{name: "restricted-type", errorKey: "inputTypesVals", data: `{"k1":{"k2":[1,2,3]}}`},
+		{name: "max-len", data: `{"k1":"` + modeldecodertest.BuildString(1024) + `"}`},
+		{name: "max-len-exceeded", errorKey: "maxLengthVals", data: `{"k1":"` + modeldecodertest.BuildString(1025) + `"}`},
+	}
+	testValidation(t, "m", testcases, "l")
+	testValidation(t, "x", testcases, "c", "g")
+	testValidation(t, "e", testcases, "c", "g")
+}
-	t.Run("user", func(t *testing.T) {
-		for _, tc := range []testcase{
-			{name: "id-string", data: `{"id":"user123"}`},
-			{name: "id-int", data: `{"id":44}`},
-			{name: "id-float", errorKey: "types", data: `{"id":45.6}`},
-			{name: "id-bool", errorKey: "types", data: `{"id":true}`},
-			{name: "id-string-max-len", data: `{"id":"` + strBuilder(1024) + `"}`},
-			{name: "id-string-max-len", errorKey: "max", data: `{"id":"` + strBuilder(1025) + `"}`},
-		} {
-			t.Run(tc.name, func(t *testing.T) {
-				testMetadata(t, "u", tc)
-			})
-		}
-	})
 
+func TestMaxLenValidationRules(t *testing.T) {
+	// this tests an arbitrary field to ensure the `max` rule on strings works as expected
+	testcases := []testcase{
+		{name: "service-environment-max-len",
+			data: `{"a":{"n":"go","ve":"1.0"},"n":"my-service","en":"` + modeldecodertest.BuildString(1024) + `"}`},
+		{name: "service-environment-max-len", errorKey: "max",
+			data: `{"a":{"n":"go","ve":"1.0"},"n":"my-service","en":"` + modeldecodertest.BuildString(1025) + `"}`},
+	}
+	testValidation(t, "m", testcases, "se")
+}
-	t.Run("service", func(t *testing.T) {
-		for _, tc := range []testcase{
-			{name: "name-valid-lower", data: `"n":"abcdefghijklmnopqrstuvwxyz"`},
-			{name: "name-valid-upper", data: `"n":"ABCDEFGHIJKLMNOPQRSTUVWXYZ"`},
-			{name: "name-valid-digits", data: `"n":"0123456789"`},
-			{name: "name-valid-special", data: `"n":"_ -"`},
-			{name: "name-asterisk", errorKey: "se.n", data: `"n":"abc*"`},
errorKey: "se.n", data: `"n":"abc."`}, - } { - t.Run(tc.name, func(t *testing.T) { - tc.data = `{"a":{"n":"go","ve":"1.0"},` + tc.data + `}` - testMetadata(t, "se", tc) - }) - } - }) +func TestMaxLenValidationRules(t *testing.T) { + // this tests an arbitrary field to ensure the `max` rule on strings works as expected + testcases := []testcase{ + {name: "service-environment-max-len", + data: `{"a":{"n":"go","ve":"1.0"},"n":"my-service","en":"` + modeldecodertest.BuildString(1024) + `"}`}, + {name: "service-environment-max-len", errorKey: "max", + data: `{"a":{"n":"go","ve":"1.0"},"n":"my-service","en":"` + modeldecodertest.BuildString(1025) + `"}`}, + } + testValidation(t, "m", testcases, "se") +} - t.Run("max-len", func(t *testing.T) { - for _, tc := range []testcase{ - {name: "service-environment-max-len", data: `"en":"` + strBuilder(1024) + `"`}, - {name: "service-environment-max-len", errorKey: "max", data: `"en":"` + strBuilder(1025) + `"`}, - } { - t.Run(tc.name, func(t *testing.T) { - tc.data = `{"a":{"n":"go","ve":"1.0"},"n":"my-service",` + tc.data + `}` - testMetadata(t, "se", tc) - }) +func TestContextValidationRules(t *testing.T) { + t.Run("custom", func(t *testing.T) { + testcases := []testcase{ + {name: "custom", data: `{"cu":{"k.*\"1":{"v1":123,"v2":"value"},"k2":34,"k3":[{"a.1":1,"b*\"":2}]}}`}, } + testValidation(t, "x", testcases, "c") + testValidation(t, "e", testcases, "c") }) +} - t.Run("labels", func(t *testing.T) { - for _, tc := range []testcase{ - {name: "valid", data: `{"k1":"v1","k2":2.3,"k3":3,"k4":true,"k5":null}`}, - {name: "restricted-type", errorKey: "typesVals", data: `{"k1":{"k2":"v1"}}`}, - {name: "key-dot", errorKey: "patternKeys", data: `{"k.1":"v1"}`}, - {name: "key-asterisk", errorKey: "patternKeys", data: `{"k*1":"v1"}`}, - {name: "key-quotemark", errorKey: "patternKeys", data: `{"k\"1":"v1"}`}, - {name: "max-len", data: `{"k1":"` + strBuilder(1024) + `"}`}, - {name: "max-len-exceeded", errorKey: "maxVals", data: `{"k1":"` + strBuilder(1025) + `"}`}, - } { - t.Run(tc.name, func(t *testing.T) { - testMetadata(t, "l", tc) - }) - } - }) +func TestDurationValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "duration", data: `0.0`}, + {name: "duration", errorKey: "min", data: `-0.09`}, + } + testValidation(t, "x", testcases, "d") +} + +func TestMarksValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "marks", data: `{"k.*\"1":{"v.*\"1":12.3}}`}, + } + testValidation(t, "x", testcases, "k") +} - t.Run("required", func(t *testing.T) { - // setup: create full metadata struct with arbitrary values set - var metadata metadata - modeldecodertest.InitStructValues(&metadata) - - // test vanilla struct is valid - require.NoError(t, metadata.validate()) - - // iterate through struct, remove every key one by one - // and test that validation behaves as expected - requiredKeys := map[string]interface{}{ - "se": nil, //service - "se.a": nil, //service.agent - "se.a.n": nil, //service.agent.name - "se.a.ve": nil, //service.agent.version - "se.la.n": nil, //service.language.name - "se.ru.n": nil, //service.runtime.name - "se.ru.ve": nil, //service.runtime.version - "se.n": nil, //service.name +func TestOutcomeValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "outcome-success", data: `"success"`}, + {name: "outcome-failure", data: `"failure"`}, + {name: "outcome-unknown", data: `"unknown"`}, + {name: "outcome-invalid", errorKey: "enum", data: `"anything"`}, + } + testValidation(t, "x", testcases, "o") +} + +// +// Test 
+//
+// Test Required fields
+//
+
+func TestErrorRequiredValidationRules(t *testing.T) {
+	// setup: create full struct with arbitrary values set
+	var event errorEvent
+	modeldecodertest.InitStructValues(&event)
+	// test vanilla struct is valid
+	require.NoError(t, event.validate())
+
+	// iterate through struct, remove every key one by one
+	// and test that validation behaves as expected
+	requiredKeys := map[string]interface{}{
+		"c.q.mt":   nil, //context.request.method
+		"ex.st.f":  nil, //exception.stacktrace.filename
+		"id":       nil, //id
+		"log.mg":   nil, //log.message
+		"log.st.f": nil, //log.stacktrace.filename
+		"pid":      nil, //requiredIf
+		"tid":      nil, //requiredIf
+	}
+	cb := assertRequiredFn(t, requiredKeys, event.validate)
+	modeldecodertest.SetZeroStructValue(&event, cb)
+}
+
+func TestErrorRequiredOneOfValidationRules(t *testing.T) {
+	for _, tc := range []struct {
+		name    string
+		setupFn func(event *errorEvent)
+	}{
+		{name: "all", setupFn: func(e *errorEvent) {
+			e.Log = errorLog{}
+			e.Log.Message.Set("test message")
+			e.Exception = errorException{}
+			e.Exception.Message.Set("test message")
+		}},
+		{name: "log", setupFn: func(e *errorEvent) {
+			e.Log = errorLog{}
+			e.Log.Message.Set("test message")
+		}},
+		{name: "exception/message", setupFn: func(e *errorEvent) {
+			e.Exception = errorException{}
+			e.Exception.Message.Set("test message")
+		}},
+		{name: "exception/type", setupFn: func(e *errorEvent) {
+			e.Exception = errorException{}
+			e.Exception.Type.Set("test type")
+		}},
+		{name: "exception/cause",
+			setupFn: func(e *errorEvent) {
+				exception := errorException{}
+				exception.Type.Set("test type")
+				cause := errorException{}
+				cause.Type.Set("cause type")
+				exception.Cause = []errorException{cause}
+				e.Exception = exception
+			},
+		},
{ + t.Run("valid/"+tc.name, func(t *testing.T) { + event := validErrorEvent() + tc.setupFn(&event) + require.NoError(t, event.validate()) + }) + } + + for _, tc := range []struct { + name string + err string + setupFn func(event *errorEvent) + }{ + {name: "traceID", err: "'pid' required", + setupFn: func(e *errorEvent) { e.TraceID.Set("xxx") }}, + {name: "parentID", err: "'tid' required", + setupFn: func(e *errorEvent) { e.ParentID.Set("xxx") }}, + {name: "transactionID", err: "'pid' required", + setupFn: func(e *errorEvent) { e.TransactionID.Set("xxx") }}, + {name: "transactionID-parentID", err: "'tid' required", + setupFn: func(e *errorEvent) { + e.TransactionID.Set("xxx") + e.ParentID.Set("xxx") + }}, + {name: "transactionID-traceID", err: "'pid' required", + setupFn: func(e *errorEvent) { + e.TransactionID.Set("xxx") + e.TraceID.Set("xxx") + }}, + } { + t.Run("invalid/"+tc.name, func(t *testing.T) { + event := validErrorEvent() + tc.setupFn(&event) + err := event.validate() + require.Error(t, err) + require.Contains(t, err.Error(), tc.err) + }) + } +} + +func TestMetadataRequiredValidationRules(t *testing.T) { + // setup: create full event struct with arbitrary values set + var event metadata + modeldecodertest.InitStructValues(&event) + + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "se": nil, //service + "se.a": nil, //service.agent + "se.a.n": nil, //service.agent.name + "se.a.ve": nil, //service.agent.version + "se.la.n": nil, //service.language.name + "se.ru.n": nil, //service.runtime.name + "se.ru.ve": nil, //service.runtime.version + "se.n": nil, //service.name + } + cb := assertRequiredFn(t, requiredKeys, event.validate) + modeldecodertest.SetZeroStructValue(&event, cb) +} + +func TestTransactionMetricsetRequiredValidationRules(t *testing.T) { + // setup: create full struct with sample values set + var tx transaction + s := `{"me":[{"sa":{"xds":{"v":2048},"xbc":{"v":1}},"y":{"t":"db","su":"mysql"}}]}` + modeldecodertest.DecodeData(t, strings.NewReader(s), "me", &tx) + // test vanilla struct is valid + require.Len(t, tx.Metricsets, 1) + require.NoError(t, tx.Metricsets[0].validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "sa": nil, //samples + "sa.v": nil, //samples.*.value + } + cb := assertRequiredFn(t, requiredKeys, tx.Metricsets[0].validate) + modeldecodertest.SetZeroStructValue(&tx.Metricsets[0], cb) +} + +func TestTransactionRequiredValidationRules(t *testing.T) { + // setup: create full metadata struct with arbitrary values set + var event transaction + modeldecodertest.InitStructValues(&event) + event.Outcome.Set("success") + for i := 0; i < len(event.Spans); i++ { + event.Spans[i].Outcome.Set("failure") + } + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "c.q.mt": nil, //context.request.method + "d": nil, //duration + "id": nil, //id + "exp.lt.count": nil, //experience.longtask.count + "exp.lt.max": nil, //experience.longtask.max + "exp.lt.sum": nil, //experience.longtask.sum + "me.sa": nil, //metricsets.samples + "t": nil, //type + "tid": nil, //trace_id + "y.c.dt.se.rc": nil, 
+		"y.c.dt.se.rc": nil, //spans.*.context.destination.service.resource
+		"y.d":          nil, //spans.*.duration
+		"y.id":         nil, //spans.*.id
+		"y.n":          nil, //spans.*.name
+		"y.s":          nil, //spans.*.start
+		"y.t":          nil, //spans.*.type
+		"y.st.f":       nil, //spans.*.stacktrace.*.filename
+		"yc":           nil, //span_count
+		"yc.sd":        nil, //span_count.started
+		"ses.id":       nil, //session.id
+	}
+	cb := assertRequiredFn(t, requiredKeys, event.validate)
+	modeldecodertest.SetZeroStructValue(&event, cb)
+}
+
+var regexpArrayAccessor = regexp.MustCompile(`\[[0-9]*]\.`)
+
+func assertRequiredFn(t *testing.T, keys map[string]interface{}, validate func() error) func(key string) {
+	return func(key string) {
+		s := regexpArrayAccessor.ReplaceAllString(key, "")
+		err := validate()
+		if _, ok := keys[s]; ok {
+			require.Error(t, err, key)
+			for _, part := range strings.Split(s, ".") {
+				assert.Contains(t, err.Error(), part)
+			}
+		} else {
+			assert.NoError(t, err, key)
 		}
+	}
+}
+
+//
+// Test Set() and Reset()
+//
+
+func TestResetIsSet(t *testing.T) {
+	for name, root := range map[string]setter{
+		"e": &errorRoot{},
+		"m": &metadataRoot{},
+		"x": &transactionRoot{},
+	} {
+		t.Run(name, func(t *testing.T) {
+			r := testdataReader(t, testFileName(name))
+			modeldecodertest.DecodeData(t, r, name, &root)
+			require.True(t, root.IsSet())
+			// call Reset and ensure initial state, except for array capacity
+			root.Reset()
+			assert.False(t, root.IsSet())
+		})
+	}
+}
+
+type testcase struct {
+	name     string
+	errorKey string
+	data     string
+}
+
+type setter interface {
+	IsSet() bool
+	Reset()
+}
+
+type validator interface {
+	validate() error
+}
+
+func testdataReader(t *testing.T, typ string) io.Reader {
+	p := filepath.Join("..", "..", "..", "testdata", "intake-v3", fmt.Sprintf("%s.ndjson", typ))
+	r, err := os.Open(p)
+	require.NoError(t, err)
+	return r
+}
+
+func testFileName(eventType string) string {
+	switch eventType {
+	case "e":
+		return "rum_errors"
+	default:
+		return "rum_events"
+	}
+}
+
+func testValidation(t *testing.T, eventType string, testcases []testcase, keys ...string) {
+	for _, tc := range testcases {
+		t.Run(tc.name+"/"+eventType, func(t *testing.T) {
+			var event validator
+			switch eventType {
+			case "e":
+				event = &errorEvent{}
+			case "m":
+				event = &metadata{}
+			case "x":
+				event = &transaction{}
+			}
-		modeldecodertest.SetZeroStructValue(&metadata, func(key string) {
-			err := metadata.validate()
-			if _, ok := requiredKeys[key]; ok {
-				require.Error(t, err, key)
-				assert.Contains(t, err.Error(), key)
+			r := testdataReader(t, testFileName(eventType))
+			modeldecodertest.DecodeDataWithReplacement(t, r, eventType, tc.data, event, keys...)
+
+			// run validation and checks
+			err := event.validate()
+			if tc.errorKey == "" {
+				assert.NoError(t, err)
 			} else {
-				assert.NoError(t, err, key)
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tc.errorKey)
 			}
 		})
-	})
+	}
 }
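The required-field tests above lean on `modeldecodertest.SetZeroStructValue`, which (judging by its use with `assertRequiredFn`) zeroes one field at a time and invokes the callback with that field's dotted key path so `validate()` can be re-run. The real helper lives in the modeldecodertest package; a simplified reflection-based sketch of the idea, with all details assumed:

```go
package main

import (
	"fmt"
	"reflect"
)

// zeroEachField zeroes one exported field at a time, calls cb with the
// field's name, then restores the original value — a stripped-down
// version of the SetZeroStructValue idea (no recursion, no tag keys).
func zeroEachField(v interface{}, cb func(key string)) {
	s := reflect.ValueOf(v).Elem()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		orig := reflect.New(f.Type()).Elem()
		orig.Set(f)
		f.Set(reflect.Zero(f.Type())) // knock the field out
		cb(s.Type().Field(i).Name)    // caller runs validate() here
		f.Set(orig)                   // restore for the next iteration
	}
}

type event struct {
	ID   string
	Name string
}

func main() {
	e := event{ID: "1", Name: "n"}
	zeroEachField(&e, func(key string) {
		fmt.Println("zeroed:", key, e)
	})
}
```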
diff --git a/model/modeldecoder/rumv3/transaction_test.go b/model/modeldecoder/rumv3/transaction_test.go
new file mode 100644
index 00000000000..69fb7530bf6
--- /dev/null
+++ b/model/modeldecoder/rumv3/transaction_test.go
@@ -0,0 +1,387 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package rumv3
+
+import (
+	"net"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/apm-server/decoder"
+	"github.com/elastic/apm-server/model"
+	"github.com/elastic/apm-server/model/modeldecoder"
+	"github.com/elastic/apm-server/model/modeldecoder/modeldecodertest"
+	"github.com/elastic/beats/v7/libbeat/common"
+)
+
+func TestResetTransactionOnRelease(t *testing.T) {
+	inp := `{"x":{"n":"tr-a"}}`
+	tr := fetchTransactionRoot()
+	require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(tr))
+	require.True(t, tr.IsSet())
+	releaseTransactionRoot(tr)
+	assert.False(t, tr.IsSet())
+}
+
+func TestDecodeNestedTransaction(t *testing.T) {
+	t.Run("decode", func(t *testing.T) {
+		now := time.Now()
+		eventBase := initializedMetadata()
+		eventBase.Timestamp = now
+		input := modeldecoder.Input{Base: eventBase}
+		str := `{"x":{"n":"tr-a","d":100,"id":"100","tid":"1","t":"request","yc":{"sd":2},"y":[{"n":"a","d":10,"t":"http","id":"123","s":20}],"me":[{"sa":{"xds":{"v":2048}}},{"sa":{"ysc":{"v":5}},"y":{"t":"span_type","su":"span_subtype"}}]}}`
+		dec := decoder.NewJSONDecoder(strings.NewReader(str))
+		var batch model.Batch
+		require.NoError(t, DecodeNestedTransaction(dec, &input, &batch))
+		require.Len(t, batch, 4) // 1 transaction, 2 metricsets, 1 span
+		require.NotNil(t, batch[0].Transaction)
+		require.NotNil(t, batch[1].Metricset)
+		require.NotNil(t, batch[2].Metricset)
+		require.NotNil(t, batch[3].Span)
+
+		assert.Equal(t, "request", batch[0].Transaction.Type)
+		// fall back to request time
+		assert.Equal(t, now, batch[0].Timestamp)
+
+		// Ensure nested metricsets are decoded. RUMv3 only sends
+		// breakdown metrics, so the Metricsets will be empty and
+		// metrics will be recorded on the Transaction and Span
+		// fields.
+		assert.Equal(t, &model.Metricset{}, batch[1].Metricset)
+		assert.Equal(t, &model.Transaction{
+			Name:               "tr-a",
+			Type:               "request",
+			AggregatedDuration: model.AggregatedDuration{Sum: 2048 * time.Microsecond},
+		}, batch[1].Transaction)
+		assert.Equal(t, &model.Metricset{}, batch[2].Metricset)
+		assert.Equal(t, &model.Transaction{
+			Name: "tr-a",
+			Type: "request",
+		}, batch[2].Transaction)
+		assert.Equal(t, &model.Span{
+			Type:     "span_type",
+			Subtype:  "span_subtype",
+			SelfTime: model.AggregatedDuration{Count: 5},
+		}, batch[2].Span)
+		assert.Equal(t, now, batch[2].Timestamp)
+
+		// ensure nested spans are decoded
+		start := time.Duration(20 * 1000 * 1000)
+		assert.Equal(t, now.Add(start), batch[3].Timestamp) //add start to timestamp
+		assert.Equal(t, "100", batch[3].Transaction.ID)
+		assert.Equal(t, "1", batch[3].Trace.ID)
+		assert.Equal(t, "100", batch[3].Parent.ID)
+
+		for _, event := range batch {
+			modeldecodertest.AssertStructValues(
+				t, &event,
+				metadataExceptions("Timestamp"), // timestamp checked above
+				modeldecodertest.DefaultValues(),
+			)
+		}
+
+		err := DecodeNestedTransaction(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "decode")
+	})
+
+	t.Run("decode-marks", func(t *testing.T) {
+		now := time.Now()
+		eventBase := model.APMEvent{Timestamp: now}
+		input := modeldecoder.Input{Base: eventBase}
+		str := `{"x":{"d":100,"id":"100","tid":"1","t":"request","yc":{"sd":2},"k":{"a":{"dc":0.1,"di":0.2,"ds":0.3,"de":0.4,"fb":0.5,"fp":0.6,"lp":0.7,"long":0.8},"nt":{"fs":0.1,"ls":0.2,"le":0.3,"cs":0.4,"ce":0.5,"qs":0.6,"rs":0.7,"re":0.8,"dl":0.9,"di":0.11,"ds":0.21,"de":0.31,"dc":0.41,"es":0.51,"ee":6,"long":0.99},"long":{"long":0.1}}}}`
+		dec := decoder.NewJSONDecoder(strings.NewReader(str))
+		var batch model.Batch
+		require.NoError(t, DecodeNestedTransaction(dec, &input, &batch))
+		marks := model.TransactionMarks{
+			"agent": map[string]float64{
+				"domComplete":                0.1,
+				"domInteractive":             0.2,
+				"domContentLoadedEventStart": 0.3,
+				"domContentLoadedEventEnd":   0.4,
+				"timeToFirstByte":            0.5,
+				"firstContentfulPaint":       0.6,
+				"largestContentfulPaint":     0.7,
+				"long":                       0.8,
+			},
+			"navigationTiming": map[string]float64{
+				"fetchStart":                 0.1,
+				"domainLookupStart":          0.2,
+				"domainLookupEnd":            0.3,
+				"connectStart":               0.4,
+				"connectEnd":                 0.5,
+				"requestStart":               0.6,
+				"responseStart":              0.7,
+				"responseEnd":                0.8,
+				"domLoading":                 0.9,
+				"domInteractive":             0.11,
+				"domContentLoadedEventStart": 0.21,
+				"domContentLoadedEventEnd":   0.31,
+				"domComplete":                0.41,
+				"loadEventStart":             0.51,
+				"loadEventEnd":               6,
+				"long":                       0.99,
+			},
+			"long": map[string]float64{
+				"long": 0.1,
+			},
+		}
+		assert.Equal(t, marks, batch[0].Transaction.Marks)
+	})
+
+	t.Run("validate", func(t *testing.T) {
+		var batch model.Batch
+		err := DecodeNestedTransaction(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &batch)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "validation")
+	})
+}
+
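As the decode test above shows, a RUM v3 span carries no timestamp of its own: its `s` (start) field is an offset in milliseconds relative to the transaction's base timestamp (`"s":20` yields `now.Add(20ms)`, via `time.Duration(20 * 1000 * 1000)` nanoseconds). The same arithmetic in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	requestTime := time.Now()
	startMs := 20.0 // span "s" field: offset in milliseconds
	// milliseconds -> time.Duration (nanoseconds), as in the test above
	offset := time.Duration(startMs * float64(time.Millisecond))
	spanTimestamp := requestTime.Add(offset)
	fmt.Println(spanTimestamp.Sub(requestTime)) // 20ms
}
```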
overwrite client.ip if already set in metadata + assert.Equal(t, localhostIP, out.Client.IP, out.Client.IP.String()) + assert.Equal(t, common.MapStr{ + "init0": "init", "init1": "init", "init2": "init", + "overwritten0": "overwritten", "overwritten1": "overwritten", + }, out.Labels) + // service values should be set + modeldecodertest.AssertStructValues(t, &out.Service, metadataExceptions("Node", "Agent.EphemeralID"), otherVal) + // user values should be set + modeldecodertest.AssertStructValues(t, &out.User, metadataExceptions(), otherVal) + }) + + t.Run("overwrite-user", func(t *testing.T) { + // user should be populated by metadata or event-specific values, but not merged + var input transaction + var out model.APMEvent + input.Context.User.Email.Set("test@user.com") + mapToTransactionModel(&input, &out) + assert.Equal(t, "test@user.com", out.User.Email) + assert.Zero(t, out.User.ID) + assert.Zero(t, out.User.Name) + }) + + t.Run("transaction-values", func(t *testing.T) { + exceptions := func(key string) bool { + for _, s := range []string{ + // values not set for RUM v3 + "RepresentativeCount", "Message", + // Not set for transaction events: + "AggregatedDuration", + "AggregatedDuration.Count", + "AggregatedDuration.Sum", + "BreakdownCount", + "DurationHistogram", + "DurationHistogram.Counts", + "DurationHistogram.Values", + "Root", + } { + if strings.HasPrefix(key, s) { + return true + } + } + return false + } + + var input transaction + var out1, out2 model.APMEvent + reqTime := time.Now().Add(time.Second) + out1.Timestamp = reqTime + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToTransactionModel(&input, &out1) + input.Reset() + defaultVal.Update(reqTime) //for rumv3 the timestamp is always set from the base event + modeldecodertest.AssertStructValues(t, out1.Transaction, exceptions, defaultVal) + + // ensure memory is not shared by reusing input model + out2.Timestamp = reqTime + otherVal := modeldecodertest.NonDefaultValues() + otherVal.Update(reqTime) //for rumv3 the timestamp is always set from the base event + modeldecodertest.SetStructValues(&input, otherVal) + mapToTransactionModel(&input, &out2) + modeldecodertest.AssertStructValues(t, out2.Transaction, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Transaction, exceptions, defaultVal) + }) + + t.Run("span-values", func(t *testing.T) { + exceptions := func(key string) bool { + for _, s := range []string{ + // values not set for RUM v3 + "ChildIDs", + "Composite", + "DB", + "Message", + "RepresentativeCount", + "Stacktrace.LibraryFrame", + "Stacktrace.Vars", + // stacktrace original and sourcemap values are set when sourcemapping is applied + "Stacktrace.Original", + "Stacktrace.Sourcemap", + // ExcludeFromGrouping is set when processing the event + "Stacktrace.ExcludeFromGrouping", + // Not set for span events: + "DestinationService.ResponseTime", + "DestinationService.ResponseTime.Count", + "DestinationService.ResponseTime.Sum", + "SelfTime", + "SelfTime.Count", + "SelfTime.Sum", + } { + if strings.HasPrefix(key, s) { + return true + } + } + return false + } + + var input span + var out1, out2 model.APMEvent + reqTime := time.Now().Add(time.Second) + out1.Timestamp = reqTime + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToSpanModel(&input, &out1) + input.Reset() + defaultStart := time.Duration(defaultVal.Float * 1000 * 1000) + defaultVal.Update(reqTime.Add(defaultStart)) //for rumv3 the 
timestamp is always set from the base event + modeldecodertest.AssertStructValues(t, out1.Span, exceptions, defaultVal) + + // ensure memory is not shared by reusing input model + out2.Timestamp = reqTime + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToSpanModel(&input, &out2) + otherStart := time.Duration(otherVal.Float * 1000 * 1000) + otherVal.Update(reqTime.Add(otherStart)) //for rumv3 the timestamp is always set from the base event + modeldecodertest.AssertStructValues(t, out2.Span, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Span, exceptions, defaultVal) + }) + + t.Run("span-outcome", func(t *testing.T) { + var input span + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // set from input, ignore status code + input.Outcome.Set("failure") + input.Context.HTTP.StatusCode.Set(http.StatusPermanentRedirect) + mapToSpanModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from span fields - success + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Set(http.StatusPermanentRedirect) + mapToSpanModel(&input, &out) + assert.Equal(t, "success", out.Event.Outcome) + // derive from span fields - failure + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Set(http.StatusBadRequest) + mapToSpanModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from span fields - unknown + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Reset() + mapToSpanModel(&input, &out) + assert.Equal(t, "unknown", out.Event.Outcome) + }) + + t.Run("transaction-outcome", func(t *testing.T) { + var input transaction + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // set from input, ignore status code + input.Outcome.Set("failure") + input.Context.Response.StatusCode.Set(http.StatusBadRequest) + mapToTransactionModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from transaction fields - success + input.Outcome.Reset() + input.Context.Response.StatusCode.Set(http.StatusBadRequest) + mapToTransactionModel(&input, &out) + assert.Equal(t, "success", out.Event.Outcome) + // derive from transaction fields - failure + input.Outcome.Reset() + input.Context.Response.StatusCode.Set(http.StatusInternalServerError) + mapToTransactionModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from transaction fields - unknown + input.Outcome.Reset() + input.Context.Response.StatusCode.Reset() + mapToTransactionModel(&input, &out) + assert.Equal(t, "unknown", out.Event.Outcome) + }) + + t.Run("page.URL", func(t *testing.T) { + var input transaction + input.Context.Page.URL.Set("https://my.site.test:9201") + var out model.APMEvent + mapToTransactionModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.URL.Full) + }) + + t.Run("page.referer", func(t *testing.T) { + var input transaction + input.Context.Page.Referer.Set("https://my.site.test:9201") + var out model.APMEvent + mapToTransactionModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.HTTP.Request.Referrer) + }) + + t.Run("http-headers", func(t *testing.T) { + var input transaction + input.Context.Request.Headers.Set(http.Header{"a": []string{"b"}, "c": []string{"d", "e"}}) + input.Context.Response.Headers.Set(http.Header{"f": []string{"g"}}) + var out model.APMEvent + mapToTransactionModel(&input, &out) + assert.Equal(t, common.MapStr{"a": []string{"b"}, "c": 
[]string{"d", "e"}}, out.HTTP.Request.Headers) + assert.Equal(t, common.MapStr{"f": []string{"g"}}, out.HTTP.Response.Headers) + }) + + t.Run("session", func(t *testing.T) { + var input transaction + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + input.Session.ID.Reset() + mapToTransactionModel(&input, &out) + assert.Equal(t, model.Session{}, out.Session) + + input.Session.ID.Set("session_id") + input.Session.Sequence.Set(123) + mapToTransactionModel(&input, &out) + assert.Equal(t, model.Session{ + ID: "session_id", + Sequence: 123, + }, out.Session) + }) +} diff --git a/model/modeldecoder/service.go b/model/modeldecoder/service.go deleted file mode 100644 index f91e79dce0d..00000000000 --- a/model/modeldecoder/service.go +++ /dev/null @@ -1,53 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" -) - -func decodeService(input map[string]interface{}, hasShortFieldNames bool, out *model.Service) { - if input == nil { - return - } - fieldName := field.Mapper(hasShortFieldNames) - decodeString(input, fieldName("name"), &out.Name) - decodeString(input, fieldName("version"), &out.Version) - decodeString(input, fieldName("environment"), &out.Environment) - if node := getObject(input, fieldName("node")); node != nil { - decodeString(node, fieldName("configured_name"), &out.Node.Name) - } - if agent := getObject(input, fieldName("agent")); agent != nil { - decodeString(agent, fieldName("name"), &out.Agent.Name) - decodeString(agent, fieldName("version"), &out.Agent.Version) - decodeString(agent, fieldName("ephemeral_id"), &out.Agent.EphemeralID) - } - if framework := getObject(input, fieldName("framework")); framework != nil { - decodeString(framework, fieldName("name"), &out.Framework.Name) - decodeString(framework, fieldName("version"), &out.Framework.Version) - } - if language := getObject(input, fieldName("language")); language != nil { - decodeString(language, fieldName("name"), &out.Language.Name) - decodeString(language, fieldName("version"), &out.Language.Version) - } - if runtime := getObject(input, fieldName("runtime")); runtime != nil { - decodeString(runtime, fieldName("name"), &out.Runtime.Name) - decodeString(runtime, fieldName("version"), &out.Runtime.Version) - } -} diff --git a/model/modeldecoder/service_test.go b/model/modeldecoder/service_test.go deleted file mode 100644 index 5bf1dfb865f..00000000000 --- a/model/modeldecoder/service_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -const ( - serviceName, serviceVersion, serviceEnvironment, serviceNodeName = "myservice", "5.1.3", "staging", "serviceABC" - langName, langVersion = "ecmascript", "8" - rtName, rtVersion = "node", "8.0.0" - fwName, fwVersion = "Express", "1.2.3" - agentName, agentVersion = "elastic-node", "1.0.0" -) - -func TestServiceDecode(t *testing.T) { - serviceName := "myService" - for _, test := range []struct { - input map[string]interface{} - s model.Service - }{ - {input: nil}, - { - input: map[string]interface{}{"name": 1234}, - s: model.Service{ - Language: model.Language{}, - Runtime: model.Runtime{}, - Framework: model.Framework{}, - Agent: model.Agent{}, - }, - }, - { - input: map[string]interface{}{ - "name": serviceName, - "version": "5.1.3", - "environment": "staging", - "language": map[string]interface{}{ - "name": "ecmascript", - "version": "8", - }, - "runtime": map[string]interface{}{ - "name": "node", - "version": "8.0.0", - }, - "framework": map[string]interface{}{ - "name": "Express", - "version": "1.2.3", - }, - "agent": map[string]interface{}{ - "name": "elastic-node", - "version": "1.0.0", - }, - }, - s: model.Service{ - Name: serviceName, - Version: serviceVersion, - Environment: serviceEnvironment, - Language: model.Language{ - Name: langName, - Version: langVersion, - }, - Runtime: model.Runtime{ - Name: rtName, - Version: rtVersion, - }, - Framework: model.Framework{ - Name: fwName, - Version: fwVersion, - }, - Agent: model.Agent{ - Name: agentName, - Version: agentVersion, - }, - }, - }, - } { - var service model.Service - decodeService(test.input, false, &service) - assert.Equal(t, test.s, service) - } -} diff --git a/model/modeldecoder/sourcemap.go b/model/modeldecoder/sourcemap.go deleted file mode 100644 index 20f576ae770..00000000000 --- a/model/modeldecoder/sourcemap.go +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
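The deleted decoders in this area (DecodeSourcemap below, decodeSpan, decodeStacktraceFrame) all lean on utility.ManualDecoder, which hands back zero values and remembers the first error so callers can run every lookup and validate once at the end. A minimal sketch of that pattern, assuming a simplified stand-in type (the real API is richer):

package main

import (
	"errors"
	"fmt"
)

// manualDecoder stands in for utility.ManualDecoder: each accessor
// returns a zero value on a type mismatch and records the first error.
type manualDecoder struct{ Err error }

func (d *manualDecoder) String(raw map[string]interface{}, key string) string {
	if s, ok := raw[key].(string); ok {
		return s
	}
	if d.Err == nil {
		d.Err = errors.New("error fetching field " + key)
	}
	return ""
}

func main() {
	raw := map[string]interface{}{"service_name": "opbeans", "sourcemap": 123}
	dec := manualDecoder{}
	name := dec.String(raw, "service_name")
	smap := dec.String(raw, "sourcemap") // wrong type: records the error
	fmt.Printf("name=%q sourcemap=%q err=%v\n", name, smap, dec.Err)
}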
- -package modeldecoder - -import ( - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/sourcemap/generated/schema" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" -) - -// SourcemapSchema is the compiled JSON Schema for validating sourcemaps. -// -// TODO(axw) make DecodeSourcemap validate against SourcemapSchema, and unexpose this. -// This will require changes to processor/asset/sourcemap. -var SourcemapSchema = validation.CreateSchema(schema.PayloadSchema, "sourcemap") - -// DecodeSourcemap decodes a sourcemap. -func DecodeSourcemap(raw map[string]interface{}) (transform.Transformable, error) { - decoder := utility.ManualDecoder{} - pa := model.Sourcemap{ - ServiceName: decoder.String(raw, "service_name"), - ServiceVersion: decoder.String(raw, "service_version"), - Sourcemap: decoder.String(raw, "sourcemap"), - BundleFilepath: decoder.String(raw, "bundle_filepath"), - } - return &pa, decoder.Err -} diff --git a/model/modeldecoder/span.go b/model/modeldecoder/span.go deleted file mode 100644 index 706759e4639..00000000000 --- a/model/modeldecoder/span.go +++ /dev/null @@ -1,285 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "net/http" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/model/span/generated/schema" - "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" -) - -var ( - spanSchema = validation.CreateSchema(schema.ModelSchema, "span") - rumV3SpanSchema = validation.CreateSchema(schema.RUMV3Schema, "span") -) - -// decodeRUMV3Span decodes a v3 RUM span, and optional parent index. -// If parent index wasn't specified, then the value will be negative. -func decodeRUMV3Span(input Input) (_ *model.Span, parentIndex int, _ error) { - span, parentIndex, err := decodeSpan(input, rumV3SpanSchema) - if err != nil { - return nil, -1, err - } - span.RUM = true - return span, parentIndex, nil -} - -// DecodeRUMV2Span decodes a v2 RUM span. -func DecodeRUMV2Span(input Input, batch *model.Batch) error { - span, _, err := decodeSpan(input, spanSchema) - if err != nil { - return err - } - span.RUM = true - batch.Spans = append(batch.Spans, span) - return nil -} - -// DecodeSpan decodes a v2 span. 
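One detail of the decodeSpan function below is the compound-type fallback: when neither subtype nor action is sent, the dotted type string is split into the three fields. A runnable sketch of that rule (the helper name is illustrative):

package main

import (
	"fmt"
	"strings"
)

// splitCompoundType applies the same rule as the deleted decodeSpan:
// the first segment is the type, the second the subtype, and any
// remaining segments are re-joined as the action.
func splitCompoundType(compound string) (typ, subtype, action string) {
	t := strings.Split(compound, ".")
	typ = t[0]
	if len(t) > 1 {
		subtype = t[1]
	}
	if len(t) > 2 {
		action = strings.Join(t[2:], ".")
	}
	return typ, subtype, action
}

func main() {
	fmt.Println(splitCompoundType("db.postgresql.query.custom"))
	// Output: db postgresql query.custom
}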
-func DecodeSpan(input Input, batch *model.Batch) error { - span, _, err := decodeSpan(input, spanSchema) - if err != nil { - return err - } - batch.Spans = append(batch.Spans, span) - return nil -} - -func decodeSpan(input Input, schema *jsonschema.Schema) (_ *model.Span, parentIndex int, _ error) { - raw, err := validation.ValidateObject(input.Raw, schema) - if err != nil { - return nil, -1, errors.Wrap(err, "failed to validate span") - } - - fieldName := field.Mapper(input.Config.HasShortFieldNames) - decoder := utility.ManualDecoder{} - event := model.Span{ - Metadata: input.Metadata, - Name: decoder.String(raw, fieldName("name")), - Start: decoder.Float64Ptr(raw, fieldName("start")), - RepresentativeCount: safeInverse(decoder.Float64Ptr(raw, fieldName("sample_rate"))), - Duration: decoder.Float64(raw, fieldName("duration")), - Sync: decoder.BoolPtr(raw, fieldName("sync")), - Timestamp: decoder.TimeEpochMicro(raw, fieldName("timestamp")), - ID: decoder.String(raw, fieldName("id")), - ChildIDs: decoder.StringArr(raw, "child_ids"), - Type: decoder.String(raw, fieldName("type")), - Subtype: decoder.StringPtr(raw, fieldName("subtype")), - Action: decoder.StringPtr(raw, fieldName("action")), - } - decodeString(raw, fieldName("parent_id"), &event.ParentID) - decodeString(raw, fieldName("trace_id"), &event.TraceID) - decodeString(raw, fieldName("transaction_id"), &event.TransactionID) - - ctx := decoder.MapStr(raw, fieldName("context")) - if ctx != nil { - if labels, ok := ctx[fieldName("tags")].(map[string]interface{}); ok { - event.Labels = labels - } - - db, err := decodeDB(ctx, decoder.Err) - if err != nil { - return nil, -1, err - } - event.DB = db - - http, err := decodeSpanHTTP(ctx, input.Config.HasShortFieldNames, decoder.Err) - if err != nil { - return nil, -1, err - } - event.HTTP = http - - dest, destService, err := decodeDestination(ctx, input.Config.HasShortFieldNames, decoder.Err) - if err != nil { - return nil, -1, err - } - event.Destination = dest - event.DestinationService = destService - - if s := getObject(ctx, "service"); s != nil { - var service model.Service - decodeService(s, input.Config.HasShortFieldNames, &service) - event.Service = &service - } - - if event.Message, err = decodeMessage(ctx, decoder.Err); err != nil { - return nil, -1, err - } - - if input.Config.Experimental { - if obj, set := ctx["experimental"]; set { - event.Experimental = obj - } - } - } - decodeString(raw, fieldName("outcome"), &event.Outcome) - if event.Outcome == "" { - if event.HTTP != nil && event.HTTP.StatusCode != nil { - statusCode := *event.HTTP.StatusCode - if statusCode >= http.StatusBadRequest { - event.Outcome = "failure" - } else { - event.Outcome = "success" - } - } else { - event.Outcome = "unknown" - } - } - - var stacktr *model.Stacktrace - stacktr, decoder.Err = decodeStacktrace(raw[fieldName("stacktrace")], input.Config.HasShortFieldNames, decoder.Err) - if decoder.Err != nil { - return nil, -1, decoder.Err - } - if stacktr != nil { - event.Stacktrace = *stacktr - } - - if event.Subtype == nil && event.Action == nil { - sep := "." 
- t := strings.Split(event.Type, sep) - event.Type = t[0] - if len(t) > 1 { - event.Subtype = &t[1] - } - if len(t) > 2 { - action := strings.Join(t[2:], sep) - event.Action = &action - } - } - - if event.Timestamp.IsZero() { - timestamp := input.RequestTime - if event.Start != nil { - // adjust timestamp to be reqTime + start - timestamp = timestamp.Add(time.Duration(float64(time.Millisecond) * *event.Start)) - } - event.Timestamp = timestamp - } - - // parent_idx comes from RUM V3 payloads only. It is used only during - // decoding to populate ParentID. We initialise to -1 to indicate lack - // of parent index. - parentIndex = -1 - decodeInt(raw, fieldName("parent_idx"), &parentIndex) - - return &event, parentIndex, nil -} - -func decodeDB(input interface{}, err error) (*model.DB, error) { - if input == nil || err != nil { - return nil, err - } - raw, ok := input.(map[string]interface{}) - if !ok { - return nil, errors.New("invalid type for db") - } - decoder := utility.ManualDecoder{} - dbInput := decoder.MapStr(raw, "db") - if decoder.Err != nil || dbInput == nil { - return nil, decoder.Err - } - db := model.DB{ - Instance: decoder.StringPtr(dbInput, "instance"), - Statement: decoder.StringPtr(dbInput, "statement"), - Type: decoder.StringPtr(dbInput, "type"), - UserName: decoder.StringPtr(dbInput, "user"), - Link: decoder.StringPtr(dbInput, "link"), - RowsAffected: decoder.IntPtr(dbInput, "rows_affected"), - } - return &db, decoder.Err -} - -func decodeSpanHTTP(input interface{}, hasShortFieldNames bool, err error) (*model.HTTP, error) { - if input == nil || err != nil { - return nil, err - } - raw, ok := input.(map[string]interface{}) - if !ok { - return nil, errors.New("invalid type for http") - } - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(hasShortFieldNames) - httpInput := decoder.MapStr(raw, fieldName("http")) - if decoder.Err != nil || httpInput == nil { - return nil, decoder.Err - } - method := decoder.StringPtr(httpInput, fieldName("method")) - if method != nil { - *method = strings.ToLower(*method) - } - minimalResp, err := decodeMinimalHTTPResponse(httpInput, hasShortFieldNames, decoder.Err) - if err != nil { - return nil, err - } - return &model.HTTP{ - URL: decoder.StringPtr(httpInput, fieldName("url")), - StatusCode: decoder.IntPtr(httpInput, fieldName("status_code")), - Method: method, - Response: minimalResp, - }, nil -} - -func decodeDestination(input interface{}, hasShortFieldNames bool, err error) (*model.Destination, *model.DestinationService, error) { - if input == nil || err != nil { - return nil, nil, err - } - raw, ok := input.(map[string]interface{}) - if !ok { - return nil, nil, errors.New("invalid type for destination") - } - fieldName := field.Mapper(hasShortFieldNames) - decoder := utility.ManualDecoder{} - destinationInput := decoder.MapStr(raw, fieldName("destination")) - if decoder.Err != nil || destinationInput == nil { - return nil, nil, decoder.Err - } - serviceInput := decoder.MapStr(destinationInput, fieldName("service")) - if decoder.Err != nil { - return nil, nil, decoder.Err - } - var service *model.DestinationService - if serviceInput != nil { - service = &model.DestinationService{ - Type: decoder.StringPtr(serviceInput, fieldName("type")), - Name: decoder.StringPtr(serviceInput, fieldName("name")), - Resource: decoder.StringPtr(serviceInput, fieldName("resource")), - } - } - dest := model.Destination{ - Address: decoder.StringPtr(destinationInput, fieldName("address")), - Port: decoder.IntPtr(destinationInput, 
fieldName("port")), - } - return &dest, service, decoder.Err -} - -func safeInverse(f *float64) float64 { - if f == nil || *f == 0 { - return 0 - } - return 1 / *f -} diff --git a/model/modeldecoder/span_test.go b/model/modeldecoder/span_test.go deleted file mode 100644 index 603a0f6080f..00000000000 --- a/model/modeldecoder/span_test.go +++ /dev/null @@ -1,374 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/tests" -) - -func TestDecodeSpan(t *testing.T) { - requestTime := time.Now() - spanTime := time.Date(2018, 5, 30, 19, 53, 17, 134*1e6, time.UTC) - timestampEpoch := json.Number(fmt.Sprintf("%d", spanTime.UnixNano()/1000)) - id, parentID := "0000000000000000", "FFFFFFFFFFFFFFFF" - transactionID, traceID := "ABCDEF0123456789", "01234567890123456789abcdefABCDEF" - name, spType := "foo", "db" - start, duration := 1.2, 3.4 - method, statusCode, url := "get", 200, "http://localhost" - badRequestStatusCode := 400 - instance, statement, dbType, user, link, rowsAffected := "db01", "select *", "sql", "joe", "other.db.com", 34 - address, port := "localhost", 8080 - destServiceType, destServiceName, destServiceResource := "db", "elasticsearch", "elasticsearch" - outcome := "success" - httpCtx := map[string]interface{}{"method": "GET", "status_code": json.Number("200"), "url": url} - context := map[string]interface{}{ - "a": "b", - "tags": map[string]interface{}{"a": "tag", "tag_key": 17}, - "http": httpCtx, - "db": map[string]interface{}{ - "instance": instance, "statement": statement, "type": dbType, - "user": user, "link": link, "rows_affected": json.Number("34")}, - "destination": map[string]interface{}{ - "address": address, - "port": float64(port), - "service": map[string]interface{}{ - "type": destServiceType, - "name": destServiceName, - "resource": destServiceResource, - }, - }, - "message": map[string]interface{}{ - "queue": map[string]interface{}{"name": "foo"}, - "age": map[string]interface{}{"ms": json.Number("1577958057123")}}, - } - subtype := "postgresql" - action, action2 := "query", "query.custom" - stacktrace := []interface{}{map[string]interface{}{ - "filename": "file", - }} - - metadata := model.Metadata{ - Service: model.Service{Name: "foo"}, - } - - // baseInput holds the minimal valid input. Test-specific input is added to/removed from this. 
- baseInput := common.MapStr{ - "id": id, "type": spType, "name": name, "duration": duration, "trace_id": traceID, - } - - for name, test := range map[string]struct { - input map[string]interface{} - cfg Config - e *model.Span - }{ - "minimal payload": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: &action2, - Duration: duration, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - Outcome: "unknown", - }, - }, - "no timestamp specified, request time + start used": { - input: map[string]interface{}{ - "name": name, "type": "db", "duration": duration, "parent_id": parentID, "trace_id": traceID, "id": id, - "start": start, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Duration: duration, - ParentID: parentID, - ID: id, - TraceID: traceID, - Start: &start, - Timestamp: requestTime.Add(time.Duration(start * float64(time.Millisecond))), - Outcome: "unknown", - }, - }, - "event experimental=false": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "start": start, "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, "transaction_id": transactionID, - "context": map[string]interface{}{"experimental": 123}, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: &action2, - Start: &start, - Duration: duration, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - TransactionID: transactionID, - Outcome: "unknown", - }, - }, - "event experimental=true, no experimental payload": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "start": start, "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, "transaction_id": transactionID, - "context": map[string]interface{}{"foo": 123}, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: &action2, - Start: &start, - Duration: duration, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - TransactionID: transactionID, - Outcome: "unknown", - }, - cfg: Config{Experimental: true}, - }, - "event experimental=true": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "start": start, "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, "transaction_id": transactionID, - "context": map[string]interface{}{"experimental": 123}, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: &action2, - Start: &start, - Duration: duration, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - TransactionID: transactionID, - Experimental: 123, - Outcome: "unknown", - }, - cfg: Config{Experimental: true}, - }, - "with derived success outcome": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, - "context": map[string]interface{}{"http": httpCtx}, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: 
&action2, - Duration: duration, - HTTP: &model.HTTP{Method: &method, StatusCode: &statusCode, URL: &url}, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - Outcome: "success", - }, - }, - "with derived failure outcome": { - input: map[string]interface{}{ - "name": name, "type": "db.postgresql.query.custom", "duration": duration, "parent_id": parentID, - "timestamp": timestampEpoch, "id": id, "trace_id": traceID, - "context": map[string]interface{}{"http": map[string]interface{}{"status_code": json.Number("400")}}, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "db", - Subtype: &subtype, - Action: &action2, - Duration: duration, - HTTP: &model.HTTP{StatusCode: &badRequestStatusCode}, - Timestamp: spanTime, - ParentID: parentID, - ID: id, - TraceID: traceID, - Outcome: "failure", - }, - }, - "full valid payload": { - input: map[string]interface{}{ - "name": name, "type": "messaging", "subtype": subtype, "action": action, "start": start, - "duration": duration, "context": context, "timestamp": timestampEpoch, "stacktrace": stacktrace, - "id": id, "parent_id": parentID, "trace_id": traceID, "transaction_id": transactionID, - "outcome": outcome, - "sample_rate": 0.2, - }, - e: &model.Span{ - Metadata: metadata, - Name: name, - Type: "messaging", - Subtype: &subtype, - Action: &action, - Start: &start, - Duration: duration, - Timestamp: spanTime, - Outcome: outcome, - Stacktrace: model.Stacktrace{ - &model.StacktraceFrame{Filename: tests.StringPtr("file")}, - }, - Labels: common.MapStr{"a": "tag", "tag_key": 17}, - ID: id, - TraceID: traceID, - ParentID: parentID, - TransactionID: transactionID, - RepresentativeCount: 5, - HTTP: &model.HTTP{Method: &method, StatusCode: &statusCode, URL: &url}, - DB: &model.DB{ - Instance: &instance, - Statement: &statement, - Type: &dbType, - UserName: &user, - Link: &link, - RowsAffected: &rowsAffected, - }, - Destination: &model.Destination{Address: &address, Port: &port}, - DestinationService: &model.DestinationService{ - Type: &destServiceType, - Name: &destServiceName, - Resource: &destServiceResource, - }, - Message: &model.Message{ - QueueName: tests.StringPtr("foo"), - AgeMillis: tests.IntPtr(1577958057123)}, - }, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - input[k] = v - } - batch := &model.Batch{} - err := DecodeSpan(Input{ - Raw: input, - RequestTime: requestTime, - Metadata: metadata, - Config: test.cfg, - }, batch) - require.NoError(t, err) - assert.Equal(t, test.e, batch.Spans[0]) - }) - } -} - -func TestDecodeSpanInvalid(t *testing.T) { - err := DecodeSpan(Input{Raw: nil}, &model.Batch{}) - require.EqualError(t, err, "failed to validate span: error validating JSON: input missing") - - err = DecodeSpan(Input{Raw: ""}, &model.Batch{}) - require.EqualError(t, err, "failed to validate span: error validating JSON: invalid input type") - - // baseInput holds the minimal valid input. Test-specific input is added to this. 
- baseInput := map[string]interface{}{ - "type": "type", - "name": "name", - "id": "id", "trace_id": "trace_id", "transaction_id": "transaction_id", "parent_id": "parent_id", - "start": 0.0, "duration": 123.0, - } - err = DecodeSpan(Input{Raw: baseInput}, &model.Batch{}) - require.NoError(t, err) - - for name, test := range map[string]struct { - input map[string]interface{} - err string - }{ - "transaction id wrong type": { - input: map[string]interface{}{"transaction_id": 123}, - err: `type.*expected string or null, but got number`, - }, - "no trace_id": { - input: map[string]interface{}{"trace_id": nil}, - err: `missing properties: "trace_id"`, - }, - "no id": { - input: map[string]interface{}{"id": nil}, - err: `missing properties: "id"`, - }, - "no parent_id": { - input: map[string]interface{}{"parent_id": nil}, - err: `missing properties: "parent_id"`, - }, - "invalid stacktrace": { - input: map[string]interface{}{"stacktrace": []interface{}{"foo"}}, - err: `stacktrace.*expected object, but got string`, - }, - "negative duration": { - input: map[string]interface{}{"duration": -1.0}, - err: "duration.*must be >= 0 but found -1", - }, - "invalid outcome": { - input: map[string]interface{}{"outcome": `¯\_(ツ)_/¯`}, - err: `outcome.*must be one of , "success", "failure", "unknown"`, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - if v == nil { - delete(input, k) - } else { - input[k] = v - } - } - err := DecodeSpan(Input{Raw: input}, &model.Batch{}) - require.Error(t, err) - assert.Regexp(t, test.err, err.Error()) - }) - } -} diff --git a/model/modeldecoder/stacktrace.go b/model/modeldecoder/stacktrace.go deleted file mode 100644 index 5c54f476baa..00000000000 --- a/model/modeldecoder/stacktrace.go +++ /dev/null @@ -1,41 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "errors" - - "github.com/elastic/apm-server/model" -) - -var errInvalidStacktraceType = errors.New("invalid type for stacktrace") - -func decodeStacktrace(input interface{}, hasShortFieldNames bool, err error) (*model.Stacktrace, error) { - if input == nil || err != nil { - return nil, err - } - raw, ok := input.([]interface{}) - if !ok { - return nil, errInvalidStacktraceType - } - st := make(model.Stacktrace, len(raw)) - for idx, fr := range raw { - st[idx], err = decodeStacktraceFrame(fr, hasShortFieldNames, err) - } - return &st, err -} diff --git a/model/modeldecoder/stacktrace_frame.go b/model/modeldecoder/stacktrace_frame.go deleted file mode 100644 index 77d1fa1a740..00000000000 --- a/model/modeldecoder/stacktrace_frame.go +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" - - "github.com/pkg/errors" - - "github.com/elastic/apm-server/utility" -) - -var ( - errInvalidStacktraceFrameType = errors.New("invalid type for stacktrace frame") -) - -func decodeStacktraceFrame(input interface{}, hasShortFieldNames bool, err error) (*model.StacktraceFrame, error) { - if input == nil || err != nil { - return nil, err - } - raw, ok := input.(map[string]interface{}) - if !ok { - return nil, errInvalidStacktraceFrameType - } - decoder := utility.ManualDecoder{} - fieldName := field.Mapper(hasShortFieldNames) - frame := model.StacktraceFrame{ - AbsPath: decoder.StringPtr(raw, fieldName("abs_path")), - Filename: decoder.StringPtr(raw, fieldName("filename")), - Classname: decoder.StringPtr(raw, fieldName("classname")), - Lineno: decoder.IntPtr(raw, fieldName("lineno")), - Colno: decoder.IntPtr(raw, fieldName("colno")), - ContextLine: decoder.StringPtr(raw, fieldName("context_line")), - Module: decoder.StringPtr(raw, fieldName("module")), - Function: decoder.StringPtr(raw, fieldName("function")), - LibraryFrame: decoder.BoolPtr(raw, "library_frame"), - Vars: decoder.MapStr(raw, "vars"), - PreContext: decoder.StringArr(raw, fieldName("pre_context")), - PostContext: decoder.StringArr(raw, fieldName("post_context")), - } - return &frame, decoder.Err -} diff --git a/model/modeldecoder/stacktrace_frame_test.go b/model/modeldecoder/stacktrace_frame_test.go deleted file mode 100644 index b4b7eecae5a..00000000000 --- a/model/modeldecoder/stacktrace_frame_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
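The decodeStacktrace loop above threads its running error through every decodeStacktraceFrame call, which returns early once the error is set, so decoding stops at the first bad frame while the slice keeps its full length. A minimal sketch of that error-threading, with a simplified frame type standing in for model.StacktraceFrame:

package main

import (
	"errors"
	"fmt"
)

// decodeFrame mimics decodeStacktraceFrame's contract: if an error is
// already set it returns immediately, so a failure poisons every
// subsequent frame while earlier frames are preserved.
func decodeFrame(in interface{}, err error) (*int, error) {
	if err != nil {
		return nil, err
	}
	n, ok := in.(int)
	if !ok {
		return nil, errors.New("invalid type for stacktrace frame")
	}
	return &n, nil
}

func main() {
	raw := []interface{}{1, "not a frame", 3}
	frames := make([]*int, len(raw))
	var err error
	for idx, fr := range raw {
		frames[idx], err = decodeFrame(fr, err)
	}
	fmt.Println(frames[0] != nil, frames[1], frames[2], err)
	// Output: true <nil> <nil> invalid type for stacktrace frame
}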
- -package modeldecoder - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestStacktraceFrameDecode(t *testing.T) { - filename, classname, path, context, fct, module := "some file", "foo", "path", "contet", "fct", "module" - lineno, colno := 1, 55 - libraryFrame := true - vars := map[string]interface{}{"a": 1} - preContext, postContext := []string{"a"}, []string{"b", "c"} - for _, test := range []struct { - input interface{} - err, inpErr error - s *model.StacktraceFrame - }{ - {input: nil, err: nil, s: nil}, - {input: nil, inpErr: errors.New("a"), err: errors.New("a"), s: nil}, - {input: "", err: errInvalidStacktraceFrameType, s: nil}, - { - input: map[string]interface{}{}, - s: &model.StacktraceFrame{ - AbsPath: nil, Lineno: nil, Colno: nil, - ContextLine: nil, Module: nil, Function: nil, LibraryFrame: nil, - Vars: nil, PreContext: nil, PostContext: nil}, - }, - { - input: map[string]interface{}{ - "abs_path": path, - "filename": filename, - "classname": classname, - "lineno": 1.0, - "colno": 55.0, - "context_line": context, - "function": fct, - "module": module, - "library_frame": libraryFrame, - "vars": vars, - "pre_context": []interface{}{"a"}, - "post_context": []interface{}{"b", "c"}, - }, - err: nil, - s: &model.StacktraceFrame{ - AbsPath: &path, - Filename: &filename, - Classname: &classname, - Lineno: &lineno, - Colno: &colno, - ContextLine: &context, - Module: &module, - Function: &fct, - LibraryFrame: &libraryFrame, - Vars: vars, - PreContext: preContext, - PostContext: postContext, - }, - }, - } { - frame, err := decodeStacktraceFrame(test.input, false, test.inpErr) - assert.Equal(t, test.s, frame) - assert.Equal(t, test.err, err) - } -} diff --git a/model/modeldecoder/stacktrace_test.go b/model/modeldecoder/stacktrace_test.go deleted file mode 100644 index adecd79d135..00000000000 --- a/model/modeldecoder/stacktrace_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestStacktraceDecode(t *testing.T) { - l1 := 1 - for _, test := range []struct { - input interface{} - err, inpErr error - s *model.Stacktrace - }{ - {input: nil, err: nil, s: nil}, - {input: nil, inpErr: errors.New("msg"), err: errors.New("msg"), s: nil}, - {input: "", err: errors.New("invalid type for stacktrace"), s: nil}, - { - input: []interface{}{"foo"}, - err: errInvalidStacktraceFrameType, - s: &model.Stacktrace{nil}, - }, - { - input: []interface{}{map[string]interface{}{"lineno": 1.0}}, - err: nil, - s: &model.Stacktrace{ - &model.StacktraceFrame{Lineno: &l1}, - }, - }, - } { - s, err := decodeStacktrace(test.input, false, test.inpErr) - assert.Equal(t, test.s, s) - assert.Equal(t, test.err, err) - } -} diff --git a/model/modeldecoder/system.go b/model/modeldecoder/system.go deleted file mode 100644 index 86ce28a36a1..00000000000 --- a/model/modeldecoder/system.go +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "net" - - "github.com/elastic/apm-server/model" -) - -func decodeSystem(input map[string]interface{}, out *model.System) { - if input == nil { - return - } - decodeString(input, "platform", &out.Platform) - decodeString(input, "architecture", &out.Architecture) - - var ipString string - if decodeString(input, "ip", &ipString) { - out.IP = net.ParseIP(ipString) - } - - decodeContainer(getObject(input, "container"), &out.Container) - decodeKubernetes(getObject(input, "kubernetes"), &out.Kubernetes) - - decodeString(input, "detected_hostname", &out.DetectedHostname) - decodeString(input, "configured_hostname", &out.ConfiguredHostname) - if out.DetectedHostname == "" && out.ConfiguredHostname == "" { - decodeString(input, "hostname", &out.DetectedHostname) - } -} diff --git a/model/modeldecoder/system_test.go b/model/modeldecoder/system_test.go deleted file mode 100644 index 3cc42a8d75b..00000000000 --- a/model/modeldecoder/system_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
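A short sketch of the hostname precedence that the deleted decodeSystem above implements and the tests below exercise (the map-based helper is illustrative only): the legacy "hostname" key is consulted only when neither detected_hostname nor configured_hostname was sent, in which case it is treated as the detected hostname.

package main

import "fmt"

// resolveHostnames applies decodeSystem's precedence for the three
// hostname keys.
func resolveHostnames(input map[string]string) (detected, configured string) {
	detected = input["detected_hostname"]
	configured = input["configured_hostname"]
	if detected == "" && configured == "" {
		detected = input["hostname"]
	}
	return detected, configured
}

func main() {
	fmt.Println(resolveHostnames(map[string]string{"hostname": "host"}))
	// host  -> legacy key becomes the detected hostname
	fmt.Println(resolveHostnames(map[string]string{"hostname": "host", "configured_hostname": "custom hostname"}))
	//  custom hostname -> legacy key is ignored
}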
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "fmt" - "net" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/approvaltest" - "github.com/elastic/apm-server/model" -) - -func TestSystem(t *testing.T) { - host, configured, detected := "host", "custom hostname", "detected hostname" - arch, platform, ip, containerID, namespace := "amd", "osx", "127.0.0.1", "1234", "staging" - nodename, podname, podUID := "a.node", "a.pod", "b.podID" - - for name, test := range map[string]struct { - input map[string]interface{} - s model.System - }{ - "nil": {input: nil}, - "empty": {input: map[string]interface{}{}}, - "empty ip": { - input: map[string]interface{}{"ip": ""}, - s: model.System{IP: net.ParseIP("")}, - }, - "hostname": { - input: map[string]interface{}{"hostname": host}, - s: model.System{DetectedHostname: host}, - }, - "detected hostname": { - // in practice either hostname or detected_hostname should be sent, but in theory both can be sent, so - // testing that the server does process the proper one in such a case. - input: map[string]interface{}{ - "hostname": host, "detected_hostname": detected, - }, - s: model.System{DetectedHostname: detected}, - }, - "ignored hostname": { - // in practice either hostname or configured_hostname should be sent, but in theory both can be sent, so - // testing that the server does process the proper one in such a case. 
- input: map[string]interface{}{ - "hostname": host, "configured_hostname": configured, - }, - s: model.System{ConfiguredHostname: configured}, - }, - "k8s nodename with hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"node": map[string]interface{}{"name": nodename}}, - "hostname": host, - }, - s: model.System{Kubernetes: model.Kubernetes{NodeName: nodename}, DetectedHostname: host}, - }, - "k8s nodename with configured hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"node": map[string]interface{}{"name": nodename}}, - "hostname": host, "configured_hostname": configured, - }, - s: model.System{Kubernetes: model.Kubernetes{NodeName: nodename}, ConfiguredHostname: configured}, - }, - "k8s nodename with detected hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"node": map[string]interface{}{"name": nodename}}, - "hostname": host, "detected_hostname": detected, - }, - s: model.System{Kubernetes: model.Kubernetes{NodeName: nodename}, DetectedHostname: detected}, - }, - "k8s podname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"pod": map[string]interface{}{"name": podname}}, - "detected_hostname": detected, - }, - s: model.System{Kubernetes: model.Kubernetes{PodName: podname}, DetectedHostname: detected}, - }, - "k8s podUID": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"pod": map[string]interface{}{"uid": podUID}}, - "detected_hostname": detected, - }, - s: model.System{Kubernetes: model.Kubernetes{PodUID: podUID}, DetectedHostname: detected}, - }, - "k8s_namespace": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"namespace": namespace}, - "detected_hostname": detected, - }, - s: model.System{Kubernetes: model.Kubernetes{Namespace: namespace}, DetectedHostname: detected}, - }, - "k8s podname with configured hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"pod": map[string]interface{}{"name": podname}}, - "detected_hostname": detected, - "configured_hostname": configured, - }, - s: model.System{Kubernetes: model.Kubernetes{PodName: podname}, DetectedHostname: detected, ConfiguredHostname: configured}, - }, - "k8s podUID with configured hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"pod": map[string]interface{}{"uid": podUID}}, - "detected_hostname": detected, - "configured_hostname": configured, - }, - s: model.System{Kubernetes: model.Kubernetes{PodUID: podUID}, DetectedHostname: detected, ConfiguredHostname: configured}, - }, - "k8s namespace with configured hostname": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{"namespace": namespace}, - "detected_hostname": detected, - "configured_hostname": configured, - }, - s: model.System{Kubernetes: model.Kubernetes{Namespace: namespace}, DetectedHostname: detected, ConfiguredHostname: configured}, - }, - "k8s empty": { - input: map[string]interface{}{ - "kubernetes": map[string]interface{}{}, - "detected_hostname": detected, - "configured_hostname": configured, - }, - s: model.System{Kubernetes: model.Kubernetes{}, DetectedHostname: detected, ConfiguredHostname: configured}, - }, - "full hostname info": { - input: map[string]interface{}{ - "detected_hostname": detected, - "configured_hostname": configured, - }, - s: model.System{DetectedHostname: detected, ConfiguredHostname: configured}, - }, - "full": { - input: map[string]interface{}{ - "platform": 
platform, - "architecture": arch, - "ip": ip, - "container": map[string]interface{}{"id": containerID}, - "kubernetes": map[string]interface{}{ - "namespace": namespace, - "node": map[string]interface{}{"name": nodename}, - "pod": map[string]interface{}{ - "uid": podUID, - "name": podname, - }, - }, - "configured_hostname": configured, - "detected_hostname": detected, - }, - s: model.System{ - DetectedHostname: detected, - ConfiguredHostname: configured, - Architecture: arch, - Platform: platform, - IP: net.ParseIP(ip), - Container: model.Container{ID: containerID}, - Kubernetes: model.Kubernetes{Namespace: namespace, NodeName: nodename, PodName: podname, PodUID: podUID}, - }, - }, - } { - - t.Run(name, func(t *testing.T) { - var system model.System - decodeSystem(test.input, &system) - assert.Equal(t, test.s, system) - - resultName := fmt.Sprintf("test_approved_system/transform_%s", strings.ReplaceAll(name, " ", "_")) - - fields := make(common.MapStr) - metadata := model.Metadata{System: system} - metadata.Set(fields) - - resultJSON, err := json.Marshal(fields["host"]) - require.NoError(t, err) - approvaltest.ApproveJSON(t, resultName, resultJSON) - }) - } -} diff --git a/model/modeldecoder/test_approved_system/transform_detected_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_detected_hostname.approved.json deleted file mode 100644 index a52e2108c94..00000000000 --- a/model/modeldecoder/test_approved_system/transform_detected_hostname.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "detected hostname", - "name": "detected hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_empty.approved.json b/model/modeldecoder/test_approved_system/transform_empty.approved.json deleted file mode 100644 index 19765bd501b..00000000000 --- a/model/modeldecoder/test_approved_system/transform_empty.approved.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/model/modeldecoder/test_approved_system/transform_empty_ip.approved.json b/model/modeldecoder/test_approved_system/transform_empty_ip.approved.json deleted file mode 100644 index 19765bd501b..00000000000 --- a/model/modeldecoder/test_approved_system/transform_empty_ip.approved.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/model/modeldecoder/test_approved_system/transform_full.approved.json b/model/modeldecoder/test_approved_system/transform_full.approved.json deleted file mode 100644 index 827e3bda11e..00000000000 --- a/model/modeldecoder/test_approved_system/transform_full.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "architecture": "amd", - "hostname": "a.node", - "ip": "127.0.0.1", - "name": "custom hostname", - "os": { - "platform": "osx" - } -} diff --git a/model/modeldecoder/test_approved_system/transform_full_hostname_info.approved.json b/model/modeldecoder/test_approved_system/transform_full_hostname_info.approved.json deleted file mode 100644 index 7f48f083378..00000000000 --- a/model/modeldecoder/test_approved_system/transform_full_hostname_info.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "detected hostname", - "name": "custom hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_hostname.approved.json deleted file mode 100644 index a6adfea2466..00000000000 --- a/model/modeldecoder/test_approved_system/transform_hostname.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "host", - "name": "host" -} diff --git 
a/model/modeldecoder/test_approved_system/transform_k8s_empty.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_empty.approved.json deleted file mode 100644 index 7f48f083378..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_empty.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "detected hostname", - "name": "custom hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_namespace.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_namespace.approved.json deleted file mode 100644 index 19765bd501b..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_namespace.approved.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/model/modeldecoder/test_approved_system/transform_k8s_namespace_with_configured_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_namespace_with_configured_hostname.approved.json deleted file mode 100644 index e9b4133ce07..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_namespace_with_configured_hostname.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "name": "custom hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_configured_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_configured_hostname.approved.json deleted file mode 100644 index 5c86f3e2521..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_configured_hostname.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "a.node", - "name": "custom hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_detected_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_detected_hostname.approved.json deleted file mode 100644 index f9ae86749c4..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_detected_hostname.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "a.node", - "name": "a.node" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_hostname.approved.json deleted file mode 100644 index f9ae86749c4..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_nodename_with_hostname.approved.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "hostname": "a.node", - "name": "a.node" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_podUID.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_podUID.approved.json deleted file mode 100644 index 19765bd501b..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_podUID.approved.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/model/modeldecoder/test_approved_system/transform_k8s_podUID_with_configured_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_podUID_with_configured_hostname.approved.json deleted file mode 100644 index e9b4133ce07..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_podUID_with_configured_hostname.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "name": "custom hostname" -} diff --git a/model/modeldecoder/test_approved_system/transform_k8s_podname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_podname.approved.json deleted file mode 100644 index 19765bd501b..00000000000 --- 
a/model/modeldecoder/test_approved_system/transform_k8s_podname.approved.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/model/modeldecoder/test_approved_system/transform_k8s_podname_with_configured_hostname.approved.json b/model/modeldecoder/test_approved_system/transform_k8s_podname_with_configured_hostname.approved.json deleted file mode 100644 index e9b4133ce07..00000000000 --- a/model/modeldecoder/test_approved_system/transform_k8s_podname_with_configured_hostname.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "name": "custom hostname" -} diff --git a/model/modeldecoder/transaction.go b/model/modeldecoder/transaction.go deleted file mode 100644 index 63b32742b2f..00000000000 --- a/model/modeldecoder/transaction.go +++ /dev/null @@ -1,267 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "net/http" - - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/model/transaction/generated/schema" - "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" -) - -var ( - transactionSchema = validation.CreateSchema(schema.ModelSchema, "transaction") - rumV3TransactionSchema = validation.CreateSchema(schema.RUMV3Schema, "transaction") -) - -// DecodeRUMV3Transaction decodes a v3 RUM transaction. -func DecodeRUMV3Transaction(input Input, batch *model.Batch) error { - fieldName := field.Mapper(input.Config.HasShortFieldNames) - transaction, err := decodeTransaction(input, rumV3TransactionSchema) - if err != nil { - return err - } - raw := input.Raw.(map[string]interface{}) - spans, err := decodeRUMV3Spans(raw, input, transaction) - if err != nil { - return err - } - transaction.Marks = decodeRUMV3Marks(getObject(raw, fieldName("marks")), input.Config) - metricsets, err := decodeRUMV3Metricsets(raw, input, transaction) - if err != nil { - return nil - } - batch.Transactions = append(batch.Transactions, transaction) - batch.Spans = append(batch.Spans, spans...) - batch.Metricsets = append(batch.Metricsets, metricsets...) 
- return nil -} - -func decodeRUMV3Metricsets(raw map[string]interface{}, input Input, tr *model.Transaction) ([]*model.Metricset, error) { - decoder := &utility.ManualDecoder{} - fieldName := field.Mapper(input.Config.HasShortFieldNames) - rawMetricsets := decoder.InterfaceArr(raw, fieldName("metricset")) - var metricsets = make([]*model.Metricset, len(rawMetricsets)) - for idx, rawMetricset := range rawMetricsets { - metricset, err := decodeMetricset(Input{ - Raw: rawMetricset, - RequestTime: input.RequestTime, - Metadata: input.Metadata, - Config: input.Config, - }, rumV3Schema) - if err != nil { - return metricsets, err - } - metricset.Transaction = model.MetricsetTransaction{ - Type: tr.Type, - Name: tr.Name, - Result: tr.Result, - } - metricsets[idx] = metricset - } - return metricsets, nil -} - -func decodeRUMV3Spans(raw map[string]interface{}, input Input, tr *model.Transaction) ([]*model.Span, error) { - decoder := &utility.ManualDecoder{} - fieldName := field.Mapper(input.Config.HasShortFieldNames) - rawSpans := decoder.InterfaceArr(raw, fieldName("span")) - var spans = make([]*model.Span, len(rawSpans)) - for idx, rawSpan := range rawSpans { - span, parentIndex, err := decodeRUMV3Span(Input{ - Raw: rawSpan, - RequestTime: input.RequestTime, - Metadata: input.Metadata, - Config: input.Config, - }) - if err != nil { - return spans, err - } - span.TransactionID = tr.ID - span.TraceID = tr.TraceID - if parentIndex >= 0 && parentIndex < idx { - span.ParentID = spans[parentIndex].ID - } else { - span.ParentID = tr.ID - } - spans[idx] = span - } - return spans, nil -} - -// DecodeRUMV2Transaction decodes a v2 RUM transaction. -func DecodeRUMV2Transaction(input Input, batch *model.Batch) error { - // Identical to backend agent transactions. - return DecodeTransaction(input, batch) -} - -// DecodeTransaction decodes a v2 transaction. 
-func DecodeTransaction(input Input, batch *model.Batch) error { - transaction, err := decodeTransaction(input, transactionSchema) - if err != nil { - return err - } - batch.Transactions = append(batch.Transactions, transaction) - return nil -} - -func decodeTransaction(input Input, schema *jsonschema.Schema) (*model.Transaction, error) { - raw, err := validation.ValidateObject(input.Raw, schema) - if err != nil { - return nil, errors.Wrap(err, "failed to validate transaction") - } - - fieldName := field.Mapper(input.Config.HasShortFieldNames) - ctx, err := decodeContext(getObject(raw, fieldName("context")), input.Config, &input.Metadata) - if err != nil { - return nil, err - } - decoder := utility.ManualDecoder{} - e := model.Transaction{ - Metadata: input.Metadata, - Labels: ctx.Labels, - Page: ctx.Page, - HTTP: ctx.Http, - URL: ctx.URL, - Custom: ctx.Custom, - Experimental: ctx.Experimental, - Message: ctx.Message, - Sampled: decoder.BoolPtr(raw, fieldName("sampled")), - Marks: decodeV2Marks(getObject(raw, fieldName("marks"))), - Timestamp: decoder.TimeEpochMicro(raw, fieldName("timestamp")), - SpanCount: model.SpanCount{ - Dropped: decoder.IntPtr(raw, fieldName("dropped"), fieldName("span_count")), - Started: decoder.IntPtr(raw, fieldName("started"), fieldName("span_count"))}, - } - if decoder.Err != nil { - return nil, decoder.Err - } - decodeString(raw, "id", &e.ID) - decodeString(raw, fieldName("trace_id"), &e.TraceID) - decodeString(raw, fieldName("parent_id"), &e.ParentID) - decodeString(raw, fieldName("type"), &e.Type) - decodeString(raw, fieldName("name"), &e.Name) - decodeString(raw, fieldName("result"), &e.Result) - decodeString(raw, fieldName("outcome"), &e.Outcome) - decodeFloat64(raw, fieldName("duration"), &e.Duration) - if e.Outcome == "" { - if ctx.Http != nil && ctx.Http.Response != nil && ctx.Http.Response.StatusCode != nil { - statusCode := *ctx.Http.Response.StatusCode - if statusCode >= http.StatusInternalServerError { - e.Outcome = "failure" - } else { - e.Outcome = "success" - } - } else { - e.Outcome = "unknown" - } - } - - if obj := getObject(raw, fieldName("experience")); obj != nil { - var experience model.UserExperience - decodeUserExperience(obj, &experience) - e.UserExperience = &experience - } - - if e.Timestamp.IsZero() { - e.Timestamp = input.RequestTime - } - return &e, nil -} - -func decodeV2Marks(raw map[string]interface{}) model.TransactionMarks { - if len(raw) == 0 { - return nil - } - marks := make(model.TransactionMarks, len(raw)) - for group, v := range raw { - groupObj, ok := v.(map[string]interface{}) - if !ok { - continue - } - groupMarks := make(model.TransactionMark, len(groupObj)) - for k, v := range groupObj { - switch v := v.(type) { - case json.Number: - if f, err := v.Float64(); err == nil { - groupMarks[k] = f - } - case float64: - groupMarks[k] = v - } - } - marks[group] = groupMarks - } - return marks -} - -func decodeRUMV3Marks(raw map[string]interface{}, cfg Config) model.TransactionMarks { - fieldName := field.Mapper(cfg.HasShortFieldNames) - marks := make(model.TransactionMarks) - decodeMarks := func(group string, names ...string) { - groupObj := getObject(raw, fieldName(group)) - if groupObj == nil { - return - } - groupMarks := make(model.TransactionMark, len(groupObj)) - for _, name := range names { - var v float64 - if decodeFloat64(groupObj, fieldName(name), &v) { - groupMarks[name] = v - } - } - if len(groupMarks) != 0 { - marks[group] = groupMarks - } - } - - decodeMarks("agent", - "domComplete", - "domInteractive", - 
"domContentLoadedEventStart", - "domContentLoadedEventEnd", - "timeToFirstByte", - "firstContentfulPaint", - "largestContentfulPaint", - ) - decodeMarks("navigationTiming", - "fetchStart", - "domainLookupStart", - "domainLookupEnd", - "connectStart", - "connectEnd", - "requestStart", - "responseStart", - "responseEnd", - "domComplete", - "domInteractive", - "domLoading", - "domContentLoadedEventStart", - "domContentLoadedEventEnd", - "loadEventStart", - "loadEventEnd", - ) - return marks -} diff --git a/model/modeldecoder/transaction_test.go b/model/modeldecoder/transaction_test.go deleted file mode 100644 index 947d783c4a2..00000000000 --- a/model/modeldecoder/transaction_test.go +++ /dev/null @@ -1,461 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "fmt" - "net" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/tests" -) - -var ( - trID = "123" - trType = "type" - trName = "foo()" - trResult = "555" - trOutcome = "success" - - trDuration = 6.0 - - traceID = "0147258369012345abcdef0123456789" - parentID = "abcdef0123456789" - - timestampParsed = time.Date(2017, 5, 30, 18, 53, 27, 154*1e6, time.UTC) - timestampEpoch = json.Number(fmt.Sprintf("%d", timestampParsed.UnixNano()/1000)) - - marks = map[string]interface{}{"navigationTiming": map[string]interface{}{ - "appBeforeBootstrap": 608.9300000000001, - "navigationStart": -21.0, - }} - - trUserName = "jane" - trUserID = "abc123" - trUserEmail = "j@d.com" - trUserIP = "127.0.0.1" - - trPageURL = "https://mypage.com" - trPageReferer = "http:mypage.com" - trRequestHeaderUserAgent = "go-1.1" -) - -var fullTransactionInput = map[string]interface{}{ - "id": trID, - "type": trType, - "name": trName, - "duration": trDuration, - "timestamp": timestampEpoch, - "result": trResult, - "outcome": trOutcome, - "sampled": true, - "trace_id": traceID, - "parent_id": parentID, - "span_count": map[string]interface{}{"dropped": 12.0, "started": 6.0}, - "marks": marks, - "context": map[string]interface{}{ - "a": "b", - "custom": map[string]interface{}{"abc": 1}, - "user": map[string]interface{}{"username": trUserName, "email": trUserEmail, "ip": trUserIP, "id": trUserID}, - "tags": map[string]interface{}{"foo": "bar"}, - "page": map[string]interface{}{"url": trPageURL, "referer": trPageReferer}, - "request": map[string]interface{}{ - "method": "POST", - "url": map[string]interface{}{"raw": "127.0.0.1"}, - "headers": map[string]interface{}{"user-agent": trRequestHeaderUserAgent}, - }, - "response": map[string]interface{}{ - "finished": false, - "headers": 
map[string]interface{}{"Content-Type": "text/html"}, - }, - }, - "experience": map[string]interface{}{ - "cls": 1, - "fid": 2, - "tbt": 3, - "ignored": 4, - }, -} - -func TestDecodeTransactionInvalid(t *testing.T) { - err := DecodeTransaction(Input{Raw: nil}, &model.Batch{}) - require.EqualError(t, err, "failed to validate transaction: error validating JSON: input missing") - - err = DecodeTransaction(Input{Raw: ""}, &model.Batch{}) - require.EqualError(t, err, "failed to validate transaction: error validating JSON: invalid input type") - - baseInput := map[string]interface{}{ - "type": "type", - "trace_id": "trace_id", - "id": "id", - "duration": 123, - "span_count": map[string]interface{}{"dropped": 1.0, "started": 2.0}, - } - - for name, test := range map[string]struct { - input map[string]interface{} - err string - }{ - "missing trace_id": { - input: map[string]interface{}{"trace_id": nil}, - err: "missing properties: \"trace_id\"", - }, - "negative duration": { - input: map[string]interface{}{"duration": -1.0}, - err: "duration.*must be >= 0 but found -1", - }, - "invalid outcome": { - input: map[string]interface{}{"outcome": `¯\_(ツ)_/¯`}, - err: `outcome.*must be one of , "success", "failure", "unknown"`, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - if v == nil { - delete(input, k) - } else { - input[k] = v - } - } - err := DecodeTransaction(Input{Raw: input}, &model.Batch{}) - assert.Error(t, err) - assert.Regexp(t, test.err, err.Error()) - }) - } -} - -func TestTransactionDecodeRUMV3Marks(t *testing.T) { - // TODO use DecodeRUMV3Transaction to ensure we test with completely valid input data. - - // unknown fields are ignored - input := map[string]interface{}{ - "foo": 0, - "a": map[string]interface{}{ - "foo": 0, - "dc": 1.2, - }, - "nt": map[string]interface{}{ - "foo": 0, - "dc": 1.2, - }, - } - marks := decodeRUMV3Marks(input, Config{HasShortFieldNames: true}) - - var f = 1.2 - assert.Equal(t, model.TransactionMarks{ - "agent": {"domComplete": f}, - "navigationTiming": {"domComplete": f}, - }, marks) -} - -func TestTransactionEventDecode(t *testing.T) { - id, trType, name, result := "123", "type", "foo()", "555" - outcome := "success" - requestTime := time.Now() - timestampParsed := time.Date(2017, 5, 30, 18, 53, 27, 154*1e6, time.UTC) - timestampEpoch := json.Number(fmt.Sprintf("%d", timestampParsed.UnixNano()/1000)) - - traceID, parentID := "0147258369012345abcdef0123456789", "abcdef0123456789" - dropped, started, duration := 12, 6, 1.67 - name, userID, email, userIP := "jane", "abc123", "j@d.com", "127.0.0.1" - url, referer, origURL := "https://mypage.com", "http:mypage.com", "127.0.0.1" - sampled := true - labels := model.Labels{"foo": "bar"} - ua := "go-1.1" - page := model.Page{URL: model.ParseURL(url, ""), Referer: &referer} - request := model.Req{Method: "post", Socket: &model.Socket{}, Headers: http.Header{"User-Agent": []string{ua}}} - response := model.Resp{Finished: new(bool), MinimalResp: model.MinimalResp{Headers: http.Header{"Content-Type": []string{"text/html"}}}} - badRequestResp, internalErrorResp := 400, 500 - h := model.Http{Request: &request, Response: &response} - ctxURL := model.URL{Original: &origURL} - custom := model.Custom{"abc": 1} - - inputMetadata := model.Metadata{Service: model.Service{Name: "foo"}} - - mergedMetadata := inputMetadata - mergedMetadata.User = model.User{Name: name, Email: email, ID: userID} - 
mergedMetadata.UserAgent.Original = ua - mergedMetadata.Client.IP = net.ParseIP(userIP) - - // baseInput holds the minimal valid input. Test-specific input is added to this. - baseInput := map[string]interface{}{ - "id": id, "type": trType, "name": name, "duration": duration, "trace_id": traceID, - "span_count": map[string]interface{}{"dropped": 12.0, "started": 6.0}, - } - - for name, test := range map[string]struct { - input map[string]interface{} - cfg Config - e *model.Transaction - }{ - "no timestamp specified, request time used": { - input: map[string]interface{}{}, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - Timestamp: requestTime, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Outcome: "unknown", - }, - }, - "event experimental=true, no experimental payload": { - input: map[string]interface{}{ - "timestamp": timestampEpoch, - "context": map[string]interface{}{"foo": "bar"}, - }, - cfg: Config{Experimental: true}, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - Timestamp: timestampParsed, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Outcome: "unknown", - }, - }, - "event experimental=false": { - input: map[string]interface{}{ - "timestamp": timestampEpoch, - "context": map[string]interface{}{"experimental": map[string]interface{}{"foo": "bar"}}, - }, - cfg: Config{Experimental: false}, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - Timestamp: timestampParsed, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Outcome: "unknown", - }, - }, - "event experimental=true": { - input: map[string]interface{}{ - "timestamp": timestampEpoch, - "context": map[string]interface{}{"experimental": map[string]interface{}{"foo": "bar"}}, - }, - cfg: Config{Experimental: true}, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - Timestamp: timestampParsed, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Experimental: map[string]interface{}{"foo": "bar"}, - Outcome: "unknown", - }, - }, - "messaging event": { - input: map[string]interface{}{ - "timestamp": timestampEpoch, - "type": "messaging", - "context": map[string]interface{}{ - "message": map[string]interface{}{ - "queue": map[string]interface{}{"name": "order"}, - "body": "confirmed", - "headers": map[string]interface{}{"internal": "false"}, - "age": map[string]interface{}{"ms": json.Number("1577958057123")}, - }, - }, - }, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Name: name, - Type: "messaging", - TraceID: traceID, - Duration: duration, - Timestamp: timestampParsed, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Outcome: "unknown", - Message: &model.Message{ - QueueName: tests.StringPtr("order"), - Body: tests.StringPtr("confirmed"), - Headers: http.Header{"Internal": []string{"false"}}, - AgeMillis: tests.IntPtr(1577958057123), - }, - }, - }, - "valid event": { - input: map[string]interface{}{ - "timestamp": timestampEpoch, - "result": result, - "outcome": outcome, - "sampled": sampled, - "parent_id": parentID, - "marks": marks, - "context": map[string]interface{}{ - "a": "b", - "custom": map[string]interface{}{"abc": 1}, - "user": 
map[string]interface{}{"username": name, "email": email, "ip": userIP, "id": userID}, - "tags": map[string]interface{}{"foo": "bar"}, - "page": map[string]interface{}{"url": url, "referer": referer}, - "request": map[string]interface{}{ - "method": "POST", - "url": map[string]interface{}{"raw": "127.0.0.1"}, - "headers": map[string]interface{}{"user-agent": ua}, - }, - "response": map[string]interface{}{ - "finished": false, - "headers": map[string]interface{}{"Content-Type": "text/html"}, - }, - }, - "experience": map[string]interface{}{ - "cls": 1.0, - "fid": 2.3, - "ignored": 4, - }, - }, - e: &model.Transaction{ - Metadata: mergedMetadata, - ID: id, - Type: trType, - Name: name, - Result: result, - Outcome: outcome, - ParentID: parentID, - TraceID: traceID, - Duration: duration, - Timestamp: timestampParsed, - Marks: model.TransactionMarks{ - "navigationTiming": model.TransactionMark{ - "appBeforeBootstrap": 608.9300000000001, - "navigationStart": -21, - }, - }, - Sampled: &sampled, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Labels: &labels, - Page: &page, - Custom: &custom, - HTTP: &h, - URL: &ctxURL, - UserExperience: &model.UserExperience{ - CumulativeLayoutShift: 1, - FirstInputDelay: 2.3, - TotalBlockingTime: -1, // undefined - }, - }, - }, - "with derived success outcome": { - input: map[string]interface{}{ - "context": map[string]interface{}{ - "response": map[string]interface{}{ - "status_code": json.Number("400"), - }, - }, - }, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - HTTP: &model.Http{Response: &model.Resp{ - MinimalResp: model.MinimalResp{StatusCode: &badRequestResp}, - }}, - Timestamp: requestTime, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - // a 4xx code is a success from the server perspective - Outcome: "success", - }, - }, - "with derived failure outcome": { - input: map[string]interface{}{ - "context": map[string]interface{}{ - "response": map[string]interface{}{ - "status_code": json.Number("500"), - }, - }, - }, - e: &model.Transaction{ - Metadata: inputMetadata, - ID: id, - Type: trType, - Name: name, - TraceID: traceID, - Duration: duration, - Timestamp: requestTime, - HTTP: &model.Http{Response: &model.Resp{ - MinimalResp: model.MinimalResp{StatusCode: &internalErrorResp}, - }}, - SpanCount: model.SpanCount{Dropped: &dropped, Started: &started}, - Outcome: "failure", - }, - }, - } { - t.Run(name, func(t *testing.T) { - input := make(map[string]interface{}) - for k, v := range baseInput { - input[k] = v - } - for k, v := range test.input { - input[k] = v - } - batch := &model.Batch{} - err := DecodeTransaction(Input{ - Raw: input, - RequestTime: requestTime, - Metadata: inputMetadata, - Config: test.cfg, - }, batch) - require.NoError(t, err) - assert.Equal(t, test.e, batch.Transactions[0]) - }) - } -} - -func BenchmarkDecodeTransaction(b *testing.B) { - var fullMetadata model.Metadata - require.NoError(b, DecodeMetadata(fullInput, false, &fullMetadata)) - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - if err := DecodeTransaction(Input{ - Metadata: fullMetadata, - Raw: fullTransactionInput, - }, &model.Batch{}); err != nil { - b.Fatal(err) - } - } -} diff --git a/model/modeldecoder/user.go b/model/modeldecoder/user.go deleted file mode 100644 index d0975d3686e..00000000000 --- a/model/modeldecoder/user.go +++ /dev/null @@ -1,51 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package modeldecoder - -import ( - "encoding/json" - "net" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/field" -) - -func decodeUser(input map[string]interface{}, hasShortFieldNames bool, out *model.User, client *model.Client) { - if input == nil { - return - } - - fieldName := field.Mapper(hasShortFieldNames) - decodeString(input, fieldName("username"), &out.Name) - decodeString(input, fieldName("email"), &out.Email) - - var ipString string - if decodeString(input, "ip", &ipString) { - client.IP = net.ParseIP(ipString) - } - - // id can be string or int - switch id := input["id"].(type) { - case json.Number: - out.ID = id.String() - case string: - if id != "" { - out.ID = id - } - } -} diff --git a/model/modeldecoder/user_test.go b/model/modeldecoder/user_test.go deleted file mode 100644 index 6e720541e54..00000000000 --- a/model/modeldecoder/user_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
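The decodeUser helper deleted above accepted the user id as either a JSON string or a number, stringifying the latter. A minimal standalone sketch of that json.Number handling (illustrative only, not part of this diff):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A decoder configured with UseNumber() yields json.Number for numeric
	// values, so an id of 12 becomes the string "12" rather than float64 12.
	var id string
	for _, raw := range []interface{}{json.Number("12"), "abc123"} {
		switch v := raw.(type) {
		case json.Number:
			id = v.String()
		case string:
			id = v
		}
		fmt.Println(id) // prints 12, then abc123
	}
}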
- -package modeldecoder - -import ( - "encoding/json" - "net" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/model" -) - -func TestUserDecode(t *testing.T) { - id, mail, name, ip := "12", "m@g.dk", "foo", "127.0.0.1" - for _, test := range []struct { - input map[string]interface{} - user model.User - client model.Client - }{ - {input: nil}, - { - input: map[string]interface{}{"id": json.Number("12")}, - user: model.User{ID: id}, - }, - { - input: map[string]interface{}{"ip": ip}, - client: model.Client{IP: net.ParseIP(ip)}, - }, - { - input: map[string]interface{}{ - "id": id, "email": mail, "username": name, "ip": ip, - }, - user: model.User{ID: id, Email: mail, Name: name}, - client: model.Client{IP: net.ParseIP(ip)}, - }, - } { - var user model.User - var client model.Client - decodeUser(test.input, false, &user, &client) - assert.Equal(t, test.user, user) - assert.Equal(t, test.client, client) - } -} diff --git a/model/modeldecoder/utility.go b/model/modeldecoder/utility.go deleted file mode 100644 index 7cf782ec87f..00000000000 --- a/model/modeldecoder/utility.go +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package modeldecoder - -import "encoding/json" - -func getObject(obj map[string]interface{}, key string) map[string]interface{} { - value, _ := obj[key].(map[string]interface{}) - return value -} - -func decodeString(obj map[string]interface{}, key string, out *string) bool { - if value, ok := obj[key].(string); ok { - *out = value - return true - } - return false -} - -func decodeInt(obj map[string]interface{}, key string, out *int) bool { - var f float64 - if decodeFloat64(obj, key, &f) { - *out = int(f) - return true - } - return false -} - -func decodeFloat64(obj map[string]interface{}, key string, out *float64) bool { - switch value := obj[key].(type) { - case json.Number: - if f, err := value.Float64(); err == nil { - *out = f - } - return true - case float64: - *out = value - return true - } - return false -} diff --git a/model/modeldecoder/v2/decoder.go b/model/modeldecoder/v2/decoder.go index bb2b95ec42a..bf455fb2303 100644 --- a/model/modeldecoder/v2/decoder.go +++ b/model/modeldecoder/v2/decoder.go @@ -19,59 +19,198 @@ package v2 import ( "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" "sync" - - "github.com/elastic/beats/v7/libbeat/common" + "time" "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecoderutil" + "github.com/elastic/apm-server/model/modeldecoder/nullable" + "github.com/elastic/apm-server/utility" +) + +var ( + errorRootPool = sync.Pool{ + New: func() interface{} { + return &errorRoot{} + }, + } + metadataRootPool = sync.Pool{ + New: func() interface{} { + return &metadataRoot{} + }, + } + metricsetRootPool = sync.Pool{ + New: func() interface{} { + return &metricsetRoot{} + }, + } + spanRootPool = sync.Pool{ + New: func() interface{} { + return &spanRoot{} + }, + } + transactionRootPool = sync.Pool{ + New: func() interface{} { + return &transactionRoot{} + }, + } ) -var metadataRootPool = sync.Pool{ - New: func() interface{} { - return &metadataRoot{} - }, +func fetchErrorRoot() *errorRoot { + return errorRootPool.Get().(*errorRoot) +} + +func releaseErrorRoot(root *errorRoot) { + root.Reset() + errorRootPool.Put(root) } func fetchMetadataRoot() *metadataRoot { return metadataRootPool.Get().(*metadataRoot) } -func releaseMetadataRoot(m *metadataRoot) { - m.Reset() - metadataRootPool.Put(m) +func releaseMetadataRoot(root *metadataRoot) { + root.Reset() + metadataRootPool.Put(root) +} + +func fetchMetricsetRoot() *metricsetRoot { + return metricsetRootPool.Get().(*metricsetRoot) +} + +func releaseMetricsetRoot(root *metricsetRoot) { + root.Reset() + metricsetRootPool.Put(root) +} + +func fetchSpanRoot() *spanRoot { + return spanRootPool.Get().(*spanRoot) +} + +func releaseSpanRoot(root *spanRoot) { + root.Reset() + spanRootPool.Put(root) +} + +func fetchTransactionRoot() *transactionRoot { + return transactionRootPool.Get().(*transactionRoot) +} + +func releaseTransactionRoot(root *transactionRoot) { + root.Reset() + transactionRootPool.Put(root) +} + +// DecodeMetadata decodes metadata from d, updating out. +// +// DecodeMetadata should be used when the stream in the decoder does not contain the +// `metadata` key, but only the metadata itself. +func DecodeMetadata(d decoder.Decoder, out *model.APMEvent) error { + return decodeMetadata(decodeIntoMetadata, d, out) +} + +// DecodeNestedMetadata decodes metadata from d, updating out. 
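The fetch*/release* helpers above all follow the same sync.Pool pattern: take a root from the pool, and Reset it before putting it back so no event data leaks between uses. A minimal self-contained sketch of that pattern, with a stand-in type rather than the generated roots from this diff:

package main

import (
	"fmt"
	"sync"
)

// root stands in for the generated errorRoot/spanRoot/transactionRoot types.
type root struct{ name string }

// Reset zeroes the value so pooled instances never leak prior event data.
func (r *root) Reset() { *r = root{} }

var rootPool = sync.Pool{New: func() interface{} { return &root{} }}

func main() {
	r := rootPool.Get().(*root) // fetch
	r.name = "GET /users"
	fmt.Println(r.name)
	r.Reset()       // zero first...
	rootPool.Put(r) // ...then release back to the pool
}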
+// +// DecodeNestedMetadata should be used when the stream in the decoder contains the `metadata` key +func DecodeNestedMetadata(d decoder.Decoder, out *model.APMEvent) error { + return decodeMetadata(decodeIntoMetadataRoot, d, out) +} + +// DecodeNestedError decodes an error from d, appending it to batch. +// +// DecodeNestedError should be used when the stream in the decoder contains the `error` key +func DecodeNestedError(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error { + root := fetchErrorRoot() + defer releaseErrorRoot(root) + err := d.Decode(root) + if err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) + } + if err := root.validate(); err != nil { + return modeldecoder.NewValidationErr(err) + } + event := input.Base + mapToErrorModel(&root.Error, &event) + *batch = append(*batch, event) + return err +} + +// DecodeNestedMetricset decodes a metricset from d, appending it to batch. +// +// DecodeNestedMetricset should be used when the stream in the decoder contains the `metricset` key +func DecodeNestedMetricset(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error { + root := fetchMetricsetRoot() + defer releaseMetricsetRoot(root) + var err error + if err = d.Decode(root); err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) + } + if err := root.validate(); err != nil { + return modeldecoder.NewValidationErr(err) + } + event := input.Base + mapToMetricsetModel(&root.Metricset, &event) + *batch = append(*batch, event) + return err } -// DecodeMetadata uses the given decoder to create the input models, -// then runs the defined validations on the input models -// and finally maps the values fom the input model to the given *model.Metadata instance +// DecodeNestedSpan decodes a span from d, appending it to batch. // -// DecodeMetadata should be used when the underlying byte stream does not contain the -// `metadata` key, but only the metadata. -func DecodeMetadata(d decoder.Decoder, out *model.Metadata) error { - return decode(decodeIntoMetadata, d, out) +// DecodeNestedSpan should be used when the stream in the decoder contains the `span` key +func DecodeNestedSpan(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error { + root := fetchSpanRoot() + defer releaseSpanRoot(root) + var err error + if err = d.Decode(root); err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) + } + if err := root.validate(); err != nil { + return modeldecoder.NewValidationErr(err) + } + event := input.Base + mapToSpanModel(&root.Span, &event) + *batch = append(*batch, event) + return err } -// DecodeNestedMetadata uses the given decoder to create the input models, -// then runs the defined validations on the input models -// and finally maps the values fom the input model to the given *model.Metadata instance +// DecodeNestedTransaction decodes a transaction from d, appending it to batch. 
// -// DecodeNestedMetadata should be used when the underlying byte stream does start with the `metadata` key -func DecodeNestedMetadata(d decoder.Decoder, out *model.Metadata) error { - return decode(decodeIntoMetadataRoot, d, out) +// DecodeNestedTransaction should be used when the stream in the decoder contains the `transaction` key +func DecodeNestedTransaction(d decoder.Decoder, input *modeldecoder.Input, batch *model.Batch) error { + root := fetchTransactionRoot() + defer releaseTransactionRoot(root) + var err error + if err = d.Decode(root); err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) + } + if err := root.validate(); err != nil { + return modeldecoder.NewValidationErr(err) + } + event := input.Base + mapToTransactionModel(&root.Transaction, &event) + *batch = append(*batch, event) + return err } -func decode(decFn func(d decoder.Decoder, m *metadataRoot) error, d decoder.Decoder, out *model.Metadata) error { +func decodeMetadata(decFn func(d decoder.Decoder, m *metadataRoot) error, d decoder.Decoder, out *model.APMEvent) error { m := fetchMetadataRoot() defer releaseMetadataRoot(m) - if err := decFn(d, m); err != nil { - return fmt.Errorf("decode error %w", err) + var err error + if err = decFn(d, m); err != nil && err != io.EOF { + return modeldecoder.NewDecoderErrFromJSONIter(err) } if err := m.validate(); err != nil { - return fmt.Errorf("validation error %w", err) + return modeldecoder.NewValidationErr(err) } mapToMetadataModel(&m.Metadata, out) - return nil + return err } func decodeIntoMetadata(d decoder.Decoder, m *metadataRoot) error { @@ -82,142 +221,950 @@ func decodeIntoMetadataRoot(d decoder.Decoder, m *metadataRoot) error { return d.Decode(m) } -func mapToMetadataModel(m *metadata, out *model.Metadata) { +func mapToClientModel(from contextRequest, source *model.Source, client *model.Client) { + // http.Request.Headers and http.Request.Socket are only set for backend events. 
+ if source.IP == nil { + source.IP = utility.ParseIP(from.Socket.RemoteAddress.Val) + } + if client.IP == nil { + client.IP = source.IP + if ip := utility.ExtractIPFromHeader(from.Headers.Val); ip != nil { + client.IP = ip + } + } +} + +func mapToErrorModel(from *errorEvent, event *model.APMEvent) { + out := &model.Error{} + event.Error = out + event.Processor = model.ErrorProcessor + + // overwrite metadata with event specific information + mapToServiceModel(from.Context.Service, &event.Service) + mapToAgentModel(from.Context.Service.Agent, &event.Agent) + overwriteUserInMetadataModel(from.Context.User, event) + mapToUserAgentModel(from.Context.Request.Headers, &event.UserAgent) + mapToClientModel(from.Context.Request, &event.Source, &event.Client) + + // map errorEvent specific data + + if from.Context.IsSet() { + if len(from.Context.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Context.Tags), + ) + } + if from.Context.Request.IsSet() { + event.HTTP.Request = &model.HTTPRequest{} + mapToRequestModel(from.Context.Request, event.HTTP.Request) + if from.Context.Request.HTTPVersion.IsSet() { + event.HTTP.Version = from.Context.Request.HTTPVersion.Val + } + } + if from.Context.Response.IsSet() { + event.HTTP.Response = &model.HTTPResponse{} + mapToResponseModel(from.Context.Response, event.HTTP.Response) + } + if from.Context.Request.URL.IsSet() { + mapToRequestURLModel(from.Context.Request.URL, &event.URL) + } + if from.Context.Page.IsSet() { + if from.Context.Page.URL.IsSet() && !from.Context.Request.URL.IsSet() { + event.URL = model.ParseURL(from.Context.Page.URL.Val, "", "") + } + if from.Context.Page.Referer.IsSet() { + if event.HTTP.Request == nil { + event.HTTP.Request = &model.HTTPRequest{} + } + if event.HTTP.Request.Referrer == "" { + event.HTTP.Request.Referrer = from.Context.Page.Referer.Val + } + } + } + if len(from.Context.Custom) > 0 { + out.Custom = modeldecoderutil.NormalizeLabelValues(from.Context.Custom.Clone()) + } + } + if from.Culprit.IsSet() { + out.Culprit = from.Culprit.Val + } + if from.Exception.IsSet() { + out.Exception = &model.Exception{} + mapToExceptionModel(from.Exception, out.Exception) + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Log.IsSet() { + log := model.Log{} + if from.Log.Level.IsSet() { + log.Level = from.Log.Level.Val + } + if from.Log.LoggerName.IsSet() { + log.LoggerName = from.Log.LoggerName.Val + } + if from.Log.Message.IsSet() { + log.Message = from.Log.Message.Val + } + if from.Log.ParamMessage.IsSet() { + log.ParamMessage = from.Log.ParamMessage.Val + } + if len(from.Log.Stacktrace) > 0 { + log.Stacktrace = make(model.Stacktrace, len(from.Log.Stacktrace)) + mapToStracktraceModel(from.Log.Stacktrace, log.Stacktrace) + } + out.Log = &log + } + if from.ParentID.IsSet() { + event.Parent.ID = from.ParentID.Val + } + if !from.Timestamp.Val.IsZero() { + event.Timestamp = from.Timestamp.Val + } + if from.TraceID.IsSet() { + event.Trace.ID = from.TraceID.Val + } + if from.Transaction.IsSet() { + event.Transaction = &model.Transaction{} + if from.Transaction.Sampled.IsSet() { + event.Transaction.Sampled = from.Transaction.Sampled.Val + } + if from.Transaction.Type.IsSet() { + event.Transaction.Type = from.Transaction.Type.Val + } + if from.TransactionID.IsSet() { + event.Transaction.ID = from.TransactionID.Val + } + } +} + +func mapToExceptionModel(from errorException, out *model.Exception) { + if !from.IsSet() { + return + } + if len(from.Attributes) > 0 { + 
out.Attributes = from.Attributes.Clone() + } + if from.Code.IsSet() { + out.Code = modeldecoderutil.ExceptionCodeString(from.Code.Val) + } + if len(from.Cause) > 0 { + out.Cause = make([]model.Exception, len(from.Cause)) + for i := 0; i < len(from.Cause); i++ { + var ex model.Exception + mapToExceptionModel(from.Cause[i], &ex) + out.Cause[i] = ex + } + } + if from.Handled.IsSet() { + out.Handled = &from.Handled.Val + } + if from.Message.IsSet() { + out.Message = from.Message.Val + } + if from.Module.IsSet() { + out.Module = from.Module.Val + } + if len(from.Stacktrace) > 0 { + out.Stacktrace = make(model.Stacktrace, len(from.Stacktrace)) + mapToStracktraceModel(from.Stacktrace, out.Stacktrace) + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } +} + +func mapToMetadataModel(from *metadata, out *model.APMEvent) { // Cloud - if m.Cloud.Account.ID.IsSet() { - out.Cloud.AccountID = m.Cloud.Account.ID.Val + if from.Cloud.Account.ID.IsSet() { + out.Cloud.AccountID = from.Cloud.Account.ID.Val + } + if from.Cloud.Account.Name.IsSet() { + out.Cloud.AccountName = from.Cloud.Account.Name.Val } - if m.Cloud.Account.Name.IsSet() { - out.Cloud.AccountName = m.Cloud.Account.Name.Val + if from.Cloud.AvailabilityZone.IsSet() { + out.Cloud.AvailabilityZone = from.Cloud.AvailabilityZone.Val } - if m.Cloud.AvailabilityZone.IsSet() { - out.Cloud.AvailabilityZone = m.Cloud.AvailabilityZone.Val + if from.Cloud.Instance.ID.IsSet() { + out.Cloud.InstanceID = from.Cloud.Instance.ID.Val } - if m.Cloud.Instance.ID.IsSet() { - out.Cloud.InstanceID = m.Cloud.Instance.ID.Val + if from.Cloud.Instance.Name.IsSet() { + out.Cloud.InstanceName = from.Cloud.Instance.Name.Val } - if m.Cloud.Instance.Name.IsSet() { - out.Cloud.InstanceName = m.Cloud.Instance.Name.Val + if from.Cloud.Machine.Type.IsSet() { + out.Cloud.MachineType = from.Cloud.Machine.Type.Val } - if m.Cloud.Machine.Type.IsSet() { - out.Cloud.MachineType = m.Cloud.Machine.Type.Val + if from.Cloud.Project.ID.IsSet() { + out.Cloud.ProjectID = from.Cloud.Project.ID.Val } - if m.Cloud.Project.ID.IsSet() { - out.Cloud.ProjectID = m.Cloud.Project.ID.Val + if from.Cloud.Project.Name.IsSet() { + out.Cloud.ProjectName = from.Cloud.Project.Name.Val } - if m.Cloud.Project.Name.IsSet() { - out.Cloud.ProjectName = m.Cloud.Project.Name.Val + if from.Cloud.Provider.IsSet() { + out.Cloud.Provider = from.Cloud.Provider.Val } - if m.Cloud.Provider.IsSet() { - out.Cloud.Provider = m.Cloud.Provider.Val + if from.Cloud.Region.IsSet() { + out.Cloud.Region = from.Cloud.Region.Val } - if m.Cloud.Region.IsSet() { - out.Cloud.Region = m.Cloud.Region.Val + if from.Cloud.Service.Name.IsSet() { + out.Cloud.ServiceName = from.Cloud.Service.Name.Val } // Labels - if len(m.Labels) > 0 { - out.Labels = common.MapStr{} - out.Labels.Update(m.Labels) + if len(from.Labels) > 0 { + out.Labels = modeldecoderutil.NormalizeLabelValues(from.Labels.Clone()) } // Process - if len(m.Process.Argv) > 0 { - out.Process.Argv = m.Process.Argv + if len(from.Process.Argv) > 0 { + out.Process.Argv = append(out.Process.Argv[:0], from.Process.Argv...) 
} - if m.Process.Pid.IsSet() { - out.Process.Pid = m.Process.Pid.Val + if from.Process.Pid.IsSet() { + out.Process.Pid = from.Process.Pid.Val } - if m.Process.Ppid.IsSet() { - var pid = m.Process.Ppid.Val + if from.Process.Ppid.IsSet() { + var pid = from.Process.Ppid.Val out.Process.Ppid = &pid } - if m.Process.Title.IsSet() { - out.Process.Title = m.Process.Title.Val + if from.Process.Title.IsSet() { + out.Process.Title = from.Process.Title.Val } // Service - if m.Service.Agent.EphemeralID.IsSet() { - out.Service.Agent.EphemeralID = m.Service.Agent.EphemeralID.Val + if from.Service.Agent.EphemeralID.IsSet() { + out.Agent.EphemeralID = from.Service.Agent.EphemeralID.Val } - if m.Service.Agent.Name.IsSet() { - out.Service.Agent.Name = m.Service.Agent.Name.Val + if from.Service.Agent.Name.IsSet() { + out.Agent.Name = from.Service.Agent.Name.Val } - if m.Service.Agent.Version.IsSet() { - out.Service.Agent.Version = m.Service.Agent.Version.Val + if from.Service.Agent.Version.IsSet() { + out.Agent.Version = from.Service.Agent.Version.Val } - if m.Service.Environment.IsSet() { - out.Service.Environment = m.Service.Environment.Val + if from.Service.Environment.IsSet() { + out.Service.Environment = from.Service.Environment.Val } - if m.Service.Framework.Name.IsSet() { - out.Service.Framework.Name = m.Service.Framework.Name.Val + if from.Service.Framework.Name.IsSet() { + out.Service.Framework.Name = from.Service.Framework.Name.Val } - if m.Service.Framework.Version.IsSet() { - out.Service.Framework.Version = m.Service.Framework.Version.Val + if from.Service.Framework.Version.IsSet() { + out.Service.Framework.Version = from.Service.Framework.Version.Val } - if m.Service.Language.Name.IsSet() { - out.Service.Language.Name = m.Service.Language.Name.Val + if from.Service.Language.Name.IsSet() { + out.Service.Language.Name = from.Service.Language.Name.Val } - if m.Service.Language.Version.IsSet() { - out.Service.Language.Version = m.Service.Language.Version.Val + if from.Service.Language.Version.IsSet() { + out.Service.Language.Version = from.Service.Language.Version.Val } - if m.Service.Name.IsSet() { - out.Service.Name = m.Service.Name.Val + if from.Service.Name.IsSet() { + out.Service.Name = from.Service.Name.Val } - if m.Service.Node.Name.IsSet() { - out.Service.Node.Name = m.Service.Node.Name.Val + if from.Service.Node.Name.IsSet() { + out.Service.Node.Name = from.Service.Node.Name.Val } - if m.Service.Runtime.Name.IsSet() { - out.Service.Runtime.Name = m.Service.Runtime.Name.Val + if from.Service.Runtime.Name.IsSet() { + out.Service.Runtime.Name = from.Service.Runtime.Name.Val } - if m.Service.Runtime.Version.IsSet() { - out.Service.Runtime.Version = m.Service.Runtime.Version.Val + if from.Service.Runtime.Version.IsSet() { + out.Service.Runtime.Version = from.Service.Runtime.Version.Val } - if m.Service.Version.IsSet() { - out.Service.Version = m.Service.Version.Val + if from.Service.Version.IsSet() { + out.Service.Version = from.Service.Version.Val } // System - if m.System.Architecture.IsSet() { - out.System.Architecture = m.System.Architecture.Val + if from.System.Architecture.IsSet() { + out.Host.Architecture = from.System.Architecture.Val } - if m.System.ConfiguredHostname.IsSet() { - out.System.ConfiguredHostname = m.System.ConfiguredHostname.Val + if from.System.ConfiguredHostname.IsSet() { + out.Host.Name = from.System.ConfiguredHostname.Val } - if m.System.Container.ID.IsSet() { - out.System.Container.ID = m.System.Container.ID.Val + if from.System.Container.ID.IsSet() { + 
out.Container.ID = from.System.Container.ID.Val } - if m.System.DetectedHostname.IsSet() { - out.System.DetectedHostname = m.System.DetectedHostname.Val + if from.System.DetectedHostname.IsSet() { + out.Host.Hostname = from.System.DetectedHostname.Val } - if !m.System.ConfiguredHostname.IsSet() && !m.System.DetectedHostname.IsSet() && - m.System.HostnameDeprecated.IsSet() { - out.System.DetectedHostname = m.System.HostnameDeprecated.Val + if !from.System.ConfiguredHostname.IsSet() && !from.System.DetectedHostname.IsSet() && + from.System.DeprecatedHostname.IsSet() { + out.Host.Hostname = from.System.DeprecatedHostname.Val } - if m.System.Kubernetes.Namespace.IsSet() { - out.System.Kubernetes.Namespace = m.System.Kubernetes.Namespace.Val + if from.System.Kubernetes.Namespace.IsSet() { + out.Kubernetes.Namespace = from.System.Kubernetes.Namespace.Val } - if m.System.Kubernetes.Node.Name.IsSet() { - out.System.Kubernetes.NodeName = m.System.Kubernetes.Node.Name.Val + if from.System.Kubernetes.Node.Name.IsSet() { + out.Kubernetes.NodeName = from.System.Kubernetes.Node.Name.Val } - if m.System.Kubernetes.Pod.Name.IsSet() { - out.System.Kubernetes.PodName = m.System.Kubernetes.Pod.Name.Val + if from.System.Kubernetes.Pod.Name.IsSet() { + out.Kubernetes.PodName = from.System.Kubernetes.Pod.Name.Val } - if m.System.Kubernetes.Pod.UID.IsSet() { - out.System.Kubernetes.PodUID = m.System.Kubernetes.Pod.UID.Val + if from.System.Kubernetes.Pod.UID.IsSet() { + out.Kubernetes.PodUID = from.System.Kubernetes.Pod.UID.Val } - if m.System.Platform.IsSet() { - out.System.Platform = m.System.Platform.Val + if from.System.Platform.IsSet() { + out.Host.OS.Platform = from.System.Platform.Val } // User - if m.User.ID.IsSet() { - out.User.ID = fmt.Sprint(m.User.ID.Val) + if from.User.Domain.IsSet() { + out.User.Domain = fmt.Sprint(from.User.Domain.Val) + } + if from.User.ID.IsSet() { + out.User.ID = fmt.Sprint(from.User.ID.Val) + } + if from.User.Email.IsSet() { + out.User.Email = from.User.Email.Val + } + if from.User.Name.IsSet() { + out.User.Name = from.User.Name.Val + } + + // Network + if from.Network.Connection.Type.IsSet() { + out.Network.Connection.Type = from.Network.Connection.Type.Val + } +} + +func mapToMetricsetModel(from *metricset, event *model.APMEvent) { + event.Metricset = &model.Metricset{} + event.Processor = model.MetricsetProcessor + + if !from.Timestamp.Val.IsZero() { + event.Timestamp = from.Timestamp.Val + } + + if len(from.Samples) > 0 { + samples := make(map[string]model.MetricsetSample, len(from.Samples)) + for name, sample := range from.Samples { + var counts []int64 + var values []float64 + if n := len(sample.Values); n > 0 { + values = make([]float64, n) + copy(values, sample.Values) + } + if n := len(sample.Counts); n > 0 { + counts = make([]int64, n) + copy(counts, sample.Counts) + } + samples[name] = model.MetricsetSample{ + Type: model.MetricType(sample.Type.Val), + Unit: sample.Unit.Val, + Value: sample.Value.Val, + Histogram: model.Histogram{ + Values: values, + Counts: counts, + }, + } + } + event.Metricset.Samples = samples + } + + if len(from.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Tags), + ) + } + + if from.Span.IsSet() { + event.Span = &model.Span{} + if from.Span.Subtype.IsSet() { + event.Span.Subtype = from.Span.Subtype.Val + } + if from.Span.Type.IsSet() { + event.Span.Type = from.Span.Type.Val + } + } + + if from.Transaction.IsSet() { + event.Transaction = &model.Transaction{} + if 
from.Transaction.Name.IsSet() { + event.Transaction.Name = from.Transaction.Name.Val + } + if from.Transaction.Type.IsSet() { + event.Transaction.Type = from.Transaction.Type.Val + } + // Transaction fields specified: this is an APM-internal metricset. + modeldecoderutil.SetInternalMetrics(event) + } +} + +func mapToRequestModel(from contextRequest, out *model.HTTPRequest) { + if from.Method.IsSet() { + out.Method = from.Method.Val + } + if len(from.Env) > 0 { + out.Env = from.Env.Clone() + } + if from.Body.IsSet() { + out.Body = modeldecoderutil.NormalizeHTTPRequestBody(from.Body.Val) + } + if len(from.Cookies) > 0 { + out.Cookies = from.Cookies.Clone() + } + if from.Headers.IsSet() { + out.Headers = modeldecoderutil.HTTPHeadersToMap(from.Headers.Val.Clone()) + } +} + +func mapToRequestURLModel(from contextRequestURL, out *model.URL) { + if from.Raw.IsSet() { + out.Original = from.Raw.Val + } + if from.Full.IsSet() { + out.Full = from.Full.Val + } + if from.Hostname.IsSet() { + out.Domain = from.Hostname.Val + } + if from.Path.IsSet() { + out.Path = from.Path.Val + } + if from.Search.IsSet() { + out.Query = from.Search.Val + } + if from.Hash.IsSet() { + out.Fragment = from.Hash.Val + } + if from.Protocol.IsSet() { + out.Scheme = strings.TrimSuffix(from.Protocol.Val, ":") + } + if from.Port.IsSet() { + // should never result in an error, type is checked when decoding + port, err := strconv.Atoi(fmt.Sprint(from.Port.Val)) + if err == nil { + out.Port = port + } + } +} + +func mapToResponseModel(from contextResponse, out *model.HTTPResponse) { + if from.Finished.IsSet() { + val := from.Finished.Val + out.Finished = &val + } + if from.Headers.IsSet() { + out.Headers = modeldecoderutil.HTTPHeadersToMap(from.Headers.Val.Clone()) + } + if from.HeadersSent.IsSet() { + val := from.HeadersSent.Val + out.HeadersSent = &val + } + if from.StatusCode.IsSet() { + out.StatusCode = from.StatusCode.Val + } + if from.TransferSize.IsSet() { + val := from.TransferSize.Val + out.TransferSize = &val + } + if from.EncodedBodySize.IsSet() { + val := from.EncodedBodySize.Val + out.EncodedBodySize = &val + } + if from.DecodedBodySize.IsSet() { + val := from.DecodedBodySize.Val + out.DecodedBodySize = &val + } +} + +func mapToServiceModel(from contextService, out *model.Service) { + if from.Environment.IsSet() { + out.Environment = from.Environment.Val + } + if from.Framework.Name.IsSet() { + out.Framework.Name = from.Framework.Name.Val + } + if from.Framework.Version.IsSet() { + out.Framework.Version = from.Framework.Version.Val + } + if from.Language.Name.IsSet() { + out.Language.Name = from.Language.Name.Val + } + if from.Language.Version.IsSet() { + out.Language.Version = from.Language.Version.Val + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Node.Name.IsSet() { + out.Node.Name = from.Node.Name.Val + } + if from.Runtime.Name.IsSet() { + out.Runtime.Name = from.Runtime.Name.Val + } + if from.Runtime.Version.IsSet() { + out.Runtime.Version = from.Runtime.Version.Val + } + if from.Version.IsSet() { + out.Version = from.Version.Val + } +} + +func mapToAgentModel(from contextServiceAgent, out *model.Agent) { + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Version.IsSet() { + out.Version = from.Version.Val + } + if from.EphemeralID.IsSet() { + out.EphemeralID = from.EphemeralID.Val + } +} + +func mapToSpanModel(from *span, event *model.APMEvent) { + out := &model.Span{} + event.Span = out + event.Processor = model.SpanProcessor + + // map span specific data + if 
!from.Action.IsSet() && !from.Subtype.IsSet() { + sep := "." + typ := strings.Split(from.Type.Val, sep) + out.Type = typ[0] + if len(typ) > 1 { + out.Subtype = typ[1] + if len(typ) > 2 { + out.Action = strings.Join(typ[2:], sep) + } + } + } else { + if from.Action.IsSet() { + out.Action = from.Action.Val + } + if from.Subtype.IsSet() { + out.Subtype = from.Subtype.Val + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } + } + if from.Composite.IsSet() { + composite := model.Composite{} + if from.Composite.Count.IsSet() { + composite.Count = from.Composite.Count.Val + } + if from.Composite.Sum.IsSet() { + composite.Sum = from.Composite.Sum.Val + } + if from.Composite.CompressionStrategy.IsSet() { + composite.CompressionStrategy = from.Composite.CompressionStrategy.Val + } + out.Composite = &composite + } + if len(from.ChildIDs) > 0 { + event.Child.ID = make([]string, len(from.ChildIDs)) + copy(event.Child.ID, from.ChildIDs) + } + if from.Context.Database.IsSet() { + db := model.DB{} + if from.Context.Database.Instance.IsSet() { + db.Instance = from.Context.Database.Instance.Val + } + if from.Context.Database.Link.IsSet() { + db.Link = from.Context.Database.Link.Val + } + if from.Context.Database.RowsAffected.IsSet() { + val := from.Context.Database.RowsAffected.Val + db.RowsAffected = &val + } + if from.Context.Database.Statement.IsSet() { + db.Statement = from.Context.Database.Statement.Val + } + if from.Context.Database.Type.IsSet() { + db.Type = from.Context.Database.Type.Val + } + if from.Context.Database.User.IsSet() { + db.UserName = from.Context.Database.User.Val + } + out.DB = &db + } + if from.Context.Destination.Address.IsSet() || from.Context.Destination.Port.IsSet() { + if from.Context.Destination.Address.IsSet() { + event.Destination.Address = from.Context.Destination.Address.Val + } + if from.Context.Destination.Port.IsSet() { + event.Destination.Port = from.Context.Destination.Port.Val + } + } + if from.Context.Destination.Service.IsSet() { + service := model.DestinationService{} + if from.Context.Destination.Service.Name.IsSet() { + service.Name = from.Context.Destination.Service.Name.Val + } + if from.Context.Destination.Service.Resource.IsSet() { + service.Resource = from.Context.Destination.Service.Resource.Val + } + if from.Context.Destination.Service.Type.IsSet() { + service.Type = from.Context.Destination.Service.Type.Val + } + out.DestinationService = &service + } + if from.Context.HTTP.IsSet() { + if from.Context.HTTP.Method.IsSet() { + event.HTTP.Request = &model.HTTPRequest{} + event.HTTP.Request.Method = from.Context.HTTP.Method.Val + } + if from.Context.HTTP.Response.IsSet() { + response := model.HTTPResponse{} + if from.Context.HTTP.Response.DecodedBodySize.IsSet() { + val := from.Context.HTTP.Response.DecodedBodySize.Val + response.DecodedBodySize = &val + } + if from.Context.HTTP.Response.EncodedBodySize.IsSet() { + val := from.Context.HTTP.Response.EncodedBodySize.Val + response.EncodedBodySize = &val + } + if from.Context.HTTP.Response.Headers.IsSet() { + response.Headers = modeldecoderutil.HTTPHeadersToMap(from.Context.HTTP.Response.Headers.Val.Clone()) + } + if from.Context.HTTP.Response.StatusCode.IsSet() { + response.StatusCode = from.Context.HTTP.Response.StatusCode.Val + } + if from.Context.HTTP.Response.TransferSize.IsSet() { + val := from.Context.HTTP.Response.TransferSize.Val + response.TransferSize = &val + } + event.HTTP.Response = &response + } + if from.Context.HTTP.StatusCode.IsSet() { + if event.HTTP.Response == nil { + 
event.HTTP.Response = &model.HTTPResponse{} + } + event.HTTP.Response.StatusCode = from.Context.HTTP.StatusCode.Val + } + if from.Context.HTTP.URL.IsSet() { + event.URL.Original = from.Context.HTTP.URL.Val + } + } + if from.Context.Message.IsSet() { + message := model.Message{} + if from.Context.Message.Body.IsSet() { + message.Body = from.Context.Message.Body.Val + } + if from.Context.Message.Headers.IsSet() { + message.Headers = from.Context.Message.Headers.Val.Clone() + } + if from.Context.Message.Age.Milliseconds.IsSet() { + val := from.Context.Message.Age.Milliseconds.Val + message.AgeMillis = &val + } + if from.Context.Message.Queue.Name.IsSet() { + message.QueueName = from.Context.Message.Queue.Name.Val + } + out.Message = &message + } + if from.Context.Service.IsSet() { + mapToServiceModel(from.Context.Service, &event.Service) + mapToAgentModel(from.Context.Service.Agent, &event.Agent) + } + if len(from.Context.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Context.Tags), + ) + } + if from.Duration.IsSet() { + duration := time.Duration(from.Duration.Val * float64(time.Millisecond)) + event.Event.Duration = duration + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Outcome.IsSet() { + event.Event.Outcome = from.Outcome.Val + } else { + if from.Context.HTTP.StatusCode.IsSet() { + statusCode := from.Context.HTTP.StatusCode.Val + if statusCode >= http.StatusBadRequest { + event.Event.Outcome = "failure" + } else { + event.Event.Outcome = "success" + } + } else { + event.Event.Outcome = "unknown" + } + } + if from.ParentID.IsSet() { + event.Parent.ID = from.ParentID.Val + } + if from.SampleRate.IsSet() && from.SampleRate.Val > 0 { + out.RepresentativeCount = 1 / from.SampleRate.Val + } + if len(from.Stacktrace) > 0 { + out.Stacktrace = make(model.Stacktrace, len(from.Stacktrace)) + mapToStracktraceModel(from.Stacktrace, out.Stacktrace) + } + if from.Start.IsSet() { + val := from.Start.Val + out.Start = &val + } + if from.Sync.IsSet() { + val := from.Sync.Val + out.Sync = &val + } + if !from.Timestamp.Val.IsZero() { + event.Timestamp = from.Timestamp.Val + } else if from.Start.IsSet() { + // event.Timestamp is initialized to the time the payload was + // received by apm-server; offset that by "start" milliseconds + // for RUM. 
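A small standalone illustration of the timestamp arithmetic the comment above refers to (values made up): a RUM span that reports start=150.5 but no timestamp of its own is stamped 150.5ms after the request time.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Request received at requestTime; the span reports start=150.5 (ms).
	requestTime := time.Date(2020, 8, 1, 10, 0, 0, 0, time.UTC)
	start := 150.5

	// Same arithmetic as the diff: float64 milliseconds -> time.Duration.
	ts := requestTime.Add(time.Duration(float64(time.Millisecond) * start))
	fmt.Println(ts) // 2020-08-01 10:00:00.1505 +0000 UTC
}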
+ event.Timestamp = event.Timestamp.Add( + time.Duration(float64(time.Millisecond) * from.Start.Val), + ) + } + if from.TraceID.IsSet() { + event.Trace.ID = from.TraceID.Val + } + if from.TransactionID.IsSet() { + event.Transaction = &model.Transaction{ID: from.TransactionID.Val} + } +} + +func mapToStracktraceModel(from []stacktraceFrame, out model.Stacktrace) { + for idx, eventFrame := range from { + fr := model.StacktraceFrame{} + if eventFrame.AbsPath.IsSet() { + fr.AbsPath = eventFrame.AbsPath.Val + } + if eventFrame.Classname.IsSet() { + fr.Classname = eventFrame.Classname.Val + } + if eventFrame.ColumnNumber.IsSet() { + val := eventFrame.ColumnNumber.Val + fr.Colno = &val + } + if eventFrame.ContextLine.IsSet() { + fr.ContextLine = eventFrame.ContextLine.Val + } + if eventFrame.Filename.IsSet() { + fr.Filename = eventFrame.Filename.Val + } + if eventFrame.Function.IsSet() { + fr.Function = eventFrame.Function.Val + } + if eventFrame.LibraryFrame.IsSet() { + val := eventFrame.LibraryFrame.Val + fr.LibraryFrame = val + } + if eventFrame.LineNumber.IsSet() { + val := eventFrame.LineNumber.Val + fr.Lineno = &val + } + if eventFrame.Module.IsSet() { + fr.Module = eventFrame.Module.Val + } + if len(eventFrame.PostContext) > 0 { + fr.PostContext = make([]string, len(eventFrame.PostContext)) + copy(fr.PostContext, eventFrame.PostContext) + } + if len(eventFrame.PreContext) > 0 { + fr.PreContext = make([]string, len(eventFrame.PreContext)) + copy(fr.PreContext, eventFrame.PreContext) + } + if len(eventFrame.Vars) > 0 { + fr.Vars = eventFrame.Vars.Clone() + } + out[idx] = &fr + } +} + +func mapToTransactionModel(from *transaction, event *model.APMEvent) { + out := &model.Transaction{} + event.Processor = model.TransactionProcessor + event.Transaction = out + + // overwrite metadata with event specific information + mapToServiceModel(from.Context.Service, &event.Service) + mapToAgentModel(from.Context.Service.Agent, &event.Agent) + overwriteUserInMetadataModel(from.Context.User, event) + mapToUserAgentModel(from.Context.Request.Headers, &event.UserAgent) + mapToClientModel(from.Context.Request, &event.Source, &event.Client) + + // map transaction specific data + + if from.Context.IsSet() { + if len(from.Context.Custom) > 0 { + out.Custom = modeldecoderutil.NormalizeLabelValues(from.Context.Custom.Clone()) + } + if len(from.Context.Tags) > 0 { + event.Labels = modeldecoderutil.MergeLabels( + event.Labels, + modeldecoderutil.NormalizeLabelValues(from.Context.Tags), + ) + } + if from.Context.Message.IsSet() { + out.Message = &model.Message{} + if from.Context.Message.Age.IsSet() { + val := from.Context.Message.Age.Milliseconds.Val + out.Message.AgeMillis = &val + } + if from.Context.Message.Body.IsSet() { + out.Message.Body = from.Context.Message.Body.Val + } + if from.Context.Message.Headers.IsSet() { + out.Message.Headers = from.Context.Message.Headers.Val.Clone() + } + if from.Context.Message.Queue.IsSet() && from.Context.Message.Queue.Name.IsSet() { + out.Message.QueueName = from.Context.Message.Queue.Name.Val + } + } + if from.Context.Request.IsSet() { + event.HTTP.Request = &model.HTTPRequest{} + mapToRequestModel(from.Context.Request, event.HTTP.Request) + if from.Context.Request.HTTPVersion.IsSet() { + event.HTTP.Version = from.Context.Request.HTTPVersion.Val + } + } + if from.Context.Request.URL.IsSet() { + mapToRequestURLModel(from.Context.Request.URL, &event.URL) + } + if from.Context.Response.IsSet() { + event.HTTP.Response = &model.HTTPResponse{} + 
mapToResponseModel(from.Context.Response, event.HTTP.Response) + } + if from.Context.Page.IsSet() { + if from.Context.Page.URL.IsSet() && !from.Context.Request.URL.IsSet() { + event.URL = model.ParseURL(from.Context.Page.URL.Val, "", "") + } + if from.Context.Page.Referer.IsSet() { + if event.HTTP.Request == nil { + event.HTTP.Request = &model.HTTPRequest{} + } + if event.HTTP.Request.Referrer == "" { + event.HTTP.Request.Referrer = from.Context.Page.Referer.Val + } + } + } + } + if from.Duration.IsSet() { + duration := time.Duration(from.Duration.Val * float64(time.Millisecond)) + event.Event.Duration = duration + } + if from.ID.IsSet() { + out.ID = from.ID.Val + } + if from.Marks.IsSet() { + out.Marks = make(model.TransactionMarks, len(from.Marks.Events)) + for event, val := range from.Marks.Events { + if len(val.Measurements) > 0 { + out.Marks[event] = model.TransactionMark(val.Measurements) + } + } + } + if from.Name.IsSet() { + out.Name = from.Name.Val + } + if from.Outcome.IsSet() { + event.Event.Outcome = from.Outcome.Val + } else { + if from.Context.Response.StatusCode.IsSet() { + statusCode := from.Context.Response.StatusCode.Val + if statusCode >= http.StatusInternalServerError { + event.Event.Outcome = "failure" + } else { + event.Event.Outcome = "success" + } + } else { + event.Event.Outcome = "unknown" + } + } + if from.ParentID.IsSet() { + event.Parent.ID = from.ParentID.Val + } + if from.Result.IsSet() { + out.Result = from.Result.Val + } + sampled := true + if from.Sampled.IsSet() { + sampled = from.Sampled.Val + } + out.Sampled = sampled + if from.SampleRate.IsSet() { + if from.SampleRate.Val > 0 { + out.RepresentativeCount = 1 / from.SampleRate.Val + } + } else { + out.RepresentativeCount = 1 + } + if from.Session.ID.IsSet() { + event.Session.ID = from.Session.ID.Val + event.Session.Sequence = from.Session.Sequence.Val + } + if from.SpanCount.Dropped.IsSet() { + dropped := from.SpanCount.Dropped.Val + out.SpanCount.Dropped = &dropped + } + if from.SpanCount.Started.IsSet() { + started := from.SpanCount.Started.Val + out.SpanCount.Started = &started + } + if !from.Timestamp.Val.IsZero() { + event.Timestamp = from.Timestamp.Val + } + if from.TraceID.IsSet() { + event.Trace.ID = from.TraceID.Val + } + if from.Type.IsSet() { + out.Type = from.Type.Val + } + if from.UserExperience.IsSet() { + out.UserExperience = &model.UserExperience{ + CumulativeLayoutShift: -1, + FirstInputDelay: -1, + TotalBlockingTime: -1, + Longtask: model.LongtaskMetrics{Count: -1}, + } + if from.UserExperience.CumulativeLayoutShift.IsSet() { + out.UserExperience.CumulativeLayoutShift = from.UserExperience.CumulativeLayoutShift.Val + } + if from.UserExperience.FirstInputDelay.IsSet() { + out.UserExperience.FirstInputDelay = from.UserExperience.FirstInputDelay.Val + + } + if from.UserExperience.TotalBlockingTime.IsSet() { + out.UserExperience.TotalBlockingTime = from.UserExperience.TotalBlockingTime.Val + } + if from.UserExperience.Longtask.IsSet() { + out.UserExperience.Longtask = model.LongtaskMetrics{ + Count: from.UserExperience.Longtask.Count.Val, + Sum: from.UserExperience.Longtask.Sum.Val, + Max: from.UserExperience.Longtask.Max.Val, + } + } + } +} + +func mapToUserAgentModel(from nullable.HTTPHeader, out *model.UserAgent) { + // overwrite userAgent information if available + if from.IsSet() { + if h := from.Val.Values(textproto.CanonicalMIMEHeaderKey("User-Agent")); len(h) > 0 { + out.Original = strings.Join(h, ", ") + } + } +} + +func overwriteUserInMetadataModel(from user, out *model.APMEvent) 
{ + // overwrite User specific values if set + // either populate all User fields or none to avoid mixing + // different user data + if !from.Domain.IsSet() && !from.ID.IsSet() && !from.Email.IsSet() && !from.Name.IsSet() { + return + } + out.User = model.User{} + if from.Domain.IsSet() { + out.User.Domain = fmt.Sprint(from.Domain.Val) + } + if from.ID.IsSet() { + out.User.ID = fmt.Sprint(from.ID.Val) } - if m.User.Email.IsSet() { - out.User.Email = m.User.Email.Val + if from.Email.IsSet() { + out.User.Email = from.Email.Val } - if m.User.Name.IsSet() { - out.User.Name = m.User.Name.Val + if from.Name.IsSet() { + out.User.Name = from.Name.Val } } diff --git a/model/modeldecoder/v2/decoder_test.go b/model/modeldecoder/v2/decoder_test.go deleted file mode 100644 index 6cefecfc8d7..00000000000 --- a/model/modeldecoder/v2/decoder_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package v2 - -import ( - "fmt" - "net" - "reflect" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/decoder" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" -) - -func TestResetModelOnRelease(t *testing.T) { - inp := `{"metadata":{"service":{"name":"service-a"}}}` - m := fetchMetadataRoot() - require.NoError(t, decoder.NewJSONIteratorDecoder(strings.NewReader(inp)).Decode(m)) - require.True(t, m.IsSet()) - releaseMetadataRoot(m) - assert.False(t, m.IsSet()) -} - -func TestDecodeMetadata(t *testing.T) { - - for _, tc := range []struct { - name string - input string - decodeFn func(decoder.Decoder, *model.Metadata) error - }{ - {name: "decodeMetadata", decodeFn: DecodeMetadata, - input: `{"service":{"name":"user-service","agent":{"name":"go","version":"1.0.0"}}}`}, - {name: "decodeNestedMetadata", decodeFn: DecodeNestedMetadata, - input: `{"metadata":{"service":{"name":"user-service","agent":{"name":"go","version":"1.0.0"}}}}`}, - } { - t.Run("decode", func(t *testing.T) { - var out model.Metadata - dec := decoder.NewJSONIteratorDecoder(strings.NewReader(tc.input)) - require.NoError(t, tc.decodeFn(dec, &out)) - assert.Equal(t, model.Metadata{Service: model.Service{ - Name: "user-service", - Agent: model.Agent{Name: "go", Version: "1.0.0"}}}, out) - - err := tc.decodeFn(decoder.NewJSONIteratorDecoder(strings.NewReader(`malformed`)), &out) - require.Error(t, err) - assert.Contains(t, err.Error(), "decode") - }) - - t.Run("validate", func(t *testing.T) { - inp := `{}` - var out model.Metadata - err := tc.decodeFn(decoder.NewJSONIteratorDecoder(strings.NewReader(inp)), &out) - require.Error(t, err) 
- assert.Contains(t, err.Error(), "validation") - }) - } - -} - -func TestMappingToModel(t *testing.T) { - // setup: - // create initialized modeldecoder and empty model metadata - // map modeldecoder to model metadata and manually set - // enhanced data that are never set by the modeldecoder - var m metadata - modeldecodertest.SetStructValues(&m, "init", 5000) - var modelM model.Metadata - modelM.System.IP, modelM.Client.IP = net.ParseIP("127.0.0.1"), net.ParseIP("127.0.0.1") - modelM.UserAgent.Original, modelM.UserAgent.Name = "Firefox/15.0.1", "Firefox/15.0.1" - mapToMetadataModel(&m, &modelM) - - // iterate through model and assert values are set - assertStructValues(t, &modelM, "init", 5000) - - // overwrite model metadata with specified Values - // then iterate through model and assert values are overwritten - modeldecodertest.SetStructValues(&m, "overwritten", 12) - mapToMetadataModel(&m, &modelM) - assertStructValues(t, &modelM, "overwritten", 12) - - // map an empty modeldecoder metadata to the model - // and assert values are unchanged - modeldecodertest.SetZeroStructValues(&m) - mapToMetadataModel(&m, &modelM) - assertStructValues(t, &modelM, "overwritten", 12) -} - -func assertStructValues(t *testing.T, i interface{}, vStr string, vInt int) { - modeldecodertest.IterateStruct(i, func(f reflect.Value, key string) { - fVal := f.Interface() - var newVal interface{} - switch fVal.(type) { - case map[string]interface{}: - newVal = map[string]interface{}{vStr: vStr} - case common.MapStr: - newVal = common.MapStr{vStr: vStr} - case []string: - newVal = []string{vStr} - case []int: - newVal = []int{vInt, vInt} - case string: - newVal = vStr - case int: - newVal = vInt - case *int: - iptr := f.Interface().(*int) - fVal = *iptr - newVal = vInt - case net.IP: - default: - if f.Type().Kind() == reflect.Struct { - return - } - panic(fmt.Sprintf("unhandled type %T for key %s", f.Type().Kind(), key)) - } - if strings.HasPrefix(key, "UserAgent") || key == "Client.IP" || key == "System.IP" { - // these values are not set by modeldecoder - return - } - assert.Equal(t, newVal, fVal, key) - }) -} diff --git a/model/modeldecoder/v2/error_test.go b/model/modeldecoder/v2/error_test.go new file mode 100644 index 00000000000..d7e5fb22db7 --- /dev/null +++ b/model/modeldecoder/v2/error_test.go @@ -0,0 +1,200 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
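As an illustrative aside (not part of this patch): the span mapping in decoder.go above decomposes a dotted "type" into type/subtype/action only when neither "subtype" nor "action" was sent explicitly. A minimal, self-contained sketch of that decomposition follows; the helper name splitDottedType is hypothetical.

package main

import (
	"fmt"
	"strings"
)

// splitDottedType mirrors the decoder logic above: the first segment is the
// span type, the second the subtype, and everything past the second dot is
// folded back into the action.
func splitDottedType(s string) (typ, subtype, action string) {
	parts := strings.Split(s, ".")
	typ = parts[0]
	if len(parts) > 1 {
		subtype = parts[1]
		if len(parts) > 2 {
			action = strings.Join(parts[2:], ".")
		}
	}
	return typ, subtype, action
}

func main() {
	fmt.Println(splitDottedType("db.mysql.query"))          // db mysql query
	fmt.Println(splitDottedType("external.http.get.retry")) // external http get.retry
}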
+ +package v2 + +import ( + "net" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestResetErrorOnRelease(t *testing.T) { + inp := `{"error":{"id":"tr-a"}}` + root := fetchErrorRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(root)) + require.True(t, root.IsSet()) + releaseErrorRoot(root) + assert.False(t, root.IsSet()) +} + +func TestDecodeNestedError(t *testing.T) { + t.Run("decode", func(t *testing.T) { + now := time.Now() + defaultVal := modeldecodertest.DefaultValues() + _, eventBase := initializedInputMetadata(defaultVal) + eventBase.Timestamp = now + input := modeldecoder.Input{Base: eventBase} + str := `{"error":{"id":"a-b-c","timestamp":1599996822281000,"log":{"message":"abc"}}}` + dec := decoder.NewJSONDecoder(strings.NewReader(str)) + var batch model.Batch + require.NoError(t, DecodeNestedError(dec, &input, &batch)) + require.Len(t, batch, 1) + require.NotNil(t, batch[0].Error) + + defaultVal.Update(time.Unix(1599996822, 281000000).UTC()) + modeldecodertest.AssertStructValues(t, &batch[0], isMetadataException, defaultVal) + + str = `{"error":{"id":"a-b-c","log":{"message":"abc"},"context":{"experimental":"exp"}}}` + dec = decoder.NewJSONDecoder(strings.NewReader(str)) + batch = model.Batch{} + require.NoError(t, DecodeNestedError(dec, &input, &batch)) + // if no timestamp is provided, leave base event time unmodified + assert.Equal(t, now, batch[0].Timestamp) + + err := DecodeNestedError(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + var out model.Batch + err := DecodeNestedError(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &out) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) +} + +func TestDecodeMapToErrorModel(t *testing.T) { + gatewayIP := net.ParseIP("192.168.0.1") + randomIP := net.ParseIP("71.0.54.1") + + exceptions := func(key string) bool { return false } + t.Run("metadata-overwrite", func(t *testing.T) { + // overwrite defined metadata with event metadata values + var input errorEvent + _, out := initializedInputMetadata(modeldecodertest.DefaultValues()) + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToErrorModel(&input, &out) + input.Reset() + + // ensure event Metadata are updated where expected + userAgent := strings.Join(otherVal.HTTPHeader.Values("User-Agent"), ", ") + assert.Equal(t, userAgent, out.UserAgent.Original) + // do not overwrite client.ip if already set in metadata + ip := modeldecodertest.DefaultValues().IP + assert.Equal(t, ip, out.Client.IP, out.Client.IP.String()) + assert.Equal(t, common.MapStr{ + "init0": "init", "init1": "init", "init2": "init", + "overwritten0": "overwritten", "overwritten1": "overwritten", + }, out.Labels) + // service and user values should be set + modeldecodertest.AssertStructValues(t, &out.Service, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, &out.User, exceptions, otherVal) + }) + + t.Run("client-ip-header", func(t 
*testing.T) { + var input errorEvent + var out model.APMEvent + input.Context.Request.Headers.Set(http.Header{}) + input.Context.Request.Headers.Val.Add("x-real-ip", gatewayIP.String()) + input.Context.Request.Socket.RemoteAddress.Set(randomIP.String()) + mapToErrorModel(&input, &out) + assert.Equal(t, gatewayIP, out.Client.IP, out.Client.IP.String()) + }) + + t.Run("client-ip-socket", func(t *testing.T) { + var input errorEvent + var out model.APMEvent + input.Context.Request.Socket.RemoteAddress.Set(randomIP.String()) + mapToErrorModel(&input, &out) + assert.Equal(t, randomIP, out.Client.IP, out.Client.IP.String()) + }) + + t.Run("error-values", func(t *testing.T) { + exceptions := func(key string) bool { + for _, s := range []string{ + // GroupingKey is set by a model processor + "GroupingKey", + // stacktrace original and sourcemap values are set when sourcemapping is applied + "Exception.Stacktrace.Original", + "Exception.Stacktrace.Sourcemap", + "Log.Stacktrace.Original", + "Log.Stacktrace.Sourcemap", + // ExcludeFromGrouping is set when processing the event + "Exception.Stacktrace.ExcludeFromGrouping", + "Log.Stacktrace.ExcludeFromGrouping", + } { + if strings.HasPrefix(key, s) { + return true + } + } + return false + } + var input errorEvent + var out1, out2 model.APMEvent + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToErrorModel(&input, &out1) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Error, exceptions, defaultVal) + + // reuse input model for different event + // ensure memory is not shared by reusing input model + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToErrorModel(&input, &out2) + modeldecodertest.AssertStructValues(t, out2.Error, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Error, exceptions, defaultVal) + }) + + t.Run("http-headers", func(t *testing.T) { + var input errorEvent + input.Context.Request.Headers.Set(http.Header{"a": []string{"b"}, "c": []string{"d", "e"}}) + input.Context.Response.Headers.Set(http.Header{"f": []string{"g"}}) + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, common.MapStr{"a": []string{"b"}, "c": []string{"d", "e"}}, out.HTTP.Request.Headers) + assert.Equal(t, common.MapStr{"f": []string{"g"}}, out.HTTP.Response.Headers) + }) + + t.Run("page.URL", func(t *testing.T) { + var input errorEvent + input.Context.Page.URL.Set("https://my.site.test:9201") + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.URL.Full) + }) + + t.Run("page.referer", func(t *testing.T) { + var input errorEvent + input.Context.Page.Referer.Set("https://my.site.test:9201") + var out model.APMEvent + mapToErrorModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.HTTP.Request.Referrer) + }) + + t.Run("exception-code", func(t *testing.T) { + var input errorEvent + var out model.APMEvent + input.Exception.Code.Set(123.456) + mapToErrorModel(&input, &out) + assert.Equal(t, "123", out.Error.Exception.Code) + }) +} diff --git a/model/modeldecoder/v2/jsonschema_test.go b/model/modeldecoder/v2/jsonschema_test.go new file mode 100644 index 00000000000..deee2548e38 --- /dev/null +++ b/model/modeldecoder/v2/jsonschema_test.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/xeipuuv/gojsonschema" + + "github.com/elastic/apm-server/decoder" +) + +type testFile struct { + name string + r io.Reader + valid bool +} + +func TestJSONSchema(t *testing.T) { + rootDir := filepath.Join("..", "..", "..") + // read and organize test data + testdataDir := filepath.Join(rootDir, "testdata", "intake-v2") + var files []testFile + err := filepath.Walk(testdataDir, func(p string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil + } + // ignore files with invalid json + if info.Name() == "invalid-json-metadata.ndjson" { + return nil + } + var valid bool + if !strings.HasPrefix(info.Name(), "invalid") { + valid = true + } + r, err := os.Open(p) + if err != nil { + return err + } + files = append(files, testFile{name: info.Name(), r: r, valid: valid}) + return nil + }) + require.NoError(t, err) + // read and organize schemas + schemaDir := filepath.Join(rootDir, "docs", "spec", "v2") + schemas := map[string]string{} + err = filepath.Walk(schemaDir, func(p string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil + } + f, err := ioutil.ReadFile(p) + if err != nil { + return err + } + schemas[strings.TrimSuffix(info.Name(), ".json")] = string(f) + return nil + }) + require.NoError(t, err) + + for _, f := range files { + // validate data against schemas + t.Run(f.name, func(t *testing.T) { + var data map[string]json.RawMessage + dec := decoder.NewNDJSONStreamDecoder(f.r, 300*1024) + b, err := dec.ReadAhead() + require.NoError(t, err) + require.NoError(t, json.Unmarshal(b, &data)) + for k := range data { + schema, ok := schemas[k] + if !ok && !f.valid { + // if no schema exists for invalid test event just ignore it + continue + } + t.Run(k, func(t *testing.T) { + schemaLoader := gojsonschema.NewStringLoader(schema) + dataLoader := gojsonschema.NewStringLoader(string(data[k])) + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + require.NoError(t, err) + expected := f.valid + if k == "metadata" && f.name != "invalid-metadata.ndjson" { + // all invalid test files contain valid metadata + expected = true + } + assert.Equal(t, expected, result.Valid(), result.Errors()) + }) + } + }) + } +} diff --git a/model/modeldecoder/v2/metadata_test.go b/model/modeldecoder/v2/metadata_test.go new file mode 100644 index 00000000000..fc3b8e942b8 --- /dev/null +++ b/model/modeldecoder/v2/metadata_test.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" +) + +func isMetadataException(key string) bool { + return isUnmappedMetadataField(key) || isEventField(key) +} + +// unmappedMetadataFields holds the list of model fields that have no equivalent +// in the metadata input type. +func isUnmappedMetadataField(key string) bool { + switch key { + case + "Child", + "Child.ID", + "Client.Domain", + "Client.IP", + "Client.Port", + "Container.Runtime", + "Container.ImageName", + "Container.ImageTag", + "Container.Name", + "DataStream", + "DataStream.Type", + "DataStream.Dataset", + "DataStream.Namespace", + "Destination", + "Destination.Address", + "Destination.IP", + "Destination.Port", + "ECSVersion", + "HTTP", + "HTTP.Request", + "HTTP.Response", + "HTTP.Version", + "Message", + "Network", + "Network.Connection", + "Network.Connection.Subtype", + "Network.Carrier", + "Network.Carrier.Name", + "Network.Carrier.MCC", + "Network.Carrier.MNC", + "Network.Carrier.ICC", + "Observer", + "Observer.EphemeralID", + "Observer.Hostname", + "Observer.ID", + "Observer.Name", + "Observer.Type", + "Observer.Version", + "Observer.VersionMajor", + "Parent", + "Parent.ID", + "Process.CommandLine", + "Process.Executable", + "Processor", + "Processor.Event", + "Processor.Name", + "Host.OS.Full", + "Host.OS.Type", + "Host.ID", + "Host.IP", + "Host.Type", + "UserAgent", + "UserAgent.Name", + "UserAgent.Original", + "Event", + "Event.Duration", + "Event.Outcome", + "Session.ID", + "Session", + "Session.Sequence", + "Source.Domain", + "Source.IP", + "Source.Port", + "Trace", + "Trace.ID", + "URL", + "URL.Original", + "URL.Scheme", + "URL.Full", + "URL.Domain", + "URL.Port", + "URL.Path", + "URL.Query", + "URL.Fragment": + return true + } + return false +} + +func isEventField(key string) bool { + for _, prefix := range []string{"Error", "Metricset", "ProfileSample", "Span", "Transaction"} { + if key == prefix || strings.HasPrefix(key, prefix+".") { + return true + } + } + return false +} + +func initializedInputMetadata(values *modeldecodertest.Values) (metadata, model.APMEvent) { + var input metadata + var out model.APMEvent + modeldecodertest.SetStructValues(&input, values) + mapToMetadataModel(&input, &out) + modeldecodertest.SetStructValues(&out, values, func(key string, field, value reflect.Value) bool { + return isUnmappedMetadataField(key) || isEventField(key) + }) + return input, out +} + +func TestResetMetadataOnRelease(t *testing.T) { + inp := `{"metadata":{"service":{"name":"service-a"}}}` + m := fetchMetadataRoot() + 
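// fetchMetadataRoot and releaseMetadataRoot recycle the decoder model
+	// through an internal pool; the reset-on-release asserted below is what
+	// keeps one event's data from leaking into the next decode that reuses
+	// the pooled object.
+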
require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(m)) + require.True(t, m.IsSet()) + releaseMetadataRoot(m) + assert.False(t, m.IsSet()) +} + +func TestDecodeMetadata(t *testing.T) { + for _, tc := range []struct { + name string + input string + decodeFn func(decoder.Decoder, *model.APMEvent) error + }{ + {name: "decodeMetadata", decodeFn: DecodeMetadata, + input: `{"service":{"name":"user-service","agent":{"name":"go","version":"1.0.0"}}}`}, + {name: "decodeNestedMetadata", decodeFn: DecodeNestedMetadata, + input: `{"metadata":{"service":{"name":"user-service","agent":{"name":"go","version":"1.0.0"}}}}`}, + } { + t.Run("decode", func(t *testing.T) { + var out model.APMEvent + dec := decoder.NewJSONDecoder(strings.NewReader(tc.input)) + require.NoError(t, tc.decodeFn(dec, &out)) + assert.Equal(t, model.APMEvent{ + Service: model.Service{Name: "user-service"}, + Agent: model.Agent{Name: "go", Version: "1.0.0"}, + }, out) + + err := tc.decodeFn(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &out) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + inp := `{}` + var out model.APMEvent + err := tc.decodeFn(decoder.NewJSONDecoder(strings.NewReader(inp)), &out) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) + } +} + +func TestDecodeMapToMetadataModel(t *testing.T) { + + t.Run("overwrite", func(t *testing.T) { + // setup: + // create initialized modeldecoder and empty model metadata + // map modeldecoder to model metadata and manually set + // enhanced data that are never set by the modeldecoder + defaultVal := modeldecodertest.DefaultValues() + input, out := initializedInputMetadata(defaultVal) + out.Timestamp = defaultVal.Time + + // iterate through model and assert values are set + modeldecodertest.AssertStructValues(t, &out, isMetadataException, defaultVal) + + // overwrite model metadata with specified Values + // then iterate through model and assert values are overwritten + otherVal := modeldecodertest.NonDefaultValues() + // System.IP and Client.IP are not set by decoder, + // therefore their values are not updated + otherVal.Update(defaultVal.IP) + input.Reset() + modeldecodertest.SetStructValues(&input, otherVal) + out.Timestamp = otherVal.Time + mapToMetadataModel(&input, &out) + modeldecodertest.AssertStructValues(t, &out, isMetadataException, otherVal) + + // map an empty modeldecoder metadata to the model + // and assert values are unchanged + input.Reset() + modeldecodertest.SetZeroStructValues(&input) + mapToMetadataModel(&input, &out) + modeldecodertest.AssertStructValues(t, &out, isMetadataException, otherVal) + }) + + t.Run("reused-memory", func(t *testing.T) { + var out2 model.APMEvent + defaultVal := modeldecodertest.DefaultValues() + input, out1 := initializedInputMetadata(defaultVal) + out1.Timestamp = defaultVal.Time + + // iterate through model and assert values are set + modeldecodertest.AssertStructValues(t, &out1, isMetadataException, defaultVal) + + // overwrite model metadata with specified Values + // then iterate through model and assert values are overwritten + otherVal := modeldecodertest.NonDefaultValues() + // System.IP and Client.IP are not set by decoder, + // therefore their values are not updated + otherVal.Update(defaultVal.IP) + input.Reset() + modeldecodertest.SetStructValues(&input, otherVal) + mapToMetadataModel(&input, &out2) + out2.Timestamp = otherVal.Time + out2.Host.IP = defaultVal.IP + out2.Client.IP = defaultVal.IP + 
out2.Source.IP = defaultVal.IP
+		modeldecodertest.AssertStructValues(t, &out2, isMetadataException, otherVal)
+		modeldecodertest.AssertStructValues(t, &out1, isMetadataException, defaultVal)
+	})
+
+	t.Run("system", func(t *testing.T) {
+		var input metadata
+		var out model.APMEvent
+		// full input information
+		modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues())
+		input.System.ConfiguredHostname.Set("configured-host")
+		input.System.DetectedHostname.Set("detected-host")
+		input.System.DeprecatedHostname.Set("deprecated-host")
+		mapToMetadataModel(&input, &out)
+		assert.Equal(t, "configured-host", out.Host.Name)
+		assert.Equal(t, "detected-host", out.Host.Hostname)
+		// no detected-host information
+		out = model.APMEvent{}
+		input.System.DetectedHostname.Reset()
+		mapToMetadataModel(&input, &out)
+		assert.Equal(t, "configured-host", out.Host.Name)
+		assert.Empty(t, out.Host.Hostname)
+		// no configured-host information
+		out = model.APMEvent{}
+		input.System.ConfiguredHostname.Reset()
+		mapToMetadataModel(&input, &out)
+		assert.Empty(t, out.Host.Name)
+		assert.Equal(t, "deprecated-host", out.Host.Hostname)
+		// no host information given
+		out = model.APMEvent{}
+		input.System.DeprecatedHostname.Reset()
+		mapToMetadataModel(&input, &out)
+		assert.Empty(t, out.Host.Name)
+		assert.Empty(t, out.Host.Hostname)
+	})
+}
diff --git a/model/modeldecoder/v2/metricset_test.go b/model/modeldecoder/v2/metricset_test.go
new file mode 100644
index 00000000000..4493706cec3
--- /dev/null
+++ b/model/modeldecoder/v2/metricset_test.go
@@ -0,0 +1,254 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
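Another illustrative aside (hypothetical helper, not part of this patch): the hostname precedence exercised by the "system" subtest above, as a standalone sketch. configured_hostname feeds host.name, detected_hostname feeds host.hostname, and the deprecated top-level hostname is consulted only when the agent sent neither of the newer fields.

package main

import "fmt"

// resolveHost mirrors the assertions in the "system" subtest: configured
// wins for host.name, detected wins for host.hostname, and the deprecated
// field is a fallback only for agents that predate the newer ones.
func resolveHost(configured, detected, deprecated string) (name, hostname string) {
	name = configured
	switch {
	case detected != "":
		hostname = detected
	case configured == "":
		hostname = deprecated
	}
	return name, hostname
}

func main() {
	n, h := resolveHost("configured-host", "detected-host", "deprecated-host")
	fmt.Printf("name=%q hostname=%q\n", n, h) // name="configured-host" hostname="detected-host"
	n, h = resolveHost("configured-host", "", "deprecated-host")
	fmt.Printf("name=%q hostname=%q\n", n, h) // name="configured-host" hostname=""
	n, h = resolveHost("", "", "deprecated-host")
	fmt.Printf("name=%q hostname=%q\n", n, h) // name="" hostname="deprecated-host"
}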
+ +package v2 + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" +) + +func TestResetMetricsetOnRelease(t *testing.T) { + inp := `{"metricset":{"samples":{"a.b.":{"value":2048}}}}` + root := fetchMetricsetRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(root)) + require.True(t, root.IsSet()) + releaseMetricsetRoot(root) + assert.False(t, root.IsSet()) +} + +func TestDecodeNestedMetricset(t *testing.T) { + t.Run("decode", func(t *testing.T) { + now := time.Now() + defaultVal := modeldecodertest.DefaultValues() + _, eventBase := initializedInputMetadata(defaultVal) + eventBase.Timestamp = now + input := modeldecoder.Input{Base: eventBase} + str := `{"metricset":{"timestamp":1599996822281000,"samples":{"a.b":{"value":2048}}}}` + dec := decoder.NewJSONDecoder(strings.NewReader(str)) + var batch model.Batch + require.NoError(t, DecodeNestedMetricset(dec, &input, &batch)) + require.Len(t, batch, 1) + require.NotNil(t, batch[0].Metricset) + assert.Equal(t, map[string]model.MetricsetSample{"a.b": {Value: 2048}}, batch[0].Metricset.Samples) + defaultVal.Update(time.Unix(1599996822, 281000000).UTC()) + modeldecodertest.AssertStructValues(t, &batch[0], isMetadataException, defaultVal) + + // invalid type + err := DecodeNestedMetricset(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + var batch model.Batch + err := DecodeNestedMetricset(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) +} + +func TestDecodeMapToMetricsetModel(t *testing.T) { + exceptions := func(key string) bool { + if key == "DocCount" || + key == "Name" || + key == "TimeseriesInstanceID" || + // test Samples separately + strings.HasPrefix(key, "Samples") { + return true + } + return false + } + + t.Run("metricset-values", func(t *testing.T) { + var input metricset + var out1, out2 model.APMEvent + now := time.Now().Add(time.Second) + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + input.Transaction.Reset() // tested by TestDecodeMetricsetInternal + + mapToMetricsetModel(&input, &out1) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Metricset, exceptions, defaultVal) + defaultSamples := map[string]model.MetricsetSample{ + defaultVal.Str + "0": { + Type: model.MetricType(defaultVal.Str), + Unit: defaultVal.Str, + Value: defaultVal.Float, + Histogram: model.Histogram{ + Counts: repeatInt64(int64(defaultVal.Int), defaultVal.N), + Values: repeatFloat64(defaultVal.Float, defaultVal.N), + }, + }, + defaultVal.Str + "1": { + Type: model.MetricType(defaultVal.Str), + Unit: defaultVal.Str, + Value: defaultVal.Float, + Histogram: model.Histogram{ + Counts: repeatInt64(int64(defaultVal.Int), defaultVal.N), + Values: repeatFloat64(defaultVal.Float, defaultVal.N), + }, + }, + defaultVal.Str + "2": { + Type: model.MetricType(defaultVal.Str), + Unit: defaultVal.Str, + Value: defaultVal.Float, + Histogram: model.Histogram{ + Counts: repeatInt64(int64(defaultVal.Int), defaultVal.N), 
+ Values: repeatFloat64(defaultVal.Float, defaultVal.N), + }, + }, + } + assert.Equal(t, defaultSamples, out1.Metricset.Samples) + + // leave Timestamp unmodified if eventTime is zero + out1.Timestamp = now + defaultVal.Update(time.Time{}) + modeldecodertest.SetStructValues(&input, defaultVal) + input.Transaction.Reset() + mapToMetricsetModel(&input, &out1) + defaultVal.Update(now) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Metricset, exceptions, defaultVal) + + // ensure memory is not shared by reusing input model + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + input.Transaction.Reset() + mapToMetricsetModel(&input, &out2) + modeldecodertest.AssertStructValues(t, out2.Metricset, exceptions, otherVal) + otherSamples := map[string]model.MetricsetSample{ + otherVal.Str + "0": { + Type: model.MetricType(otherVal.Str), + Unit: otherVal.Str, + Value: otherVal.Float, + Histogram: model.Histogram{ + Counts: repeatInt64(int64(otherVal.Int), otherVal.N), + Values: repeatFloat64(otherVal.Float, otherVal.N), + }, + }, + otherVal.Str + "1": { + Type: model.MetricType(otherVal.Str), + Unit: otherVal.Str, + Value: otherVal.Float, + Histogram: model.Histogram{ + Counts: repeatInt64(int64(otherVal.Int), otherVal.N), + Values: repeatFloat64(otherVal.Float, otherVal.N), + }, + }, + } + assert.Equal(t, otherSamples, out2.Metricset.Samples) + modeldecodertest.AssertStructValues(t, out1.Metricset, exceptions, defaultVal) + assert.Equal(t, defaultSamples, out1.Metricset.Samples) + }) +} + +func TestDecodeMetricsetInternal(t *testing.T) { + var batch model.Batch + + err := DecodeNestedMetricset(decoder.NewJSONDecoder(strings.NewReader(`{ + "metricset": { + "timestamp": 0, + "samples": { + "transaction.breakdown.count": {"value": 123}, + "transaction.duration.count": {"value": 456}, + "transaction.duration.sum.us": {"value": 789} + }, + "transaction": { + "name": "transaction_name", + "type": "transaction_type" + } + } + }`)), &modeldecoder.Input{}, &batch) + require.NoError(t, err) + + err = DecodeNestedMetricset(decoder.NewJSONDecoder(strings.NewReader(`{ + "metricset": { + "timestamp": 0, + "samples": { + "span.self_time.count": {"value": 123}, + "span.self_time.sum.us": {"value": 456} + }, + "transaction": { + "name": "transaction_name", + "type": "transaction_type" + }, + "span": { + "type": "span_type", + "subtype": "span_subtype" + } + } + }`)), &modeldecoder.Input{}, &batch) + require.NoError(t, err) + + assert.Equal(t, model.Batch{{ + Timestamp: time.Unix(0, 0).UTC(), + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{}, + Transaction: &model.Transaction{ + Name: "transaction_name", + Type: "transaction_type", + BreakdownCount: 123, + AggregatedDuration: model.AggregatedDuration{ + Count: 456, + Sum: 789 * time.Microsecond, + }, + }, + }, { + Timestamp: time.Unix(0, 0).UTC(), + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{}, + Transaction: &model.Transaction{ + Name: "transaction_name", + Type: "transaction_type", + }, + Span: &model.Span{ + Type: "span_type", + Subtype: "span_subtype", + SelfTime: model.AggregatedDuration{ + Count: 123, + Sum: 456 * time.Microsecond, + }, + }, + }}, batch) +} + +func repeatInt64(v int64, n int) []int64 { + vs := make([]int64, n) + for i := range vs { + vs[i] = v + } + return vs +} + +func repeatFloat64(v float64, n int) []float64 { + vs := make([]float64, n) + for i := range vs { + vs[i] = v + } + return vs +} diff --git a/model/modeldecoder/v2/model.go 
b/model/modeldecoder/v2/model.go index a8003993bac..ea304807d25 100644
--- a/model/modeldecoder/v2/model.go
+++ b/model/modeldecoder/v2/model.go
@@ -18,7 +18,7 @@ package v2
 import (
-	"regexp"
+	"encoding/json"
 	"github.com/elastic/beats/v7/libbeat/common"
@@ -26,129 +26,883 @@ import (
 )
 var (
-	alphaNumericExtRegex = regexp.MustCompile("^[a-zA-Z0-9 _-]+$")
-	labelsRegex          = regexp.MustCompile("^[^.*\"]*$") //do not allow '.' '*' '"'
+	patternAlphaNumericExt = `^[a-zA-Z0-9 _-]+$`
+	patternNoAsteriskQuote = `^[^*"]*$` //do not allow '*' '"'
+
+	enumOutcome = []string{"success", "failure", "unknown"}
 )
+// entry points
+
+// errorRoot requires an error event to be present
+type errorRoot struct {
+	Error errorEvent `json:"error" validate:"required"`
+}
+
+// metadataRoot requires a metadata event to be present
 type metadataRoot struct {
 	Metadata metadata `json:"metadata" validate:"required"`
 }
+// metricsetRoot requires a metricset event to be present
+type metricsetRoot struct {
+	Metricset metricset `json:"metricset" validate:"required"`
+}
+
+// spanRoot requires a span event to be present
+type spanRoot struct {
+	Span span `json:"span" validate:"required"`
+}
+
+// transactionRoot requires a transaction event to be present
+type transactionRoot struct {
+	Transaction transaction `json:"transaction" validate:"required"`
+}
+
+// other structs
+
+type context struct {
+	// Custom can contain additional metadata to be stored with the event.
+	// The format is unspecified and can be deeply nested objects.
+	// The information will not be indexed or searchable in Elasticsearch.
+	Custom common.MapStr `json:"custom"`
+	// Message holds details related to message receiving and publishing
+	// if the captured event integrates with a messaging system
+	Message contextMessage `json:"message"`
+	// Page holds information related to the current page and page referers.
+	// It is only sent from RUM agents.
+	Page contextPage `json:"page"`
+	// Response describes the HTTP response information in case the event was
+	// created as a result of an HTTP request.
+	Response contextResponse `json:"response"`
+	// Request describes the HTTP request information in case the event was
+	// created as a result of an HTTP request.
+	Request contextRequest `json:"request"`
+	// Service related information can be sent per event. Information provided
+	// here will override the more generic information retrieved from metadata;
+	// missing service fields will be retrieved from the metadata information.
+	Service contextService `json:"service"`
+	// Tags are a flat mapping of user-defined tags. On the agent side, tags
+	// are called labels. Allowed value types are string, boolean and number
+	// values. Tags are indexed and searchable.
+	Tags common.MapStr `json:"tags" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"`
+	// User holds information about the correlated user for this event. If
+	// user data are provided here, all user related information from metadata
+	// is ignored, otherwise the metadata's user information will be stored
+	// with the event.
+	User user `json:"user"`
+}
+
+type contextMessage struct {
+	// Age of the message. If the monitored messaging framework provides a
+	// timestamp for the message, agents may use it. Otherwise, the sending
+	// agent can add a timestamp in milliseconds since the Unix epoch to the
+	// message's metadata to be retrieved by the receiving agent. If a
+	// timestamp is not available, agents should omit this field.
+	Age contextMessageAge `json:"age"`
+	// Body of the received message, similar to an HTTP request body
+	Body nullable.String `json:"body"`
+	// Headers received with the message, similar to HTTP request headers.
+	Headers nullable.HTTPHeader `json:"headers"`
+	// Queue holds information about the message queue where the message is received.
+	Queue contextMessageQueue `json:"queue"`
+}
+
+type contextMessageAge struct {
+	// Age of the message in milliseconds.
+	Milliseconds nullable.Int `json:"ms"`
+}
+
+type contextMessageQueue struct {
+	// Name holds the name of the message queue where the message is received.
+	Name nullable.String `json:"name" validate:"maxLength=1024"`
+}
+
+type contextPage struct {
+	// Referer holds the URL of the page that 'linked' to the current page.
+	Referer nullable.String `json:"referer"`
+	// URL of the current page
+	URL nullable.String `json:"url"`
+}
+
+type contextRequest struct {
+	// Body only contains the request body, not the query string information.
+	// It can either be a dictionary (for standard HTTP requests) or a raw
+	// request body.
+	Body nullable.Interface `json:"body" validate:"inputTypes=string;object"`
+	// Cookies used by the request, parsed as key-value objects.
+	Cookies common.MapStr `json:"cookies"`
+	// Env holds environment variable information passed to the monitored service.
+	Env common.MapStr `json:"env"`
+	// Headers includes any HTTP headers sent by the requester. Cookies will
+	// be taken from the headers if supplied.
+	Headers nullable.HTTPHeader `json:"headers"`
+	// HTTPVersion holds information about the used HTTP version.
+	HTTPVersion nullable.String `json:"http_version" validate:"maxLength=1024"`
+	// Method holds information about the method of the HTTP request.
+	Method nullable.String `json:"method" validate:"required,maxLength=1024"`
+	// Socket holds information related to the recorded request,
+	// such as whether or not data were encrypted and the remote address.
+	Socket contextRequestSocket `json:"socket"`
+	// URL holds information such as the raw URL, scheme, host and path.
+	URL contextRequestURL `json:"url"`
+}
+
+type contextRequestURL struct {
+	// Full, possibly agent-assembled URL of the request,
+	// e.g. https://example.com:443/search?q=elasticsearch#top.
+	Full nullable.String `json:"full" validate:"maxLength=1024"`
+	// Hash of the request URL, e.g. 'top'
+	Hash nullable.String `json:"hash" validate:"maxLength=1024"`
+	// Hostname information of the request, e.g. 'example.com'.
+	Hostname nullable.String `json:"hostname" validate:"maxLength=1024"`
+	// Path of the request, e.g. '/search'
+	Path nullable.String `json:"pathname" validate:"maxLength=1024"`
+	// Port of the request, e.g. '443'. Can be sent as string or int.
+	Port nullable.Interface `json:"port" validate:"inputTypes=string;int,targetType=int,maxLength=1024"`
+	// Protocol information for the recorded request, e.g. 'https:'.
+	Protocol nullable.String `json:"protocol" validate:"maxLength=1024"`
+	// Raw unparsed URL of the HTTP request line,
+	// e.g. https://example.com:443/search?q=elasticsearch. This URL may be
+	// absolute or relative. For more details, see
+	// https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.
+	Raw nullable.String `json:"raw" validate:"maxLength=1024"`
+	// Search contains the query string information of the request. It is
+	// expected to have values delimited by ampersands.
+ Search nullable.String `json:"search" validate:"maxLength=1024"` +} + +type contextRequestSocket struct { + // Encrypted indicates whether a request was sent as TLS/HTTPS request. + // DEPRECATED: this field will be removed in a future release. + Encrypted nullable.Bool `json:"encrypted"` + // RemoteAddress holds the network address sending the request. It should + // be obtained through standard APIs and not be parsed from any headers + // like 'Forwarded'. + RemoteAddress nullable.String `json:"remote_address"` +} + +type contextResponse struct { + // DecodedBodySize holds the size of the decoded payload. + DecodedBodySize nullable.Float64 `json:"decoded_body_size"` + // EncodedBodySize holds the size of the encoded payload. + EncodedBodySize nullable.Float64 `json:"encoded_body_size"` + // Finished indicates whether the response was finished or not. + Finished nullable.Bool `json:"finished"` + // Headers holds the http headers sent in the http response. + Headers nullable.HTTPHeader `json:"headers"` + // HeadersSent indicates whether http headers were sent. + HeadersSent nullable.Bool `json:"headers_sent"` + // StatusCode sent in the http response. + StatusCode nullable.Int `json:"status_code"` + // TransferSize holds the total size of the payload. + TransferSize nullable.Float64 `json:"transfer_size"` +} + +type contextService struct { + // Agent holds information about the APM agent capturing the event. + Agent contextServiceAgent `json:"agent"` + // Environment in which the monitored service is running, + // e.g. `production` or `staging`. + Environment nullable.String `json:"environment" validate:"maxLength=1024"` + // Framework holds information about the framework used in the + // monitored service. + Framework contextServiceFramework `json:"framework"` + // Language holds information about the programming language of the + // monitored service. + Language contextServiceLanguage `json:"language"` + // Name of the monitored service. + Name nullable.String `json:"name" validate:"maxLength=1024,pattern=patternAlphaNumericExt"` + // Node must be a unique meaningful name of the service node. + Node contextServiceNode `json:"node"` + // Runtime holds information about the language runtime running the + // monitored service + Runtime contextServiceRuntime `json:"runtime"` + // Version of the monitored service. + Version nullable.String `json:"version" validate:"maxLength=1024"` +} + +type contextServiceAgent struct { + // EphemeralID is a free format ID used for metrics correlation by agents + EphemeralID nullable.String `json:"ephemeral_id" validate:"maxLength=1024"` + // Name of the APM agent capturing information. + Name nullable.String `json:"name" validate:"maxLength=1024"` + // Version of the APM agent capturing information. 
+ Version nullable.String `json:"version" validate:"maxLength=1024"` +} + +type contextServiceFramework struct { + // Name of the used framework + Name nullable.String `json:"name" validate:"maxLength=1024"` + // Version of the used framework + Version nullable.String `json:"version" validate:"maxLength=1024"` +} + +type contextServiceLanguage struct { + // Name of the used programming language + Name nullable.String `json:"name" validate:"maxLength=1024"` + // Version of the used programming language + Version nullable.String `json:"version" validate:"maxLength=1024"` +} + +type contextServiceNode struct { + // Name of the service node + Name nullable.String `json:"configured_name" validate:"maxLength=1024"` +} + +type contextServiceRuntime struct { + // Name of the language runtime + Name nullable.String `json:"name" validate:"maxLength=1024"` + // Version of the language runtime + Version nullable.String `json:"version" validate:"maxLength=1024"` +} + +// errorEvent represents an error or a logged error message, +// captured by an APM agent in a monitored service. +type errorEvent struct { + // Context holds arbitrary contextual information for the event. + Context context `json:"context"` + // Culprit identifies the function call which was the primary perpetrator + // of this event. + Culprit nullable.String `json:"culprit" validate:"maxLength=1024"` + // Exception holds information about the original error. + // The information is language specific. + Exception errorException `json:"exception"` + // ID holds the hex encoded 128 random bits ID of the event. + ID nullable.String `json:"id" validate:"required,maxLength=1024"` + // Log holds additional information added when the error is logged. + Log errorLog `json:"log"` + // ParentID holds the hex encoded 64 random bits ID of the parent + // transaction or span. + ParentID nullable.String `json:"parent_id" validate:"requiredIfAny=transaction_id;trace_id,maxLength=1024"` + // Timestamp holds the recorded time of the event, UTC based and formatted + // as microseconds since Unix epoch. + Timestamp nullable.TimeMicrosUnix `json:"timestamp"` + // TraceID holds the hex encoded 128 random bits ID of the correlated trace. + TraceID nullable.String `json:"trace_id" validate:"requiredIfAny=transaction_id;parent_id,maxLength=1024"` + // Transaction holds information about the correlated transaction. + Transaction errorTransactionRef `json:"transaction"` + // TransactionID holds the hex encoded 64 random bits ID of the correlated + // transaction. + TransactionID nullable.String `json:"transaction_id" validate:"maxLength=1024"` + _ struct{} `validate:"requiredAnyOf=exception;log"` +} + +type errorException struct { + // Attributes of the exception. + Attributes common.MapStr `json:"attributes"` + // Code that is set when the error happened, e.g. database error code. + Code nullable.Interface `json:"code" validate:"inputTypes=string;int,maxLength=1024"` + // Cause can hold a collection of error exceptions representing chained + // exceptions. The chain starts with the outermost exception, followed + // by its cause, and so on. + Cause []errorException `json:"cause"` + // Handled indicates whether the error was caught in the code or not. + Handled nullable.Bool `json:"handled"` + // Message contains the originally captured error message. + Message nullable.String `json:"message"` + // Module describes the exception type's module namespace. 
+ Module nullable.String `json:"module" validate:"maxLength=1024"` + // Stacktrace information of the captured exception. + Stacktrace []stacktraceFrame `json:"stacktrace"` + // Type of the exception. + Type nullable.String `json:"type" validate:"maxLength=1024"` + _ struct{} `validate:"requiredAnyOf=message;type"` +} + +type errorLog struct { + // Level represents the severity of the recorded log. + Level nullable.String `json:"level" validate:"maxLength=1024"` + // LoggerName holds the name of the used logger instance. + LoggerName nullable.String `json:"logger_name" validate:"maxLength=1024"` + // Message of the logged error. In case a parameterized message is captured, + // Message should contain the same information, but with any placeholders + // being replaced. + Message nullable.String `json:"message" validate:"required"` + // ParamMessage should contain the same information as Message, but with + // placeholders where parameters were logged, e.g. 'error connecting to %s'. + // The string is not interpreted, allowing differnt placeholders per client + // languange. The information might be used to group errors together. + ParamMessage nullable.String `json:"param_message" validate:"maxLength=1024"` + // Stacktrace information of the captured error. + Stacktrace []stacktraceFrame `json:"stacktrace"` +} + +type errorTransactionRef struct { + // Sampled indicates whether or not the full information for a transaction + // is captured. If a transaction is unsampled no spans and less context + // information will be reported. + Sampled nullable.Bool `json:"sampled"` + // Type expresses the correlated transaction's type as keyword that has + // specific relevance within the service's domain, + // eg: 'request', 'backgroundjob'. + Type nullable.String `json:"type" validate:"maxLength=1024"` +} + type metadata struct { - Cloud metadataCloud `json:"cloud"` - Labels common.MapStr `json:"labels" validate:"patternKeys=labelsRegex,typesVals=string;bool;number,maxVals=1024"` + // Cloud metadata about where the monitored service is running. + Cloud metadataCloud `json:"cloud"` + // Labels are a flat mapping of user-defined tags. Allowed value types are + // string, boolean and number values. Labels are indexed and searchable. + Labels common.MapStr `json:"labels" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"` + // Process metadata about the monitored service. Process metadataProcess `json:"process"` + // Service metadata about the monitored service. Service metadataService `json:"service" validate:"required"` - System metadataSystem `json:"system"` - User metadataUser `json:"user"` + // System metadata + System metadataSystem `json:"system"` + // User metadata, which can be overwritten on a per event basis. + User user `json:"user"` + // Network holds information about the network over which the + // monitored service is communicating. + Network network `json:"network"` } type metadataCloud struct { - Account metadataCloudAccount `json:"account"` - AvailabilityZone nullable.String `json:"availability_zone" validate:"max=1024"` - Instance metadataCloudInstance `json:"instance"` - Machine metadataCloudMachine `json:"machine"` - Project metadataCloudProject `json:"project"` - Provider nullable.String `json:"provider" validate:"required,max=1024"` - Region nullable.String `json:"region" validate:"max=1024"` + // Account where the monitored service is running. + Account metadataCloudAccount `json:"account"` + // AvailabilityZone where the monitored service is running, e.g. 
+
 type metadata struct {
- Cloud metadataCloud `json:"cloud"`
- Labels common.MapStr `json:"labels" validate:"patternKeys=labelsRegex,typesVals=string;bool;number,maxVals=1024"`
+ // Cloud metadata about where the monitored service is running.
+ Cloud metadataCloud `json:"cloud"`
+ // Labels are a flat mapping of user-defined tags. Allowed value types are
+ // string, boolean and number values. Labels are indexed and searchable.
+ Labels common.MapStr `json:"labels" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"`
+ // Process metadata about the monitored service.
 Process metadataProcess `json:"process"`
+ // Service metadata about the monitored service.
 Service metadataService `json:"service" validate:"required"`
- System metadataSystem `json:"system"`
- User metadataUser `json:"user"`
+ // System metadata
+ System metadataSystem `json:"system"`
+ // User metadata, which can be overwritten on a per-event basis.
+ User user `json:"user"`
+ // Network holds information about the network over which the
+ // monitored service is communicating.
+ Network network `json:"network"`
 }

 type metadataCloud struct {
- Account metadataCloudAccount `json:"account"`
- AvailabilityZone nullable.String `json:"availability_zone" validate:"max=1024"`
- Instance metadataCloudInstance `json:"instance"`
- Machine metadataCloudMachine `json:"machine"`
- Project metadataCloudProject `json:"project"`
- Provider nullable.String `json:"provider" validate:"required,max=1024"`
- Region nullable.String `json:"region" validate:"max=1024"`
+ // Account where the monitored service is running.
+ Account metadataCloudAccount `json:"account"`
+ // AvailabilityZone where the monitored service is running, e.g. us-east-1a
+ AvailabilityZone nullable.String `json:"availability_zone" validate:"maxLength=1024"`
+ // Instance on which the monitored service is running.
+ Instance metadataCloudInstance `json:"instance"`
+ // Machine on which the monitored service is running.
+ Machine metadataCloudMachine `json:"machine"`
+ // Project in which the monitored service is running.
+ Project metadataCloudProject `json:"project"`
+ // Provider that is used, e.g. aws, azure, gcp, digitalocean.
+ Provider nullable.String `json:"provider" validate:"required,maxLength=1024"`
+ // Region where the monitored service is running, e.g. us-east-1
+ Region nullable.String `json:"region" validate:"maxLength=1024"`
+ // Service that is monitored in the cloud
+ Service metadataCloudService `json:"service"`
 }

 type metadataCloudAccount struct {
- ID nullable.String `json:"id" validate:"max=1024"`
- Name nullable.String `json:"name" validate:"max=1024"`
+ // ID of the cloud account.
+ ID nullable.String `json:"id" validate:"maxLength=1024"`
+ // Name of the cloud account.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
 }

 type metadataCloudInstance struct {
- ID nullable.String `json:"id" validate:"max=1024"`
- Name nullable.String `json:"name" validate:"max=1024"`
+ // ID of the cloud instance.
+ ID nullable.String `json:"id" validate:"maxLength=1024"`
+ // Name of the cloud instance.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
 }

 type metadataCloudMachine struct {
- Type nullable.String `json:"type" validate:"max=1024"`
+ // Type of the cloud machine.
+ Type nullable.String `json:"type" validate:"maxLength=1024"`
 }

 type metadataCloudProject struct {
- ID nullable.String `json:"id" validate:"max=1024"`
- Name nullable.String `json:"name" validate:"max=1024"`
+ // ID of the cloud project.
+ ID nullable.String `json:"id" validate:"maxLength=1024"`
+ // Name of the cloud project.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+}
+
+type metadataCloudService struct {
+ // Name of the cloud service, intended to distinguish services running on
+ // different platforms within a provider, eg AWS EC2 vs Lambda,
+ // GCP GCE vs App Engine, Azure VM vs App Server.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
 }

 type metadataProcess struct {
- Argv []string `json:"argv"`
- Pid nullable.Int `json:"pid" validate:"required"`
- Ppid nullable.Int `json:"ppid"`
- Title nullable.String `json:"title" validate:"max=1024"`
+ // Argv holds the command line arguments used to start this process.
+ Argv []string `json:"argv"`
+ // PID holds the process ID of the service.
+ Pid nullable.Int `json:"pid" validate:"required"`
+ // Ppid holds the parent process ID of the service.
+ Ppid nullable.Int `json:"ppid"`
+ // Title is the process title. It can be the same as process name.
+ Title nullable.String `json:"title" validate:"maxLength=1024"`
 }
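+
+// For orientation: a minimal metadata object that passes the service rules
+// defined below (required service.name matching patternAlphaNumericExt, and
+// required agent name and version). Values are illustrative only:
+//
+//	{"metadata": {"service": {"name": "my-service",
+//	                          "agent": {"name": "go", "version": "1.0.0"}}}}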
 type metadataService struct {
- Agent metadataServiceAgent `json:"agent" validate:"required"`
- Environment nullable.String `json:"environment" validate:"max=1024"`
- Framework metadataServiceFramework `json:"framework"`
- Language metadataServiceLanguage `json:"language"`
- Name nullable.String `json:"name" validate:"required,max=1024,pattern=alphaNumericExtRegex"`
- Node metadataServiceNode `json:"node"`
- Runtime metadataServiceRuntime `json:"runtime"`
- Version nullable.String `json:"version" validate:"max=1024"`
+ // Agent holds information about the APM agent capturing the event.
+ Agent metadataServiceAgent `json:"agent" validate:"required"`
+ // Environment in which the monitored service is running,
+ // e.g. `production` or `staging`.
+ Environment nullable.String `json:"environment" validate:"maxLength=1024"`
+ // Framework holds information about the framework used in the
+ // monitored service.
+ Framework metadataServiceFramework `json:"framework"`
+ // Language holds information about the programming language of the
+ // monitored service.
+ Language metadataServiceLanguage `json:"language"`
+ // Name of the monitored service.
+ Name nullable.String `json:"name" validate:"required,minLength=1,maxLength=1024,pattern=patternAlphaNumericExt"`
+ // Node must be a unique meaningful name of the service node.
+ Node metadataServiceNode `json:"node"`
+ // Runtime holds information about the language runtime running the
+ // monitored service
+ Runtime metadataServiceRuntime `json:"runtime"`
+ // Version of the monitored service.
+ Version nullable.String `json:"version" validate:"maxLength=1024"`
 }

 type metadataServiceAgent struct {
- EphemeralID nullable.String `json:"ephemeral_id" validate:"max=1024"`
- Name nullable.String `json:"name" validate:"required,max=1024"`
- Version nullable.String `json:"version" validate:"required,max=1024"`
+ // EphemeralID is a free format ID used for metrics correlation by agents
+ EphemeralID nullable.String `json:"ephemeral_id" validate:"maxLength=1024"`
+ // Name of the APM agent capturing information.
+ Name nullable.String `json:"name" validate:"required,minLength=1,maxLength=1024"`
+ // Version of the APM agent capturing information.
+ Version nullable.String `json:"version" validate:"required,maxLength=1024"`
 }

 type metadataServiceFramework struct {
- Name nullable.String `json:"name" validate:"max=1024"`
- Version nullable.String `json:"version" validate:"max=1024"`
+ // Name of the used framework
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+ // Version of the used framework
+ Version nullable.String `json:"version" validate:"maxLength=1024"`
 }

 type metadataServiceLanguage struct {
- Name nullable.String `json:"name" validate:"required,max=1024"`
- Version nullable.String `json:"version" validate:"max=1024"`
+ // Name of the used programming language
+ Name nullable.String `json:"name" validate:"required,maxLength=1024"`
+ // Version of the used programming language
+ Version nullable.String `json:"version" validate:"maxLength=1024"`
 }

 type metadataServiceNode struct {
- Name nullable.String `json:"configured_name" validate:"max=1024"`
+ // Name of the service node
+ Name nullable.String `json:"configured_name" validate:"maxLength=1024"`
 }

 type metadataServiceRuntime struct {
- Name nullable.String `json:"name" validate:"required,max=1024"`
- Version nullable.String `json:"version" validate:"required,max=1024"`
+ // Name of the language runtime
+ Name nullable.String `json:"name" validate:"required,maxLength=1024"`
+ // Version of the language runtime
+ Version nullable.String `json:"version" validate:"required,maxLength=1024"`
 }

 type metadataSystem struct {
- Architecture nullable.String `json:"architecture" validate:"max=1024"`
- ConfiguredHostname nullable.String `json:"configured_hostname" validate:"max=1024"`
- Container metadataSystemContainer `json:"container"`
- DetectedHostname nullable.String `json:"detected_hostname" validate:"max=1024"`
- HostnameDeprecated nullable.String `json:"hostname" validate:"max=1024"`
- Kubernetes metadataSystemKubernetes `json:"kubernetes"`
- Platform nullable.String `json:"platform" validate:"max=1024"`
+ // Architecture of the system the monitored service is running on.
+ Architecture nullable.String `json:"architecture" validate:"maxLength=1024"`
+ // ConfiguredHostname is the configured name of the host the monitored
+ // service is running on. It should only be sent when configured by the
+ // user. If given, it is used as the event's hostname.
+ ConfiguredHostname nullable.String `json:"configured_hostname" validate:"maxLength=1024"`
+ // Container holds the system's container ID if available.
+ Container metadataSystemContainer `json:"container"`
+ // DetectedHostname is the hostname detected by the APM agent. It usually
+ // contains what the hostname command returns on the host machine.
+ // It will be used as the event's hostname if ConfiguredHostname is not present.
+ DetectedHostname nullable.String `json:"detected_hostname" validate:"maxLength=1024"`
+ // Deprecated: Use ConfiguredHostname and DetectedHostname instead.
+ // DeprecatedHostname is the host name of the system the service is
+ // running on. It does not distinguish between configured and detected
+ // hostname and therefore is deprecated and only used if no other hostname
+ // information is available.
+ DeprecatedHostname nullable.String `json:"hostname" validate:"maxLength=1024"`
+ // Kubernetes system information if the monitored service runs on Kubernetes.
+ Kubernetes metadataSystemKubernetes `json:"kubernetes"`
+ // Platform name of the system platform the monitored service is running on.
+ Platform nullable.String `json:"platform" validate:"maxLength=1024"`
 }

 type metadataSystemContainer struct {
- // `id` is the only field in `system.container`,
- // if `system.container:{}` is sent, it should be considered valid
- // if additional attributes are defined in the future, add the required tag
- ID nullable.String `json:"id"` //validate:"required"
+ // ID of the container the monitored service is running in.
+ ID nullable.String `json:"id" validate:"maxLength=1024"`
 }

 type metadataSystemKubernetes struct {
- Namespace nullable.String `json:"namespace" validate:"max=1024"`
- Node metadataSystemKubernetesNode `json:"node"`
- Pod metadataSystemKubernetesPod `json:"pod"`
+ // Namespace of the Kubernetes resource the monitored service is run on.
+ Namespace nullable.String `json:"namespace" validate:"maxLength=1024"`
+ // Node related information
+ Node metadataSystemKubernetesNode `json:"node"`
+ // Pod related information
+ Pod metadataSystemKubernetesPod `json:"pod"`
 }

 type metadataSystemKubernetesNode struct {
- Name nullable.String `json:"name" validate:"max=1024"`
+ // Name of the Kubernetes Node
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
 }

 type metadataSystemKubernetesPod struct {
- Name nullable.String `json:"name" validate:"max=1024"`
- UID nullable.String `json:"uid" validate:"max=1024"`
+ // Name of the Kubernetes Pod
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+ // UID is the system-generated string uniquely identifying the Pod.
+ UID nullable.String `json:"uid" validate:"maxLength=1024"`
+}
+
+type network struct {
+ // Connection holds information about the network connection.
+ Connection networkConnection `json:"connection"`
+}
+
+type networkConnection struct {
+ // Type of the network connection.
+ Type nullable.String `json:"type" validate:"maxLength=1024"`
+}
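+
+// Hostname precedence implied by the comments above, with invented values:
+//
+//	{"system": {"configured_hostname": "host-a", "detected_hostname": "pod-1234"}}
+//
+// resolves the event hostname to "host-a"; with only detected_hostname sent,
+// it would be "pod-1234"; the deprecated "hostname" field is consulted only
+// when neither is available.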
+
+type metricset struct {
+ // Timestamp holds the recorded time of the event, UTC based and formatted
+ // as microseconds since Unix epoch.
+ Timestamp nullable.TimeMicrosUnix `json:"timestamp"`
+ // Samples hold application metrics collected from the agent.
+ Samples map[string]metricsetSampleValue `json:"samples" validate:"required,patternKeys=patternNoAsteriskQuote"`
+ // Span holds selected information about the correlated span.
+ Span metricsetSpanRef `json:"span"`
+ // Tags are a flat mapping of user-defined tags. On the agent side, tags
+ // are called labels. Allowed value types are string, boolean and number
+ // values. Tags are indexed and searchable.
+ Tags common.MapStr `json:"tags" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"`
+ // Transaction holds selected information about the correlated transaction.
+ Transaction metricsetTransactionRef `json:"transaction"`
+}
+
+type metricsetSampleValue struct {
+ // Type holds an optional metric type: gauge, counter, or histogram.
+ //
+ // If Type is unknown, it will be ignored.
+ Type nullable.String `json:"type"`
+
+ // Unit holds an optional unit for the metric.
+ //
+ // - "percent" (value is in the range [0,1])
+ // - "byte"
+ // - a time unit: "nanos", "micros", "ms", "s", "m", "h", "d"
+ //
+ // If Unit is unknown, it will be ignored.
+ Unit nullable.String `json:"unit"`
+
+ // Value holds the value of a single metric sample.
+ Value nullable.Float64 `json:"value"`
+
+ // Values holds the bucket values for histogram metrics.
+ //
+ // Values must be provided in ascending order; failure to do
+ // so will result in the metric being discarded.
+ Values []float64 `json:"values" validate:"requiredIfAny=counts"`
+
+ // Counts holds the bucket counts for histogram metrics.
+ //
+ // These numbers must be positive or zero.
+ //
+ // If Counts is specified, then Values is expected to be
+ // specified with the same number of elements, and with the
+ // same order.
+ Counts []int64 `json:"counts" validate:"requiredIfAny=values,minVals=0"`
+
+ // At least one of value or values must be specified.
+ _ struct{} `validate:"requiredAnyOf=value;values"`
+}
+
+type metricsetSpanRef struct {
+ // Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)
+ Subtype nullable.String `json:"subtype" validate:"maxLength=1024"`
+ // Type expresses the correlated span's type as a keyword that has specific
+ // relevance within the service's domain, eg: 'request', 'backgroundjob'.
+ Type nullable.String `json:"type" validate:"maxLength=1024"`
+}
+
+type metricsetTransactionRef struct {
+ // Name of the correlated transaction.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+ // Type expresses the correlated transaction's type as a keyword that has
+ // specific relevance within the service's domain, eg: 'request', 'backgroundjob'.
+ Type nullable.String `json:"type" validate:"maxLength=1024"`
+}
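+
+// Example histogram sample under the rules above (Values ascending, Counts
+// non-negative and of matching length); names and numbers are made up:
+//
+//	{"metricset": {"samples": {"latency_distribution": {
+//	    "type": "histogram",
+//	    "values": [1.0, 2.5, 10.0],
+//	    "counts": [12, 3, 0]}}}}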
+
+type span struct {
+ // Action holds the specific kind of event within the sub-type represented
+ // by the span (e.g. query, connect)
+ Action nullable.String `json:"action" validate:"maxLength=1024"`
+ // ChildIDs holds a list of successor transactions and/or spans.
+ ChildIDs []string `json:"child_ids" validate:"maxLength=1024"`
+ // Composite holds details on a group of spans represented by a single one.
+ Composite spanComposite `json:"composite"`
+ // Context holds arbitrary contextual information for the event.
+ Context spanContext `json:"context"`
+ // Duration of the span in milliseconds. When the span is a composite one,
+ // duration is the gross duration, including "whitespace" in between spans.
+ Duration nullable.Float64 `json:"duration" validate:"required,min=0"`
+ // ID holds the hex encoded 64 random bits ID of the event.
+ ID nullable.String `json:"id" validate:"required,maxLength=1024"`
+ // Name is the generic designation of a span in the scope of a transaction.
+ Name nullable.String `json:"name" validate:"required,maxLength=1024"`
+ // Outcome of the span: success, failure, or unknown. Outcome may be one of
+ // a limited set of permitted values describing the success or failure of
+ // the span. It can be used for calculating error rates for outgoing requests.
+ Outcome nullable.String `json:"outcome" validate:"enum=enumOutcome"`
+ // ParentID holds the hex encoded 64 random bits ID of the parent
+ // transaction or span.
+ ParentID nullable.String `json:"parent_id" validate:"required,maxLength=1024"`
+ // SampleRate applied to the monitored service at the time when this span
+ // was recorded.
+ SampleRate nullable.Float64 `json:"sample_rate"`
+ // Stacktrace connected to this span event.
+ Stacktrace []stacktraceFrame `json:"stacktrace"`
+ // Start is the offset relative to the transaction's timestamp identifying
+ // the start of the span, in milliseconds.
+ Start nullable.Float64 `json:"start"`
+ // Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)
+ Subtype nullable.String `json:"subtype" validate:"maxLength=1024"`
+ // Sync indicates whether the span was executed synchronously or asynchronously.
+ Sync nullable.Bool `json:"sync"`
+ // Timestamp holds the recorded time of the event, UTC based and formatted
+ // as microseconds since Unix epoch.
+ Timestamp nullable.TimeMicrosUnix `json:"timestamp"`
+ // TraceID holds the hex encoded 128 random bits ID of the correlated trace.
+ TraceID nullable.String `json:"trace_id" validate:"required,maxLength=1024"`
+ // TransactionID holds the hex encoded 64 random bits ID of the correlated
+ // transaction.
+ TransactionID nullable.String `json:"transaction_id" validate:"maxLength=1024"`
+ // Type holds the span's type, and can have specific keywords
+ // within the service's domain (eg: 'request', 'backgroundjob', etc)
+ Type nullable.String `json:"type" validate:"required,maxLength=1024"`
+ _ struct{} `validate:"requiredAnyOf=start;timestamp"`
+}
+
+type spanContext struct {
+ // Database contains contextual data for database spans
+ Database spanContextDatabase `json:"db"`
+ // Destination contains contextual data about the destination of spans
+ Destination spanContextDestination `json:"destination"`
+ // HTTP contains contextual information when the span concerns an HTTP request.
+ HTTP spanContextHTTP `json:"http"`
+ // Message holds details related to message receiving and publishing
+ // if the captured event integrates with a messaging system
+ Message contextMessage `json:"message"`
+ // Service related information can be sent per span. Information provided
+ // here will override the more generic information retrieved from metadata,
+ // missing service fields will be retrieved from the metadata information.
+ Service contextService `json:"service"`
+ // Tags are a flat mapping of user-defined tags. On the agent side, tags
+ // are called labels. Allowed value types are string, boolean and number
+ // values. Tags are indexed and searchable.
+ Tags common.MapStr `json:"tags" validate:"inputTypesVals=string;bool;number,maxLengthVals=1024"`
+}
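+
+// A span must carry either a start offset or an absolute timestamp
+// (requiredAnyOf=start;timestamp). A minimal illustrative event, with the
+// IDs shortened and all values invented:
+//
+//	{"span": {"id": "abcdef0123456789", "trace_id": "...", "parent_id": "...",
+//	          "name": "SELECT FROM users", "type": "db",
+//	          "duration": 12.3, "start": 1.5, "outcome": "success"}}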
+
+type spanContextDatabase struct {
+ // Instance name of the database.
+ Instance nullable.String `json:"instance"`
+ // Link to the database server.
+ Link nullable.String `json:"link" validate:"maxLength=1024"`
+ // RowsAffected shows the number of rows affected by the statement.
+ RowsAffected nullable.Int `json:"rows_affected"`
+ // Statement of the recorded database event, e.g. query.
+ Statement nullable.String `json:"statement"`
+ // Type of the recorded database event, e.g. sql, cassandra, hbase, redis.
+ Type nullable.String `json:"type"`
+ // User is the username with which the database is accessed.
+ User nullable.String `json:"user"`
+}
+
+type spanContextDestination struct {
+ // Address is the destination network address:
+ // hostname (e.g. 'localhost'),
+ // FQDN (e.g. 'elastic.co'),
+ // IPv4 (e.g. '127.0.0.1')
+ // IPv6 (e.g. '::1')
+ Address nullable.String `json:"address" validate:"maxLength=1024"`
+ // Port is the destination network port (e.g. 443)
+ Port nullable.Int `json:"port"`
+ // Service describes the destination service
+ Service spanContextDestinationService `json:"service"`
+}
+
+type spanContextDestinationService struct {
+ // Name is the identifier for the destination service,
+ // e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq'.
+ // DEPRECATED: this field will be removed in a future release
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+ // Resource identifies the destination service resource being operated on
+ // e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'
+ Resource nullable.String `json:"resource" validate:"required,maxLength=1024"`
+ // Type of the destination service, e.g. db, elasticsearch. Should
+ // typically be the same as span.type.
+ // DEPRECATED: this field will be removed in a future release
+ Type nullable.String `json:"type" validate:"maxLength=1024"`
+}
+
+type spanContextHTTP struct {
+ // Method holds information about the method of the HTTP request.
+ Method nullable.String `json:"method" validate:"maxLength=1024"`
+ // Response describes the HTTP response information in case the event was
+ // created as a result of an HTTP request.
+ Response spanContextHTTPResponse `json:"response"`
+ // Deprecated: Use Response.StatusCode instead.
+ // StatusCode sent in the http response.
+ StatusCode nullable.Int `json:"status_code"`
+ // URL is the raw url of the correlating HTTP request.
+ URL nullable.String `json:"url"`
+}
+
+type spanContextHTTPResponse struct {
+ // DecodedBodySize holds the size of the decoded payload.
+ DecodedBodySize nullable.Float64 `json:"decoded_body_size"`
+ // EncodedBodySize holds the size of the encoded payload.
+ EncodedBodySize nullable.Float64 `json:"encoded_body_size"`
+ // Headers holds the http headers sent in the http response.
+ Headers nullable.HTTPHeader `json:"headers"`
+ // StatusCode sent in the http response.
+ StatusCode nullable.Int `json:"status_code"`
+ // TransferSize holds the total size of the payload.
+ TransferSize nullable.Float64 `json:"transfer_size"`
+}
+
+type stacktraceFrame struct {
+ // AbsPath is the absolute path of the frame's file.
+ AbsPath nullable.String `json:"abs_path"`
+ // Classname of the frame.
+ Classname nullable.String `json:"classname"`
+ // ColumnNumber of the frame.
+ ColumnNumber nullable.Int `json:"colno"`
+ // ContextLine is the line from the frame's file.
+ ContextLine nullable.String `json:"context_line"`
+ // Filename is the relative name of the frame's file.
+ Filename nullable.String `json:"filename"`
+ // Function represented by the frame.
+ Function nullable.String `json:"function"`
+ // LibraryFrame indicates whether the frame is from a third party library.
+ LibraryFrame nullable.Bool `json:"library_frame"`
+ // LineNumber of the frame.
+ LineNumber nullable.Int `json:"lineno"`
+ // Module to which the frame belongs.
+ Module nullable.String `json:"module"`
+ // PostContext is a slice of code lines immediately after the line
+ // from the frame's file.
+ PostContext []string `json:"post_context"`
+ // PreContext is a slice of code lines immediately before the line
+ // from the frame's file.
+ PreContext []string `json:"pre_context"`
+ // Vars is a flat mapping of local variables of the frame.
+ Vars common.MapStr `json:"vars"`
+ _ struct{} `validate:"requiredAnyOf=classname;filename"`
+}
+
+type spanComposite struct {
+ // Count is the number of compressed spans the composite span represents.
+ // The minimum count is 2, as a composite span represents at least two spans.
+ Count nullable.Int `json:"count" validate:"required,min=2"`
+ // Sum is the sum of the durations of all compressed spans this composite
+ // span represents, in milliseconds.
+ Sum nullable.Float64 `json:"sum" validate:"required,min=0"`
+ // A string value indicating which compression strategy was used. The valid
+ // values are `exact_match` and `same_kind`.
+ CompressionStrategy nullable.String `json:"compression_strategy" validate:"required"`
+}
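+
+// Illustrative composite fragment under the rules above (count >= 2,
+// sum >= 0, compression_strategy required):
+//
+//	{"composite": {"count": 5, "sum": 3.2, "compression_strategy": "exact_match"}}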
+
+type transaction struct {
+ // Context holds arbitrary contextual information for the event.
+ Context context `json:"context"`
+ // Duration is how long the transaction took to complete, in milliseconds
+ // with 3 decimal points.
+ Duration nullable.Float64 `json:"duration" validate:"required,min=0"`
+ // ID holds the hex encoded 64 random bits ID of the event.
+ ID nullable.String `json:"id" validate:"required,maxLength=1024"`
+ // Marks capture the timing of a significant event during the lifetime of
+ // a transaction. Marks are organized into groups and can be set by the
+ // user or the agent. Marks are only reported by RUM agents.
+ Marks transactionMarks `json:"marks"`
+ // Name is the generic designation of a transaction in the scope of a
+ // single service, eg: 'GET /users/:id'.
+ Name nullable.String `json:"name" validate:"maxLength=1024"`
+ // Outcome of the transaction with a limited set of permitted values,
+ // describing the success or failure of the transaction from the service's
+ // perspective. It is used for calculating error rates for incoming requests.
+ // Permitted values: success, failure, unknown.
+ Outcome nullable.String `json:"outcome" validate:"enum=enumOutcome"`
+ // ParentID holds the hex encoded 64 random bits ID of the parent
+ // transaction or span.
+ ParentID nullable.String `json:"parent_id" validate:"maxLength=1024"`
+ // Result of the transaction. For HTTP-related transactions, this should
+ // be the status code formatted like 'HTTP 2xx'.
+ Result nullable.String `json:"result" validate:"maxLength=1024"`
+ // Sampled indicates whether or not the full information for a transaction
+ // is captured. If a transaction is unsampled no spans and less context
+ // information will be reported.
+ Sampled nullable.Bool `json:"sampled"`
+ // SampleRate applied to the monitored service at the time when this
+ // transaction was recorded. Allowed values are [0..1]. A SampleRate <1
+ // indicates that not all spans are recorded.
+ SampleRate nullable.Float64 `json:"sample_rate"`
+ // Session holds optional transaction session information for RUM.
+ Session transactionSession `json:"session"`
+ // SpanCount counts correlated spans.
+ SpanCount transactionSpanCount `json:"span_count" validate:"required"`
+ // Timestamp holds the recorded time of the event, UTC based and formatted
+ // as microseconds since Unix epoch.
+ Timestamp nullable.TimeMicrosUnix `json:"timestamp"`
+ // TraceID holds the hex encoded 128 random bits ID of the correlated trace.
+ TraceID nullable.String `json:"trace_id" validate:"required,maxLength=1024"`
+ // Type expresses the transaction's type as a keyword that has specific
+ // relevance within the service's domain, eg: 'request', 'backgroundjob'.
+ Type nullable.String `json:"type" validate:"required,maxLength=1024"`
+ // UserExperience holds metrics for measuring real user experience.
+ // This information is only sent by RUM agents.
+ UserExperience transactionUserExperience `json:"experience"`
+}
+
+type transactionSession struct {
+ // ID holds a session ID for grouping a set of related transactions.
+ ID nullable.String `json:"id" validate:"required,maxLength=1024"`
+
+ // Sequence holds an optional sequence number for a transaction within
+ // a session. It is not meaningful to compare sequences across two
+ // different sessions.
+ Sequence nullable.Int `json:"sequence" validate:"min=1"`
+}
+
+type transactionMarks struct {
+ Events map[string]transactionMarkEvents `json:"-"`
+}
+
+func (m *transactionMarks) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, &m.Events)
+}
+
+type transactionMarkEvents struct {
+ Measurements map[string]float64 `json:"-"`
+}
+
+func (m *transactionMarkEvents) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, &m.Measurements)
+}
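+
+// Marks decode into nested maps via the custom UnmarshalJSON above; e.g.
+// (names and values invented):
+//
+//	{"marks": {"navigationTiming": {"domComplete": 123.4}}}
+//
+// populates Events["navigationTiming"].Measurements["domComplete"] == 123.4.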
+
+type transactionSpanCount struct {
+ // Dropped is the number of correlated spans that have been dropped by
+ // the APM agent recording the transaction.
+ Dropped nullable.Int `json:"dropped"`
+ // Started is the number of correlated spans that are recorded.
+ Started nullable.Int `json:"started" validate:"required"`
+}
+
+// transactionUserExperience holds real user (browser) experience metrics.
+type transactionUserExperience struct {
+ // CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value,
+ // or a negative value if CLS is unknown. See https://web.dev/cls/
+ CumulativeLayoutShift nullable.Float64 `json:"cls" validate:"min=0"`
+ // FirstInputDelay holds the First Input Delay (FID) metric value,
+ // or a negative value if FID is unknown. See https://web.dev/fid/
+ FirstInputDelay nullable.Float64 `json:"fid" validate:"min=0"`
+ // Longtask holds longtask duration/count metrics.
+ Longtask longtaskMetrics `json:"longtask"`
+ // TotalBlockingTime holds the Total Blocking Time (TBT) metric value,
+ // or a negative value if TBT is unknown. See https://web.dev/tbt/
+ TotalBlockingTime nullable.Float64 `json:"tbt" validate:"min=0"`
+}
+
+type longtaskMetrics struct {
+ // Count is the total number of longtasks.
+ Count nullable.Int `json:"count" validate:"required,min=0"`
+ // Max longtask duration
+ Max nullable.Float64 `json:"max" validate:"required,min=0"`
+ // Sum of longtask durations
+ Sum nullable.Float64 `json:"sum" validate:"required,min=0"`
 }

-type metadataUser struct {
- ID nullable.Interface `json:"id,omitempty" validate:"max=1024,types=string;int"`
- Email nullable.String `json:"email" validate:"max=1024"`
- Name nullable.String `json:"username" validate:"max=1024"`
+type user struct {
+ // Domain of the logged-in user
+ Domain nullable.String `json:"domain" validate:"maxLength=1024"`
+ // ID identifies the logged-in user, e.g. can be the primary key of the user
+ ID nullable.Interface `json:"id" validate:"maxLength=1024,inputTypes=string;int"`
+ // Email of the user.
+ Email nullable.String `json:"email" validate:"maxLength=1024"`
+ // Name of the user.
+ Name nullable.String `json:"username" validate:"maxLength=1024"`
 }
diff --git a/model/modeldecoder/v2/model_generated.go b/model/modeldecoder/v2/model_generated.go
index 519c6155244..f6529cec3a4 100644
--- a/model/modeldecoder/v2/model_generated.go
+++ b/model/modeldecoder/v2/model_generated.go
@@ -22,572 +22,2065 @@ package v2
 import (
 "encoding/json"
 "fmt"
+ "regexp"
+ "strconv"
 "unicode/utf8"
+
+ "github.com/pkg/errors"
+)
+
+var (
+ patternAlphaNumericExtRegexp = regexp.MustCompile(patternAlphaNumericExt)
+ patternNoAsteriskQuoteRegexp = regexp.MustCompile(patternNoAsteriskQuote)
 )

-func (m *metadataRoot) IsSet() bool {
- return m.Metadata.IsSet()
+func (val *metadataRoot) IsSet() bool {
+ return val.Metadata.IsSet()
 }

-func (m *metadataRoot) Reset() {
- m.Metadata.Reset()
+func (val *metadataRoot) Reset() {
+ val.Metadata.Reset()
 }

-func (m *metadataRoot) validate() error {
- if err := m.Metadata.validate(); err != nil {
- return err
+func (val *metadataRoot) validate() error {
+ if err := val.Metadata.validate(); err != nil {
+ return errors.Wrapf(err, "metadata")
 }
- if !m.Metadata.IsSet() {
+ if !val.Metadata.IsSet() {
 return fmt.Errorf("'metadata' required")
 }
 return nil
 }

-func (m *metadata) IsSet() bool {
- return m.Cloud.IsSet() || len(m.Labels) > 0 || m.Process.IsSet() || m.Service.IsSet() || m.System.IsSet() || m.User.IsSet()
+func (val *metadata) IsSet() bool {
+ return val.Cloud.IsSet() || (len(val.Labels) > 0) || val.Process.IsSet() || val.Service.IsSet() || val.System.IsSet() || val.User.IsSet() || val.Network.IsSet()
 }

-func (m *metadata) Reset() {
- m.Cloud.Reset()
- for k := range m.Labels {
- delete(m.Labels, k)
- }
- m.Process.Reset()
- m.Service.Reset()
- m.System.Reset()
- m.User.Reset()
+func (val *metadata) Reset() {
+ val.Cloud.Reset()
+ for k := range val.Labels {
+ delete(val.Labels, k)
+ }
+ val.Process.Reset()
+ val.Service.Reset()
+ val.System.Reset()
+ val.User.Reset()
+ val.Network.Reset()
 }

-func (m *metadata) validate() error {
- if !m.IsSet() {
+func (val *metadata) validate() error {
+ if !val.IsSet() {
 return nil
 }
- if err := m.Cloud.validate(); err != nil {
- return err
+ if err := val.Cloud.validate(); err != nil {
+ return errors.Wrapf(err, "cloud")
 }
- for k, v := range m.Labels {
- if !labelsRegex.MatchString(k) {
- return fmt.Errorf("validation rule 'patternKeys(labelsRegex)' violated for 'metadata.labels'")
- }
+ for k, v := range val.Labels {
 switch t := v.(type) {
 case nil:
 case string:
 if utf8.RuneCountInString(t) > 1024 {
- return fmt.Errorf("validation rule 'maxVals(1024)' violated for 'metadata.labels'")
+ return fmt.Errorf("'labels': validation rule 
'maxLengthVals(1024)' violated") } case bool: case json.Number: default: - return fmt.Errorf("validation rule 'typesVals(string;bool;number)' violated for 'metadata.labels' for key %s", k) + return fmt.Errorf("'labels': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k) } } - if err := m.Process.validate(); err != nil { - return err + if err := val.Process.validate(); err != nil { + return errors.Wrapf(err, "process") + } + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "service") } - if err := m.Service.validate(); err != nil { - return err + if !val.Service.IsSet() { + return fmt.Errorf("'service' required") } - if !m.Service.IsSet() { - return fmt.Errorf("'metadata.service' required") + if err := val.System.validate(); err != nil { + return errors.Wrapf(err, "system") } - if err := m.System.validate(); err != nil { - return err + if err := val.User.validate(); err != nil { + return errors.Wrapf(err, "user") } - if err := m.User.validate(); err != nil { - return err + if err := val.Network.validate(); err != nil { + return errors.Wrapf(err, "network") } return nil } -func (m *metadataCloud) IsSet() bool { - return m.Account.IsSet() || m.AvailabilityZone.IsSet() || m.Instance.IsSet() || m.Machine.IsSet() || m.Project.IsSet() || m.Provider.IsSet() || m.Region.IsSet() +func (val *metadataCloud) IsSet() bool { + return val.Account.IsSet() || val.AvailabilityZone.IsSet() || val.Instance.IsSet() || val.Machine.IsSet() || val.Project.IsSet() || val.Provider.IsSet() || val.Region.IsSet() || val.Service.IsSet() } -func (m *metadataCloud) Reset() { - m.Account.Reset() - m.AvailabilityZone.Reset() - m.Instance.Reset() - m.Machine.Reset() - m.Project.Reset() - m.Provider.Reset() - m.Region.Reset() +func (val *metadataCloud) Reset() { + val.Account.Reset() + val.AvailabilityZone.Reset() + val.Instance.Reset() + val.Machine.Reset() + val.Project.Reset() + val.Provider.Reset() + val.Region.Reset() + val.Service.Reset() } -func (m *metadataCloud) validate() error { - if !m.IsSet() { +func (val *metadataCloud) validate() error { + if !val.IsSet() { return nil } - if err := m.Account.validate(); err != nil { - return err + if err := val.Account.validate(); err != nil { + return errors.Wrapf(err, "account") } - if utf8.RuneCountInString(m.AvailabilityZone.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.availability_zone'") + if val.AvailabilityZone.IsSet() && utf8.RuneCountInString(val.AvailabilityZone.Val) > 1024 { + return fmt.Errorf("'availability_zone': validation rule 'maxLength(1024)' violated") } - if err := m.Instance.validate(); err != nil { - return err + if err := val.Instance.validate(); err != nil { + return errors.Wrapf(err, "instance") } - if err := m.Machine.validate(); err != nil { - return err + if err := val.Machine.validate(); err != nil { + return errors.Wrapf(err, "machine") } - if err := m.Project.validate(); err != nil { - return err + if err := val.Project.validate(); err != nil { + return errors.Wrapf(err, "project") } - if utf8.RuneCountInString(m.Provider.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.provider'") + if val.Provider.IsSet() && utf8.RuneCountInString(val.Provider.Val) > 1024 { + return fmt.Errorf("'provider': validation rule 'maxLength(1024)' violated") } - if !m.Provider.IsSet() { - return fmt.Errorf("'metadata.cloud.provider' required") + if !val.Provider.IsSet() { + return fmt.Errorf("'provider' required") } - if 
utf8.RuneCountInString(m.Region.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.region'") + if val.Region.IsSet() && utf8.RuneCountInString(val.Region.Val) > 1024 { + return fmt.Errorf("'region': validation rule 'maxLength(1024)' violated") + } + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "service") } return nil } -func (m *metadataCloudAccount) IsSet() bool { - return m.ID.IsSet() || m.Name.IsSet() +func (val *metadataCloudAccount) IsSet() bool { + return val.ID.IsSet() || val.Name.IsSet() } -func (m *metadataCloudAccount) Reset() { - m.ID.Reset() - m.Name.Reset() +func (val *metadataCloudAccount) Reset() { + val.ID.Reset() + val.Name.Reset() } -func (m *metadataCloudAccount) validate() error { - if !m.IsSet() { +func (val *metadataCloudAccount) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.ID.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.account.id'") + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.account.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataCloudInstance) IsSet() bool { - return m.ID.IsSet() || m.Name.IsSet() +func (val *metadataCloudInstance) IsSet() bool { + return val.ID.IsSet() || val.Name.IsSet() } -func (m *metadataCloudInstance) Reset() { - m.ID.Reset() - m.Name.Reset() +func (val *metadataCloudInstance) Reset() { + val.ID.Reset() + val.Name.Reset() } -func (m *metadataCloudInstance) validate() error { - if !m.IsSet() { +func (val *metadataCloudInstance) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.ID.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.instance.id'") + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.instance.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataCloudMachine) IsSet() bool { - return m.Type.IsSet() +func (val *metadataCloudMachine) IsSet() bool { + return val.Type.IsSet() } -func (m *metadataCloudMachine) Reset() { - m.Type.Reset() +func (val *metadataCloudMachine) Reset() { + val.Type.Reset() } -func (m *metadataCloudMachine) validate() error { - if !m.IsSet() { +func (val *metadataCloudMachine) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Type.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.machine.type'") + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataCloudProject) IsSet() bool { - return m.ID.IsSet() || m.Name.IsSet() +func (val *metadataCloudProject) IsSet() bool { + return val.ID.IsSet() || val.Name.IsSet() } -func (m *metadataCloudProject) Reset() { - m.ID.Reset() - m.Name.Reset() +func (val 
*metadataCloudProject) Reset() { + val.ID.Reset() + val.Name.Reset() } -func (m *metadataCloudProject) validate() error { - if !m.IsSet() { +func (val *metadataCloudProject) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.ID.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.project.id'") + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *metadataCloudService) IsSet() bool { + return val.Name.IsSet() +} + +func (val *metadataCloudService) Reset() { + val.Name.Reset() +} + +func (val *metadataCloudService) validate() error { + if !val.IsSet() { + return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.cloud.project.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataProcess) IsSet() bool { - return len(m.Argv) > 0 || m.Pid.IsSet() || m.Ppid.IsSet() || m.Title.IsSet() +func (val *metadataProcess) IsSet() bool { + return (len(val.Argv) > 0) || val.Pid.IsSet() || val.Ppid.IsSet() || val.Title.IsSet() } -func (m *metadataProcess) Reset() { - m.Argv = m.Argv[:0] - m.Pid.Reset() - m.Ppid.Reset() - m.Title.Reset() +func (val *metadataProcess) Reset() { + val.Argv = val.Argv[:0] + val.Pid.Reset() + val.Ppid.Reset() + val.Title.Reset() } -func (m *metadataProcess) validate() error { - if !m.IsSet() { +func (val *metadataProcess) validate() error { + if !val.IsSet() { return nil } - if !m.Pid.IsSet() { - return fmt.Errorf("'metadata.process.pid' required") + if !val.Pid.IsSet() { + return fmt.Errorf("'pid' required") } - if utf8.RuneCountInString(m.Title.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.process.title'") + if val.Title.IsSet() && utf8.RuneCountInString(val.Title.Val) > 1024 { + return fmt.Errorf("'title': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataService) IsSet() bool { - return m.Agent.IsSet() || m.Environment.IsSet() || m.Framework.IsSet() || m.Language.IsSet() || m.Name.IsSet() || m.Node.IsSet() || m.Runtime.IsSet() || m.Version.IsSet() +func (val *metadataService) IsSet() bool { + return val.Agent.IsSet() || val.Environment.IsSet() || val.Framework.IsSet() || val.Language.IsSet() || val.Name.IsSet() || val.Node.IsSet() || val.Runtime.IsSet() || val.Version.IsSet() } -func (m *metadataService) Reset() { - m.Agent.Reset() - m.Environment.Reset() - m.Framework.Reset() - m.Language.Reset() - m.Name.Reset() - m.Node.Reset() - m.Runtime.Reset() - m.Version.Reset() +func (val *metadataService) Reset() { + val.Agent.Reset() + val.Environment.Reset() + val.Framework.Reset() + val.Language.Reset() + val.Name.Reset() + val.Node.Reset() + val.Runtime.Reset() + val.Version.Reset() } -func (m *metadataService) validate() error { - if !m.IsSet() { +func (val *metadataService) validate() error { + if !val.IsSet() { return nil } - if err := m.Agent.validate(); err != nil { - return err + if err := val.Agent.validate(); err != nil { + return errors.Wrapf(err, "agent") + } + if !val.Agent.IsSet() { + return fmt.Errorf("'agent' required") } - if !m.Agent.IsSet() { - return 
fmt.Errorf("'metadata.service.agent' required") + if val.Environment.IsSet() && utf8.RuneCountInString(val.Environment.Val) > 1024 { + return fmt.Errorf("'environment': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Environment.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.environment'") + if err := val.Framework.validate(); err != nil { + return errors.Wrapf(err, "framework") } - if err := m.Framework.validate(); err != nil { - return err + if err := val.Language.validate(); err != nil { + return errors.Wrapf(err, "language") } - if err := m.Language.validate(); err != nil { - return err + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) < 1 { + return fmt.Errorf("'name': validation rule 'minLength(1)' violated") } - if !alphaNumericExtRegex.MatchString(m.Name.Val) { - return fmt.Errorf("validation rule 'pattern(alphaNumericExtRegex)' violated for 'metadata.service.name'") + if val.Name.Val != "" && !patternAlphaNumericExtRegexp.MatchString(val.Name.Val) { + return fmt.Errorf("'name': validation rule 'pattern(patternAlphaNumericExt)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'metadata.service.name' required") + if !val.Name.IsSet() { + return fmt.Errorf("'name' required") } - if err := m.Node.validate(); err != nil { - return err + if err := val.Node.validate(); err != nil { + return errors.Wrapf(err, "node") } - if err := m.Runtime.validate(); err != nil { - return err + if err := val.Runtime.validate(); err != nil { + return errors.Wrapf(err, "runtime") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.version'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceAgent) IsSet() bool { - return m.EphemeralID.IsSet() || m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceAgent) IsSet() bool { + return val.EphemeralID.IsSet() || val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceAgent) Reset() { - m.EphemeralID.Reset() - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceAgent) Reset() { + val.EphemeralID.Reset() + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceAgent) validate() error { - if !m.IsSet() { +func (val *metadataServiceAgent) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.EphemeralID.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.agent.ephemeral_id'") + if val.EphemeralID.IsSet() && utf8.RuneCountInString(val.EphemeralID.Val) > 1024 { + return fmt.Errorf("'ephemeral_id': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.agent.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'metadata.service.agent.name' required") + if val.Name.IsSet() && 
utf8.RuneCountInString(val.Name.Val) < 1 { + return fmt.Errorf("'name': validation rule 'minLength(1)' violated") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.agent.version'") + if !val.Name.IsSet() { + return fmt.Errorf("'name' required") } - if !m.Version.IsSet() { - return fmt.Errorf("'metadata.service.agent.version' required") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + if !val.Version.IsSet() { + return fmt.Errorf("'version' required") } return nil } -func (m *metadataServiceFramework) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceFramework) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceFramework) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceFramework) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceFramework) validate() error { - if !m.IsSet() { +func (val *metadataServiceFramework) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.framework.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.framework.version'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceLanguage) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceLanguage) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceLanguage) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceLanguage) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceLanguage) validate() error { - if !m.IsSet() { +func (val *metadataServiceLanguage) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.language.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'metadata.service.language.name' required") + if !val.Name.IsSet() { + return fmt.Errorf("'name' required") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.language.version'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceNode) IsSet() bool { - return m.Name.IsSet() +func (val *metadataServiceNode) IsSet() bool { + return val.Name.IsSet() } -func (m *metadataServiceNode) Reset() { - m.Name.Reset() +func (val *metadataServiceNode) Reset() { + val.Name.Reset() } -func (m *metadataServiceNode) validate() error { - if !m.IsSet() { +func (val *metadataServiceNode) validate() error { + if !val.IsSet() { return nil } - if 
utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.node.configured_name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'configured_name': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataServiceRuntime) IsSet() bool { - return m.Name.IsSet() || m.Version.IsSet() +func (val *metadataServiceRuntime) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() } -func (m *metadataServiceRuntime) Reset() { - m.Name.Reset() - m.Version.Reset() +func (val *metadataServiceRuntime) Reset() { + val.Name.Reset() + val.Version.Reset() } -func (m *metadataServiceRuntime) validate() error { - if !m.IsSet() { +func (val *metadataServiceRuntime) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.runtime.name'") + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") } - if !m.Name.IsSet() { - return fmt.Errorf("'metadata.service.runtime.name' required") + if !val.Name.IsSet() { + return fmt.Errorf("'name' required") } - if utf8.RuneCountInString(m.Version.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.service.runtime.version'") + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") } - if !m.Version.IsSet() { - return fmt.Errorf("'metadata.service.runtime.version' required") + if !val.Version.IsSet() { + return fmt.Errorf("'version' required") } return nil } -func (m *metadataSystem) IsSet() bool { - return m.Architecture.IsSet() || m.ConfiguredHostname.IsSet() || m.Container.IsSet() || m.DetectedHostname.IsSet() || m.HostnameDeprecated.IsSet() || m.Kubernetes.IsSet() || m.Platform.IsSet() +func (val *metadataSystem) IsSet() bool { + return val.Architecture.IsSet() || val.ConfiguredHostname.IsSet() || val.Container.IsSet() || val.DetectedHostname.IsSet() || val.DeprecatedHostname.IsSet() || val.Kubernetes.IsSet() || val.Platform.IsSet() } -func (m *metadataSystem) Reset() { - m.Architecture.Reset() - m.ConfiguredHostname.Reset() - m.Container.Reset() - m.DetectedHostname.Reset() - m.HostnameDeprecated.Reset() - m.Kubernetes.Reset() - m.Platform.Reset() +func (val *metadataSystem) Reset() { + val.Architecture.Reset() + val.ConfiguredHostname.Reset() + val.Container.Reset() + val.DetectedHostname.Reset() + val.DeprecatedHostname.Reset() + val.Kubernetes.Reset() + val.Platform.Reset() } -func (m *metadataSystem) validate() error { - if !m.IsSet() { +func (val *metadataSystem) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Architecture.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.architecture'") + if val.Architecture.IsSet() && utf8.RuneCountInString(val.Architecture.Val) > 1024 { + return fmt.Errorf("'architecture': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.ConfiguredHostname.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.configured_hostname'") + if val.ConfiguredHostname.IsSet() && utf8.RuneCountInString(val.ConfiguredHostname.Val) > 1024 { + return fmt.Errorf("'configured_hostname': validation rule 'maxLength(1024)' violated") } - if err := 
m.Container.validate(); err != nil { - return err + if err := val.Container.validate(); err != nil { + return errors.Wrapf(err, "container") } - if utf8.RuneCountInString(m.DetectedHostname.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.detected_hostname'") + if val.DetectedHostname.IsSet() && utf8.RuneCountInString(val.DetectedHostname.Val) > 1024 { + return fmt.Errorf("'detected_hostname': validation rule 'maxLength(1024)' violated") } - if utf8.RuneCountInString(m.HostnameDeprecated.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.hostname'") + if val.DeprecatedHostname.IsSet() && utf8.RuneCountInString(val.DeprecatedHostname.Val) > 1024 { + return fmt.Errorf("'hostname': validation rule 'maxLength(1024)' violated") } - if err := m.Kubernetes.validate(); err != nil { - return err + if err := val.Kubernetes.validate(); err != nil { + return errors.Wrapf(err, "kubernetes") } - if utf8.RuneCountInString(m.Platform.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.platform'") + if val.Platform.IsSet() && utf8.RuneCountInString(val.Platform.Val) > 1024 { + return fmt.Errorf("'platform': validation rule 'maxLength(1024)' violated") } return nil } -func (m *metadataSystemContainer) IsSet() bool { - return m.ID.IsSet() +func (val *metadataSystemContainer) IsSet() bool { + return val.ID.IsSet() } -func (m *metadataSystemContainer) Reset() { - m.ID.Reset() +func (val *metadataSystemContainer) Reset() { + val.ID.Reset() } -func (m *metadataSystemContainer) validate() error { - if !m.IsSet() { +func (val *metadataSystemContainer) validate() error { + if !val.IsSet() { return nil } + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } return nil } -func (m *metadataSystemKubernetes) IsSet() bool { - return m.Namespace.IsSet() || m.Node.IsSet() || m.Pod.IsSet() +func (val *metadataSystemKubernetes) IsSet() bool { + return val.Namespace.IsSet() || val.Node.IsSet() || val.Pod.IsSet() } -func (m *metadataSystemKubernetes) Reset() { - m.Namespace.Reset() - m.Node.Reset() - m.Pod.Reset() +func (val *metadataSystemKubernetes) Reset() { + val.Namespace.Reset() + val.Node.Reset() + val.Pod.Reset() } -func (m *metadataSystemKubernetes) validate() error { - if !m.IsSet() { +func (val *metadataSystemKubernetes) validate() error { + if !val.IsSet() { return nil } - if utf8.RuneCountInString(m.Namespace.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.kubernetes.namespace'") + if val.Namespace.IsSet() && utf8.RuneCountInString(val.Namespace.Val) > 1024 { + return fmt.Errorf("'namespace': validation rule 'maxLength(1024)' violated") } - if err := m.Node.validate(); err != nil { - return err + if err := val.Node.validate(); err != nil { + return errors.Wrapf(err, "node") } - if err := m.Pod.validate(); err != nil { - return err + if err := val.Pod.validate(); err != nil { + return errors.Wrapf(err, "pod") } return nil } -func (m *metadataSystemKubernetesNode) IsSet() bool { - return m.Name.IsSet() +func (val *metadataSystemKubernetesNode) IsSet() bool { + return val.Name.IsSet() } -func (m *metadataSystemKubernetesNode) Reset() { - m.Name.Reset() +func (val *metadataSystemKubernetesNode) Reset() { + val.Name.Reset() } -func (m *metadataSystemKubernetesNode) validate() error { - if !m.IsSet() { +func (val *metadataSystemKubernetesNode) validate() error { 
+ if !val.IsSet() {
 return nil
 }
- if utf8.RuneCountInString(m.Name.Val) > 1024 {
- return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.kubernetes.node.name'")
+ if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+ return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated")
 }
 return nil
 }

-func (m *metadataSystemKubernetesPod) IsSet() bool {
- return m.Name.IsSet() || m.UID.IsSet()
+func (val *metadataSystemKubernetesPod) IsSet() bool {
+ return val.Name.IsSet() || val.UID.IsSet()
 }

-func (m *metadataSystemKubernetesPod) Reset() {
- m.Name.Reset()
- m.UID.Reset()
+func (val *metadataSystemKubernetesPod) Reset() {
+ val.Name.Reset()
+ val.UID.Reset()
 }

-func (m *metadataSystemKubernetesPod) validate() error {
- if !m.IsSet() {
+func (val *metadataSystemKubernetesPod) validate() error {
+ if !val.IsSet() {
 return nil
 }
- if utf8.RuneCountInString(m.Name.Val) > 1024 {
- return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.kubernetes.pod.name'")
+ if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+ return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated")
 }
- if utf8.RuneCountInString(m.UID.Val) > 1024 {
- return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.system.kubernetes.pod.uid'")
+ if val.UID.IsSet() && utf8.RuneCountInString(val.UID.Val) > 1024 {
+ return fmt.Errorf("'uid': validation rule 'maxLength(1024)' violated")
 }
 return nil
 }

-func (m *metadataUser) IsSet() bool {
- return m.ID.IsSet() || m.Email.IsSet() || m.Name.IsSet()
+func (val *user) IsSet() bool {
+ return val.Domain.IsSet() || val.ID.IsSet() || val.Email.IsSet() || val.Name.IsSet()
 }

-func (m *metadataUser) Reset() {
- m.ID.Reset()
- m.Email.Reset()
- m.Name.Reset()
+func (val *user) Reset() {
+ val.Domain.Reset()
+ val.ID.Reset()
+ val.Email.Reset()
+ val.Name.Reset()
 }

-func (m *metadataUser) validate() error {
- if !m.IsSet() {
+func (val *user) validate() error {
+ if !val.IsSet() {
 return nil
 }
- switch t := m.ID.Val.(type) {
+ if val.Domain.IsSet() && utf8.RuneCountInString(val.Domain.Val) > 1024 {
+ return fmt.Errorf("'domain': validation rule 'maxLength(1024)' violated")
+ }
+ switch t := val.ID.Val.(type) {
 case string:
 if utf8.RuneCountInString(t) > 1024 {
- return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.user.id'")
+ return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated")
 }
+ case int:
 case json.Number:
 if _, err := t.Int64(); err != nil {
- return fmt.Errorf("validation rule 'types(string;int)' violated for 'metadata.user.id'")
+ return fmt.Errorf("'id': validation rule 'inputTypes(string;int)' violated")
+ }
+ case nil:
+ default:
+ return fmt.Errorf("'id': validation rule 'inputTypes(string;int)' violated")
+ }
+ if val.Email.IsSet() && utf8.RuneCountInString(val.Email.Val) > 1024 {
+ return fmt.Errorf("'email': validation rule 'maxLength(1024)' violated")
+ }
+ if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 {
+ return fmt.Errorf("'username': validation rule 'maxLength(1024)' violated")
+ }
+ return nil
+}
+
+func (val *network) IsSet() bool {
+ return val.Connection.IsSet()
+}
+
+func (val *network) Reset() {
+ val.Connection.Reset()
+}
+
+func (val *network) validate() error {
+ if !val.IsSet() {
+ return nil
+ }
+ if err := val.Connection.validate(); err != nil {
+ return errors.Wrapf(err, "connection")
+ }
+ return nil
+}
+
+func (val *networkConnection) IsSet() bool {
+ return val.Type.IsSet()
+}
+
+func (val 
*networkConnection) Reset() { + val.Type.Reset() +} + +func (val *networkConnection) validate() error { + if !val.IsSet() { + return nil + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *errorRoot) IsSet() bool { + return val.Error.IsSet() +} + +func (val *errorRoot) Reset() { + val.Error.Reset() +} + +func (val *errorRoot) validate() error { + if err := val.Error.validate(); err != nil { + return errors.Wrapf(err, "error") + } + if !val.Error.IsSet() { + return fmt.Errorf("'error' required") + } + return nil +} + +func (val *errorEvent) IsSet() bool { + return val.Context.IsSet() || val.Culprit.IsSet() || val.Exception.IsSet() || val.ID.IsSet() || val.Log.IsSet() || val.ParentID.IsSet() || val.Timestamp.IsSet() || val.TraceID.IsSet() || val.Transaction.IsSet() || val.TransactionID.IsSet() +} + +func (val *errorEvent) Reset() { + val.Context.Reset() + val.Culprit.Reset() + val.Exception.Reset() + val.ID.Reset() + val.Log.Reset() + val.ParentID.Reset() + val.Timestamp.Reset() + val.TraceID.Reset() + val.Transaction.Reset() + val.TransactionID.Reset() +} + +func (val *errorEvent) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Context.validate(); err != nil { + return errors.Wrapf(err, "context") + } + if val.Culprit.IsSet() && utf8.RuneCountInString(val.Culprit.Val) > 1024 { + return fmt.Errorf("'culprit': validation rule 'maxLength(1024)' violated") + } + if err := val.Exception.validate(); err != nil { + return errors.Wrapf(err, "exception") + } + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } + if !val.ID.IsSet() { + return fmt.Errorf("'id' required") + } + if err := val.Log.validate(); err != nil { + return errors.Wrapf(err, "log") + } + if val.ParentID.IsSet() && utf8.RuneCountInString(val.ParentID.Val) > 1024 { + return fmt.Errorf("'parent_id': validation rule 'maxLength(1024)' violated") + } + if !val.ParentID.IsSet() { + if val.TransactionID.IsSet() { + return fmt.Errorf("'parent_id' required when 'transaction_id' is set") + } + if val.TraceID.IsSet() { + return fmt.Errorf("'parent_id' required when 'trace_id' is set") + } + } + if val.TraceID.IsSet() && utf8.RuneCountInString(val.TraceID.Val) > 1024 { + return fmt.Errorf("'trace_id': validation rule 'maxLength(1024)' violated") + } + if !val.TraceID.IsSet() { + if val.TransactionID.IsSet() { + return fmt.Errorf("'trace_id' required when 'transaction_id' is set") + } + if val.ParentID.IsSet() { + return fmt.Errorf("'trace_id' required when 'parent_id' is set") + } + } + if err := val.Transaction.validate(); err != nil { + return errors.Wrapf(err, "transaction") + } + if val.TransactionID.IsSet() && utf8.RuneCountInString(val.TransactionID.Val) > 1024 { + return fmt.Errorf("'transaction_id': validation rule 'maxLength(1024)' violated") + } + if !val.Exception.IsSet() && !val.Log.IsSet() { + return fmt.Errorf("requires at least one of the fields 'exception;log'") + } + return nil +} + +func (val *context) IsSet() bool { + return (len(val.Custom) > 0) || val.Message.IsSet() || val.Page.IsSet() || val.Response.IsSet() || val.Request.IsSet() || val.Service.IsSet() || (len(val.Tags) > 0) || val.User.IsSet() +} + +func (val *context) Reset() { + for k := range val.Custom { + delete(val.Custom, k) + } + val.Message.Reset() + val.Page.Reset() + val.Response.Reset() + val.Request.Reset() + 
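// Maps are emptied with a per-key delete loop rather than reallocation; Go + // compiles this loop form to a map clear, so pooled contexts keep their + // allocated buckets across Reset cycles. +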
val.Service.Reset() + for k := range val.Tags { + delete(val.Tags, k) + } + val.User.Reset() +} + +func (val *context) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Message.validate(); err != nil { + return errors.Wrapf(err, "message") + } + if err := val.Page.validate(); err != nil { + return errors.Wrapf(err, "page") + } + if err := val.Response.validate(); err != nil { + return errors.Wrapf(err, "response") + } + if err := val.Request.validate(); err != nil { + return errors.Wrapf(err, "request") + } + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "service") + } + for k, v := range val.Tags { + switch t := v.(type) { + case nil: + case string: + if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'tags': validation rule 'maxLengthVals(1024)' violated") + } + case bool: + case json.Number: + default: + return fmt.Errorf("'tags': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k) + } + } + if err := val.User.validate(); err != nil { + return errors.Wrapf(err, "user") + } + return nil +} + +func (val *contextMessage) IsSet() bool { + return val.Age.IsSet() || val.Body.IsSet() || val.Headers.IsSet() || val.Queue.IsSet() +} + +func (val *contextMessage) Reset() { + val.Age.Reset() + val.Body.Reset() + val.Headers.Reset() + val.Queue.Reset() +} + +func (val *contextMessage) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Age.validate(); err != nil { + return errors.Wrapf(err, "age") + } + if err := val.Queue.validate(); err != nil { + return errors.Wrapf(err, "queue") + } + return nil +} + +func (val *contextMessageAge) IsSet() bool { + return val.Milliseconds.IsSet() +} + +func (val *contextMessageAge) Reset() { + val.Milliseconds.Reset() +} + +func (val *contextMessageAge) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *contextMessageQueue) IsSet() bool { + return val.Name.IsSet() +} + +func (val *contextMessageQueue) Reset() { + val.Name.Reset() +} + +func (val *contextMessageQueue) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextPage) IsSet() bool { + return val.Referer.IsSet() || val.URL.IsSet() +} + +func (val *contextPage) Reset() { + val.Referer.Reset() + val.URL.Reset() +} + +func (val *contextPage) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *contextResponse) IsSet() bool { + return val.DecodedBodySize.IsSet() || val.EncodedBodySize.IsSet() || val.Finished.IsSet() || val.Headers.IsSet() || val.HeadersSent.IsSet() || val.StatusCode.IsSet() || val.TransferSize.IsSet() +} + +func (val *contextResponse) Reset() { + val.DecodedBodySize.Reset() + val.EncodedBodySize.Reset() + val.Finished.Reset() + val.Headers.Reset() + val.HeadersSent.Reset() + val.StatusCode.Reset() + val.TransferSize.Reset() +} + +func (val *contextResponse) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *contextRequest) IsSet() bool { + return val.Body.IsSet() || (len(val.Cookies) > 0) || (len(val.Env) > 0) || val.Headers.IsSet() || val.HTTPVersion.IsSet() || val.Method.IsSet() || val.Socket.IsSet() || val.URL.IsSet() +} + +func (val *contextRequest) Reset() { + val.Body.Reset() + for k := range val.Cookies { + delete(val.Cookies, k) + } + for k := range val.Env { + delete(val.Env, k) + } + 
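// The remaining fields are nullable wrappers; Reset only clears their + // set-flag and value, so resetting a request allocates nothing. +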
val.Headers.Reset() + val.HTTPVersion.Reset() + val.Method.Reset() + val.Socket.Reset() + val.URL.Reset() +} + +func (val *contextRequest) validate() error { + if !val.IsSet() { + return nil + } + switch val.Body.Val.(type) { + case string: + case map[string]interface{}: + case nil: + default: + return fmt.Errorf("'body': validation rule 'inputTypes(string;object)' violated ") + } + if val.HTTPVersion.IsSet() && utf8.RuneCountInString(val.HTTPVersion.Val) > 1024 { + return fmt.Errorf("'http_version': validation rule 'maxLength(1024)' violated") + } + if val.Method.IsSet() && utf8.RuneCountInString(val.Method.Val) > 1024 { + return fmt.Errorf("'method': validation rule 'maxLength(1024)' violated") + } + if !val.Method.IsSet() { + return fmt.Errorf("'method' required") + } + if err := val.Socket.validate(); err != nil { + return errors.Wrapf(err, "socket") + } + if err := val.URL.validate(); err != nil { + return errors.Wrapf(err, "url") + } + return nil +} + +func (val *contextRequestSocket) IsSet() bool { + return val.Encrypted.IsSet() || val.RemoteAddress.IsSet() +} + +func (val *contextRequestSocket) Reset() { + val.Encrypted.Reset() + val.RemoteAddress.Reset() +} + +func (val *contextRequestSocket) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *contextRequestURL) IsSet() bool { + return val.Full.IsSet() || val.Hash.IsSet() || val.Hostname.IsSet() || val.Path.IsSet() || val.Port.IsSet() || val.Protocol.IsSet() || val.Raw.IsSet() || val.Search.IsSet() +} + +func (val *contextRequestURL) Reset() { + val.Full.Reset() + val.Hash.Reset() + val.Hostname.Reset() + val.Path.Reset() + val.Port.Reset() + val.Protocol.Reset() + val.Raw.Reset() + val.Search.Reset() +} + +func (val *contextRequestURL) validate() error { + if !val.IsSet() { + return nil + } + if val.Full.IsSet() && utf8.RuneCountInString(val.Full.Val) > 1024 { + return fmt.Errorf("'full': validation rule 'maxLength(1024)' violated") + } + if val.Hash.IsSet() && utf8.RuneCountInString(val.Hash.Val) > 1024 { + return fmt.Errorf("'hash': validation rule 'maxLength(1024)' violated") + } + if val.Hostname.IsSet() && utf8.RuneCountInString(val.Hostname.Val) > 1024 { + return fmt.Errorf("'hostname': validation rule 'maxLength(1024)' violated") + } + if val.Path.IsSet() && utf8.RuneCountInString(val.Path.Val) > 1024 { + return fmt.Errorf("'pathname': validation rule 'maxLength(1024)' violated") + } + switch t := val.Port.Val.(type) { + case string: + if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'port': validation rule 'maxLength(1024)' violated") + } + if _, err := strconv.Atoi(t); err != nil { + return fmt.Errorf("'port': validation rule 'targetType(int)' violated") } case int: + case json.Number: + if _, err := t.Int64(); err != nil { + return fmt.Errorf("'port': validation rule 'inputTypes(string;int)' violated") + } case nil: default: - return fmt.Errorf("validation rule 'types(string;int)' violated for 'metadata.user.id'") + return fmt.Errorf("'port': validation rule 'inputTypes(string;int)' violated ") + } + if val.Protocol.IsSet() && utf8.RuneCountInString(val.Protocol.Val) > 1024 { + return fmt.Errorf("'protocol': validation rule 'maxLength(1024)' violated") + } + if val.Raw.IsSet() && utf8.RuneCountInString(val.Raw.Val) > 1024 { + return fmt.Errorf("'raw': validation rule 'maxLength(1024)' violated") + } + if val.Search.IsSet() && utf8.RuneCountInString(val.Search.Val) > 1024 { + return fmt.Errorf("'search': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func 
(val *contextService) IsSet() bool { + return val.Agent.IsSet() || val.Environment.IsSet() || val.Framework.IsSet() || val.Language.IsSet() || val.Name.IsSet() || val.Node.IsSet() || val.Runtime.IsSet() || val.Version.IsSet() +} + +func (val *contextService) Reset() { + val.Agent.Reset() + val.Environment.Reset() + val.Framework.Reset() + val.Language.Reset() + val.Name.Reset() + val.Node.Reset() + val.Runtime.Reset() + val.Version.Reset() +} + +func (val *contextService) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Agent.validate(); err != nil { + return errors.Wrapf(err, "agent") + } + if val.Environment.IsSet() && utf8.RuneCountInString(val.Environment.Val) > 1024 { + return fmt.Errorf("'environment': validation rule 'maxLength(1024)' violated") + } + if err := val.Framework.validate(); err != nil { + return errors.Wrapf(err, "framework") + } + if err := val.Language.validate(); err != nil { + return errors.Wrapf(err, "language") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Name.Val != "" && !patternAlphaNumericExtRegexp.MatchString(val.Name.Val) { + return fmt.Errorf("'name': validation rule 'pattern(patternAlphaNumericExt)' violated") + } + if err := val.Node.validate(); err != nil { + return errors.Wrapf(err, "node") + } + if err := val.Runtime.validate(); err != nil { + return errors.Wrapf(err, "runtime") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceAgent) IsSet() bool { + return val.EphemeralID.IsSet() || val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceAgent) Reset() { + val.EphemeralID.Reset() + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceAgent) validate() error { + if !val.IsSet() { + return nil + } + if val.EphemeralID.IsSet() && utf8.RuneCountInString(val.EphemeralID.Val) > 1024 { + return fmt.Errorf("'ephemeral_id': validation rule 'maxLength(1024)' violated") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceFramework) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceFramework) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceFramework) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceLanguage) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceLanguage) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceLanguage) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if 
val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceNode) IsSet() bool { + return val.Name.IsSet() +} + +func (val *contextServiceNode) Reset() { + val.Name.Reset() +} + +func (val *contextServiceNode) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'configured_name': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *contextServiceRuntime) IsSet() bool { + return val.Name.IsSet() || val.Version.IsSet() +} + +func (val *contextServiceRuntime) Reset() { + val.Name.Reset() + val.Version.Reset() +} + +func (val *contextServiceRuntime) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Version.IsSet() && utf8.RuneCountInString(val.Version.Val) > 1024 { + return fmt.Errorf("'version': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *errorException) IsSet() bool { + return (len(val.Attributes) > 0) || val.Code.IsSet() || (len(val.Cause) > 0) || val.Handled.IsSet() || val.Message.IsSet() || val.Module.IsSet() || (len(val.Stacktrace) > 0) || val.Type.IsSet() +} + +func (val *errorException) Reset() { + for k := range val.Attributes { + delete(val.Attributes, k) + } + val.Code.Reset() + for i := range val.Cause { + val.Cause[i].Reset() + } + val.Cause = val.Cause[:0] + val.Handled.Reset() + val.Message.Reset() + val.Module.Reset() + for i := range val.Stacktrace { + val.Stacktrace[i].Reset() + } + val.Stacktrace = val.Stacktrace[:0] + val.Type.Reset() +} + +func (val *errorException) validate() error { + if !val.IsSet() { + return nil + } + switch t := val.Code.Val.(type) { + case string: + if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'code': validation rule 'maxLength(1024)' violated") + } + case int: + case json.Number: + if _, err := t.Int64(); err != nil { + return fmt.Errorf("'code': validation rule 'inputTypes(string;int)' violated") + } + case nil: + default: + return fmt.Errorf("'code': validation rule 'inputTypes(string;int)' violated ") + } + for _, elem := range val.Cause { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "cause") + } + } + if val.Module.IsSet() && utf8.RuneCountInString(val.Module.Val) > 1024 { + return fmt.Errorf("'module': validation rule 'maxLength(1024)' violated") + } + for _, elem := range val.Stacktrace { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "stacktrace") + } + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + if !val.Message.IsSet() && !val.Type.IsSet() { + return fmt.Errorf("requires at least one of the fields 'message;type'") + } + return nil +} + +func (val *stacktraceFrame) IsSet() bool { + return val.AbsPath.IsSet() || val.Classname.IsSet() || val.ColumnNumber.IsSet() || val.ContextLine.IsSet() || val.Filename.IsSet() || val.Function.IsSet() || val.LibraryFrame.IsSet() || val.LineNumber.IsSet() || val.Module.IsSet() || (len(val.PostContext) > 0) || (len(val.PreContext) > 0) || (len(val.Vars) > 0) +} + +func (val *stacktraceFrame) Reset() { + val.AbsPath.Reset() + val.Classname.Reset() + val.ColumnNumber.Reset() + 
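// PreContext/PostContext below are truncated with s[:0] so their backing + // arrays survive and are reused for the next decoded frame. +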
val.ContextLine.Reset() + val.Filename.Reset() + val.Function.Reset() + val.LibraryFrame.Reset() + val.LineNumber.Reset() + val.Module.Reset() + val.PostContext = val.PostContext[:0] + val.PreContext = val.PreContext[:0] + for k := range val.Vars { + delete(val.Vars, k) + } +} + +func (val *stacktraceFrame) validate() error { + if !val.IsSet() { + return nil + } + if !val.Classname.IsSet() && !val.Filename.IsSet() { + return fmt.Errorf("requires at least one of the fields 'classname;filename'") + } + return nil +} + +func (val *errorLog) IsSet() bool { + return val.Level.IsSet() || val.LoggerName.IsSet() || val.Message.IsSet() || val.ParamMessage.IsSet() || (len(val.Stacktrace) > 0) +} + +func (val *errorLog) Reset() { + val.Level.Reset() + val.LoggerName.Reset() + val.Message.Reset() + val.ParamMessage.Reset() + for i := range val.Stacktrace { + val.Stacktrace[i].Reset() + } + val.Stacktrace = val.Stacktrace[:0] +} + +func (val *errorLog) validate() error { + if !val.IsSet() { + return nil + } + if val.Level.IsSet() && utf8.RuneCountInString(val.Level.Val) > 1024 { + return fmt.Errorf("'level': validation rule 'maxLength(1024)' violated") + } + if val.LoggerName.IsSet() && utf8.RuneCountInString(val.LoggerName.Val) > 1024 { + return fmt.Errorf("'logger_name': validation rule 'maxLength(1024)' violated") + } + if !val.Message.IsSet() { + return fmt.Errorf("'message' required") + } + if val.ParamMessage.IsSet() && utf8.RuneCountInString(val.ParamMessage.Val) > 1024 { + return fmt.Errorf("'param_message': validation rule 'maxLength(1024)' violated") + } + for _, elem := range val.Stacktrace { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "stacktrace") + } + } + return nil +} + +func (val *errorTransactionRef) IsSet() bool { + return val.Sampled.IsSet() || val.Type.IsSet() +} + +func (val *errorTransactionRef) Reset() { + val.Sampled.Reset() + val.Type.Reset() +} + +func (val *errorTransactionRef) validate() error { + if !val.IsSet() { + return nil + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *metricsetRoot) IsSet() bool { + return val.Metricset.IsSet() +} + +func (val *metricsetRoot) Reset() { + val.Metricset.Reset() +} + +func (val *metricsetRoot) validate() error { + if err := val.Metricset.validate(); err != nil { + return errors.Wrapf(err, "metricset") + } + if !val.Metricset.IsSet() { + return fmt.Errorf("'metricset' required") + } + return nil +} + +func (val *metricset) IsSet() bool { + return val.Timestamp.IsSet() || (len(val.Samples) > 0) || val.Span.IsSet() || (len(val.Tags) > 0) || val.Transaction.IsSet() +} + +func (val *metricset) Reset() { + val.Timestamp.Reset() + for k := range val.Samples { + delete(val.Samples, k) + } + val.Span.Reset() + for k := range val.Tags { + delete(val.Tags, k) + } + val.Transaction.Reset() +} + +func (val *metricset) validate() error { + if !val.IsSet() { + return nil + } + if len(val.Samples) == 0 { + return fmt.Errorf("'samples' required") + } + for k, v := range val.Samples { + if err := v.validate(); err != nil { + return errors.Wrapf(err, "samples") + } + if k != "" && !patternNoAsteriskQuoteRegexp.MatchString(k) { + return fmt.Errorf("'samples': validation rule 'patternKeys(patternNoAsteriskQuote)' violated") + } + } + if err := val.Span.validate(); err != nil { + return errors.Wrapf(err, "span") + } + for k, v := range val.Tags { + switch t := v.(type) { + case nil: + case string: + 
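// Limits count runes, not bytes: len("⌘") is 3, utf8.RuneCountInString("⌘") + // is 1, so multi-byte tag values are not over-counted. +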
if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'tags': validation rule 'maxLengthVals(1024)' violated") + } + case bool: + case json.Number: + default: + return fmt.Errorf("'tags': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k) + } + } + if err := val.Transaction.validate(); err != nil { + return errors.Wrapf(err, "transaction") + } + return nil +} + +func (val *metricsetSampleValue) IsSet() bool { + return val.Type.IsSet() || val.Unit.IsSet() || val.Value.IsSet() || (len(val.Values) > 0) || (len(val.Counts) > 0) +} + +func (val *metricsetSampleValue) Reset() { + val.Type.Reset() + val.Unit.Reset() + val.Value.Reset() + val.Values = val.Values[:0] + val.Counts = val.Counts[:0] +} + +func (val *metricsetSampleValue) validate() error { + if !val.IsSet() { + return nil + } + if !(len(val.Values) > 0) { + if len(val.Counts) > 0 { + return fmt.Errorf("'values' required when 'counts' is set") + } + } + for _, elem := range val.Counts { + if elem < 0 { + return fmt.Errorf("'counts': validation rule 'minVals(0)' violated") + } + } + if !(len(val.Counts) > 0) { + if len(val.Values) > 0 { + return fmt.Errorf("'counts' required when 'values' is set") + } + } + if !val.Value.IsSet() && !(len(val.Values) > 0) { + return fmt.Errorf("requires at least one of the fields 'value;values'") + } + return nil +} + +func (val *metricsetSpanRef) IsSet() bool { + return val.Subtype.IsSet() || val.Type.IsSet() +} + +func (val *metricsetSpanRef) Reset() { + val.Subtype.Reset() + val.Type.Reset() +} + +func (val *metricsetSpanRef) validate() error { + if !val.IsSet() { + return nil + } + if val.Subtype.IsSet() && utf8.RuneCountInString(val.Subtype.Val) > 1024 { + return fmt.Errorf("'subtype': validation rule 'maxLength(1024)' violated") + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *metricsetTransactionRef) IsSet() bool { + return val.Name.IsSet() || val.Type.IsSet() +} + +func (val *metricsetTransactionRef) Reset() { + val.Name.Reset() + val.Type.Reset() +} + +func (val *metricsetTransactionRef) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *spanRoot) IsSet() bool { + return val.Span.IsSet() +} + +func (val *spanRoot) Reset() { + val.Span.Reset() +} + +func (val *spanRoot) validate() error { + if err := val.Span.validate(); err != nil { + return errors.Wrapf(err, "span") + } + if !val.Span.IsSet() { + return fmt.Errorf("'span' required") + } + return nil +} + +func (val *span) IsSet() bool { + return val.Action.IsSet() || (len(val.ChildIDs) > 0) || val.Composite.IsSet() || val.Context.IsSet() || val.Duration.IsSet() || val.ID.IsSet() || val.Name.IsSet() || val.Outcome.IsSet() || val.ParentID.IsSet() || val.SampleRate.IsSet() || (len(val.Stacktrace) > 0) || val.Start.IsSet() || val.Subtype.IsSet() || val.Sync.IsSet() || val.Timestamp.IsSet() || val.TraceID.IsSet() || val.TransactionID.IsSet() || val.Type.IsSet() +} + +func (val *span) Reset() { + val.Action.Reset() + val.ChildIDs = val.ChildIDs[:0] + val.Composite.Reset() + val.Context.Reset() + val.Duration.Reset() + val.ID.Reset() + val.Name.Reset() + 
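// Stacktrace below is reset element-wise and truncated to length zero, the + // same pooling pattern used for exception causes and frames. +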
val.Outcome.Reset() + val.ParentID.Reset() + val.SampleRate.Reset() + for i := range val.Stacktrace { + val.Stacktrace[i].Reset() + } + val.Stacktrace = val.Stacktrace[:0] + val.Start.Reset() + val.Subtype.Reset() + val.Sync.Reset() + val.Timestamp.Reset() + val.TraceID.Reset() + val.TransactionID.Reset() + val.Type.Reset() +} + +func (val *span) validate() error { + if !val.IsSet() { + return nil + } + if val.Action.IsSet() && utf8.RuneCountInString(val.Action.Val) > 1024 { + return fmt.Errorf("'action': validation rule 'maxLength(1024)' violated") + } + for _, elem := range val.ChildIDs { + if utf8.RuneCountInString(elem) > 1024 { + return fmt.Errorf("'child_ids': validation rule 'maxLength(1024)' violated") + } + } + if err := val.Composite.validate(); err != nil { + return errors.Wrapf(err, "composite") + } + if err := val.Context.validate(); err != nil { + return errors.Wrapf(err, "context") + } + if val.Duration.IsSet() && val.Duration.Val < 0 { + return fmt.Errorf("'duration': validation rule 'min(0)' violated") + } + if !val.Duration.IsSet() { + return fmt.Errorf("'duration' required") + } + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } + if !val.ID.IsSet() { + return fmt.Errorf("'id' required") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if !val.Name.IsSet() { + return fmt.Errorf("'name' required") + } + if val.Outcome.Val != "" { + var matchEnum bool + for _, s := range enumOutcome { + if val.Outcome.Val == s { + matchEnum = true + break + } + } + if !matchEnum { + return fmt.Errorf("'outcome': validation rule 'enum(enumOutcome)' violated") + } + } + if val.ParentID.IsSet() && utf8.RuneCountInString(val.ParentID.Val) > 1024 { + return fmt.Errorf("'parent_id': validation rule 'maxLength(1024)' violated") + } + if !val.ParentID.IsSet() { + return fmt.Errorf("'parent_id' required") + } + for _, elem := range val.Stacktrace { + if err := elem.validate(); err != nil { + return errors.Wrapf(err, "stacktrace") + } + } + if val.Subtype.IsSet() && utf8.RuneCountInString(val.Subtype.Val) > 1024 { + return fmt.Errorf("'subtype': validation rule 'maxLength(1024)' violated") + } + if val.TraceID.IsSet() && utf8.RuneCountInString(val.TraceID.Val) > 1024 { + return fmt.Errorf("'trace_id': validation rule 'maxLength(1024)' violated") + } + if !val.TraceID.IsSet() { + return fmt.Errorf("'trace_id' required") + } + if val.TransactionID.IsSet() && utf8.RuneCountInString(val.TransactionID.Val) > 1024 { + return fmt.Errorf("'transaction_id': validation rule 'maxLength(1024)' violated") + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + if !val.Type.IsSet() { + return fmt.Errorf("'type' required") + } + if !val.Start.IsSet() && !val.Timestamp.IsSet() { + return fmt.Errorf("requires at least one of the fields 'start;timestamp'") + } + return nil +} + +func (val *spanComposite) IsSet() bool { + return val.Count.IsSet() || val.Sum.IsSet() || val.CompressionStrategy.IsSet() +} + +func (val *spanComposite) Reset() { + val.Count.Reset() + val.Sum.Reset() + val.CompressionStrategy.Reset() +} + +func (val *spanComposite) validate() error { + if !val.IsSet() { + return nil + } + if val.Count.IsSet() && val.Count.Val < 2 { + return fmt.Errorf("'count': validation rule 'min(2)' violated") + } + if 
!val.Count.IsSet() { + return fmt.Errorf("'count' required") + } + if val.Sum.IsSet() && val.Sum.Val < 0 { + return fmt.Errorf("'sum': validation rule 'min(0)' violated") + } + if !val.Sum.IsSet() { + return fmt.Errorf("'sum' required") + } + if !val.CompressionStrategy.IsSet() { + return fmt.Errorf("'compression_strategy' required") + } + return nil +} + +func (val *spanContext) IsSet() bool { + return val.Database.IsSet() || val.Destination.IsSet() || val.HTTP.IsSet() || val.Message.IsSet() || val.Service.IsSet() || (len(val.Tags) > 0) +} + +func (val *spanContext) Reset() { + val.Database.Reset() + val.Destination.Reset() + val.HTTP.Reset() + val.Message.Reset() + val.Service.Reset() + for k := range val.Tags { + delete(val.Tags, k) + } +} + +func (val *spanContext) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Database.validate(); err != nil { + return errors.Wrapf(err, "db") + } + if err := val.Destination.validate(); err != nil { + return errors.Wrapf(err, "destination") + } + if err := val.HTTP.validate(); err != nil { + return errors.Wrapf(err, "http") + } + if err := val.Message.validate(); err != nil { + return errors.Wrapf(err, "message") + } + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "service") + } + for k, v := range val.Tags { + switch t := v.(type) { + case nil: + case string: + if utf8.RuneCountInString(t) > 1024 { + return fmt.Errorf("'tags': validation rule 'maxLengthVals(1024)' violated") + } + case bool: + case json.Number: + default: + return fmt.Errorf("'tags': validation rule 'inputTypesVals(string;bool;number)' violated for key %s", k) + } + } + return nil +} + +func (val *spanContextDatabase) IsSet() bool { + return val.Instance.IsSet() || val.Link.IsSet() || val.RowsAffected.IsSet() || val.Statement.IsSet() || val.Type.IsSet() || val.User.IsSet() +} + +func (val *spanContextDatabase) Reset() { + val.Instance.Reset() + val.Link.Reset() + val.RowsAffected.Reset() + val.Statement.Reset() + val.Type.Reset() + val.User.Reset() +} + +func (val *spanContextDatabase) validate() error { + if !val.IsSet() { + return nil + } + if val.Link.IsSet() && utf8.RuneCountInString(val.Link.Val) > 1024 { + return fmt.Errorf("'link': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *spanContextDestination) IsSet() bool { + return val.Address.IsSet() || val.Port.IsSet() || val.Service.IsSet() +} + +func (val *spanContextDestination) Reset() { + val.Address.Reset() + val.Port.Reset() + val.Service.Reset() +} + +func (val *spanContextDestination) validate() error { + if !val.IsSet() { + return nil + } + if val.Address.IsSet() && utf8.RuneCountInString(val.Address.Val) > 1024 { + return fmt.Errorf("'address': validation rule 'maxLength(1024)' violated") + } + if err := val.Service.validate(); err != nil { + return errors.Wrapf(err, "service") + } + return nil +} + +func (val *spanContextDestinationService) IsSet() bool { + return val.Name.IsSet() || val.Resource.IsSet() || val.Type.IsSet() +} + +func (val *spanContextDestinationService) Reset() { + val.Name.Reset() + val.Resource.Reset() + val.Type.Reset() +} + +func (val *spanContextDestinationService) validate() error { + if !val.IsSet() { + return nil + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Resource.IsSet() && utf8.RuneCountInString(val.Resource.Val) > 1024 { + return fmt.Errorf("'resource': validation rule 'maxLength(1024)' 
violated") + } + if !val.Resource.IsSet() { + return fmt.Errorf("'resource' required") + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + return nil +} + +func (val *spanContextHTTP) IsSet() bool { + return val.Method.IsSet() || val.Response.IsSet() || val.StatusCode.IsSet() || val.URL.IsSet() +} + +func (val *spanContextHTTP) Reset() { + val.Method.Reset() + val.Response.Reset() + val.StatusCode.Reset() + val.URL.Reset() +} + +func (val *spanContextHTTP) validate() error { + if !val.IsSet() { + return nil + } + if val.Method.IsSet() && utf8.RuneCountInString(val.Method.Val) > 1024 { + return fmt.Errorf("'method': validation rule 'maxLength(1024)' violated") + } + if err := val.Response.validate(); err != nil { + return errors.Wrapf(err, "response") + } + return nil +} + +func (val *spanContextHTTPResponse) IsSet() bool { + return val.DecodedBodySize.IsSet() || val.EncodedBodySize.IsSet() || val.Headers.IsSet() || val.StatusCode.IsSet() || val.TransferSize.IsSet() +} + +func (val *spanContextHTTPResponse) Reset() { + val.DecodedBodySize.Reset() + val.EncodedBodySize.Reset() + val.Headers.Reset() + val.StatusCode.Reset() + val.TransferSize.Reset() +} + +func (val *spanContextHTTPResponse) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *transactionRoot) IsSet() bool { + return val.Transaction.IsSet() +} + +func (val *transactionRoot) Reset() { + val.Transaction.Reset() +} + +func (val *transactionRoot) validate() error { + if err := val.Transaction.validate(); err != nil { + return errors.Wrapf(err, "transaction") + } + if !val.Transaction.IsSet() { + return fmt.Errorf("'transaction' required") + } + return nil +} + +func (val *transaction) IsSet() bool { + return val.Context.IsSet() || val.Duration.IsSet() || val.ID.IsSet() || val.Marks.IsSet() || val.Name.IsSet() || val.Outcome.IsSet() || val.ParentID.IsSet() || val.Result.IsSet() || val.Sampled.IsSet() || val.SampleRate.IsSet() || val.Session.IsSet() || val.SpanCount.IsSet() || val.Timestamp.IsSet() || val.TraceID.IsSet() || val.Type.IsSet() || val.UserExperience.IsSet() +} + +func (val *transaction) Reset() { + val.Context.Reset() + val.Duration.Reset() + val.ID.Reset() + val.Marks.Reset() + val.Name.Reset() + val.Outcome.Reset() + val.ParentID.Reset() + val.Result.Reset() + val.Sampled.Reset() + val.SampleRate.Reset() + val.Session.Reset() + val.SpanCount.Reset() + val.Timestamp.Reset() + val.TraceID.Reset() + val.Type.Reset() + val.UserExperience.Reset() +} + +func (val *transaction) validate() error { + if !val.IsSet() { + return nil + } + if err := val.Context.validate(); err != nil { + return errors.Wrapf(err, "context") + } + if val.Duration.IsSet() && val.Duration.Val < 0 { + return fmt.Errorf("'duration': validation rule 'min(0)' violated") + } + if !val.Duration.IsSet() { + return fmt.Errorf("'duration' required") + } + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } + if !val.ID.IsSet() { + return fmt.Errorf("'id' required") + } + if err := val.Marks.validate(); err != nil { + return errors.Wrapf(err, "marks") + } + if val.Name.IsSet() && utf8.RuneCountInString(val.Name.Val) > 1024 { + return fmt.Errorf("'name': validation rule 'maxLength(1024)' violated") + } + if val.Outcome.Val != "" { + var matchEnum bool + for _, s := range enumOutcome { + if val.Outcome.Val == s { + matchEnum = true + break 
+ } + } + if !matchEnum { + return fmt.Errorf("'outcome': validation rule 'enum(enumOutcome)' violated") + } + } + if val.ParentID.IsSet() && utf8.RuneCountInString(val.ParentID.Val) > 1024 { + return fmt.Errorf("'parent_id': validation rule 'maxLength(1024)' violated") + } + if val.Result.IsSet() && utf8.RuneCountInString(val.Result.Val) > 1024 { + return fmt.Errorf("'result': validation rule 'maxLength(1024)' violated") + } + if err := val.Session.validate(); err != nil { + return errors.Wrapf(err, "session") + } + if err := val.SpanCount.validate(); err != nil { + return errors.Wrapf(err, "span_count") + } + if !val.SpanCount.IsSet() { + return fmt.Errorf("'span_count' required") + } + if val.TraceID.IsSet() && utf8.RuneCountInString(val.TraceID.Val) > 1024 { + return fmt.Errorf("'trace_id': validation rule 'maxLength(1024)' violated") + } + if !val.TraceID.IsSet() { + return fmt.Errorf("'trace_id' required") + } + if val.Type.IsSet() && utf8.RuneCountInString(val.Type.Val) > 1024 { + return fmt.Errorf("'type': validation rule 'maxLength(1024)' violated") + } + if !val.Type.IsSet() { + return fmt.Errorf("'type' required") + } + if err := val.UserExperience.validate(); err != nil { + return errors.Wrapf(err, "experience") + } + return nil +} + +func (val *transactionMarks) IsSet() bool { + return (len(val.Events) > 0) +} + +func (val *transactionMarks) Reset() { + for k := range val.Events { + delete(val.Events, k) + } +} + +func (val *transactionMarks) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *transactionMarkEvents) IsSet() bool { + return (len(val.Measurements) > 0) +} + +func (val *transactionMarkEvents) Reset() { + for k := range val.Measurements { + delete(val.Measurements, k) + } +} + +func (val *transactionMarkEvents) validate() error { + if !val.IsSet() { + return nil + } + return nil +} + +func (val *transactionSession) IsSet() bool { + return val.ID.IsSet() || val.Sequence.IsSet() +} + +func (val *transactionSession) Reset() { + val.ID.Reset() + val.Sequence.Reset() +} + +func (val *transactionSession) validate() error { + if !val.IsSet() { + return nil + } + if val.ID.IsSet() && utf8.RuneCountInString(val.ID.Val) > 1024 { + return fmt.Errorf("'id': validation rule 'maxLength(1024)' violated") + } + if !val.ID.IsSet() { + return fmt.Errorf("'id' required") + } + if val.Sequence.IsSet() && val.Sequence.Val < 1 { + return fmt.Errorf("'sequence': validation rule 'min(1)' violated") + } + return nil +} + +func (val *transactionSpanCount) IsSet() bool { + return val.Dropped.IsSet() || val.Started.IsSet() +} + +func (val *transactionSpanCount) Reset() { + val.Dropped.Reset() + val.Started.Reset() +} + +func (val *transactionSpanCount) validate() error { + if !val.IsSet() { + return nil + } + if !val.Started.IsSet() { + return fmt.Errorf("'started' required") + } + return nil +} + +func (val *transactionUserExperience) IsSet() bool { + return val.CumulativeLayoutShift.IsSet() || val.FirstInputDelay.IsSet() || val.Longtask.IsSet() || val.TotalBlockingTime.IsSet() +} + +func (val *transactionUserExperience) Reset() { + val.CumulativeLayoutShift.Reset() + val.FirstInputDelay.Reset() + val.Longtask.Reset() + val.TotalBlockingTime.Reset() +} + +func (val *transactionUserExperience) validate() error { + if !val.IsSet() { + return nil + } + if val.CumulativeLayoutShift.IsSet() && val.CumulativeLayoutShift.Val < 0 { + return fmt.Errorf("'cls': validation rule 'min(0)' violated") + } + if val.FirstInputDelay.IsSet() && val.FirstInputDelay.Val < 0 { + 
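// negative web-vitals measurements are rejected; zero remains valid (min(0)) +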
return fmt.Errorf("'fid': validation rule 'min(0)' violated") + } + if err := val.Longtask.validate(); err != nil { + return errors.Wrapf(err, "longtask") + } + if val.TotalBlockingTime.IsSet() && val.TotalBlockingTime.Val < 0 { + return fmt.Errorf("'tbt': validation rule 'min(0)' violated") + } + return nil +} + +func (val *longtaskMetrics) IsSet() bool { + return val.Count.IsSet() || val.Max.IsSet() || val.Sum.IsSet() +} + +func (val *longtaskMetrics) Reset() { + val.Count.Reset() + val.Max.Reset() + val.Sum.Reset() +} + +func (val *longtaskMetrics) validate() error { + if !val.IsSet() { + return nil + } + if val.Count.IsSet() && val.Count.Val < 0 { + return fmt.Errorf("'count': validation rule 'min(0)' violated") + } + if !val.Count.IsSet() { + return fmt.Errorf("'count' required") + } + if val.Max.IsSet() && val.Max.Val < 0 { + return fmt.Errorf("'max': validation rule 'min(0)' violated") + } + if !val.Max.IsSet() { + return fmt.Errorf("'max' required") } - if utf8.RuneCountInString(m.Email.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.user.email'") + if val.Sum.IsSet() && val.Sum.Val < 0 { + return fmt.Errorf("'sum': validation rule 'min(0)' violated") } - if utf8.RuneCountInString(m.Name.Val) > 1024 { - return fmt.Errorf("validation rule 'max(1024)' violated for 'metadata.user.username'") + if !val.Sum.IsSet() { + return fmt.Errorf("'sum' required") } return nil } diff --git a/model/modeldecoder/v2/model_test.go b/model/modeldecoder/v2/model_test.go index 936bd9746fe..8a735ac3caf 100644 --- a/model/modeldecoder/v2/model_test.go +++ b/model/modeldecoder/v2/model_test.go @@ -18,10 +18,11 @@ package v2 import ( - "bytes" - "encoding/json" + "fmt" "io" "os" + "path/filepath" + "regexp" "strings" "testing" @@ -30,180 +31,629 @@ import ( "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" + "github.com/elastic/apm-server/model/modeldecoder/nullable" + "github.com/elastic/beats/v7/libbeat/common" ) -func testdata(t *testing.T) io.Reader { - r, err := os.Open("../../../testdata/intake-v2/metadata.ndjson") - require.NoError(t, err) - return r +// +// Test Validation rules +// + +func TestUserValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "id-string", data: `{"id":"user123"}`}, + {name: "id-int", data: `{"id":44}`}, + {name: "id-float", errorKey: "inputTypes", data: `{"id":45.6}`}, + {name: "id-bool", errorKey: "inputTypes", data: `{"id":true}`}, + {name: "id-string-max-len", data: `{"id":"` + modeldecodertest.BuildString(1024) + `"}`}, + {name: "id-string-max-len-exceeded", errorKey: "max", data: `{"id":"` + modeldecodertest.BuildString(1025) + `"}`}, + } + testValidation(t, "metadata", testcases, "user") + testValidation(t, "transaction", testcases, "context", "user") } -func TestIsSet(t *testing.T) { - inp := `{"cloud":{"availability_zone":"eu-west-3","instance":{"id":"1234"}}}` - var m metadata - require.NoError(t, decoder.NewJSONIteratorDecoder(strings.NewReader(inp)).Decode(&m)) - assert.True(t, m.IsSet()) - assert.True(t, m.Cloud.IsSet()) - assert.True(t, m.Cloud.AvailabilityZone.IsSet()) - assert.True(t, m.Cloud.Instance.ID.IsSet()) - assert.False(t, m.Cloud.Instance.Name.IsSet()) -} - -func TestSetReset(t *testing.T) { - var m metadataRoot - require.NoError(t, decoder.NewJSONIteratorDecoder(testdata(t)).Decode(&m)) - require.True(t, m.IsSet()) - require.True(t, m.Metadata.Cloud.IsSet()) - require.NotEmpty(t, m.Metadata.Labels) - 
require.True(t, m.Metadata.Process.IsSet()) - require.True(t, m.Metadata.Service.IsSet()) - require.True(t, m.Metadata.System.IsSet()) - require.True(t, m.Metadata.User.IsSet()) - // call Reset and ensure initial state, except for array capacity - m.Reset() - assert.False(t, m.IsSet()) - assert.Equal(t, metadataCloud{}, m.Metadata.Cloud) - assert.Equal(t, metadataService{}, m.Metadata.Service) - assert.Equal(t, metadataSystem{}, m.Metadata.System) - assert.Equal(t, metadataUser{}, m.Metadata.User) - assert.Empty(t, m.Metadata.Labels) - assert.Empty(t, m.Metadata.Process.Pid) - assert.Empty(t, m.Metadata.Process.Ppid) - assert.Empty(t, m.Metadata.Process.Title) - // test that array len is set to zero, but not capacity - assert.Empty(t, m.Metadata.Process.Argv) - assert.Greater(t, cap(m.Metadata.Process.Argv), 0) -} - -func TestValidationRules(t *testing.T) { - type testcase struct { - name string - errorKey string - data string - } - - strBuilder := func(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = '⌘' - } - return string(b) - } - - testMetadata := func(t *testing.T, key string, tc testcase) { - // load data - // set testcase data for given key - var data map[string]interface{} - require.NoError(t, decoder.NewJSONIteratorDecoder(testdata(t)).Decode(&data)) - meta := data["metadata"].(map[string]interface{}) - var keyData map[string]interface{} - require.NoError(t, json.Unmarshal([]byte(tc.data), &keyData)) - meta[key] = keyData - - // unmarshal data into metdata struct - var m metadata - b, err := json.Marshal(meta) - require.NoError(t, err) - require.NoError(t, decoder.NewJSONIteratorDecoder(bytes.NewReader(b)).Decode(&m)) - // run validation and checks - err = m.validate() - if tc.errorKey == "" { - assert.NoError(t, err) - } else { - require.Error(t, err) - assert.Contains(t, err.Error(), tc.errorKey) - } +func TestServiceValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "service-name-az", data: `{"agent":{"name":"go","version":"1.0"},"name":"abcdefghijklmnopqrstuvwxyz"}`}, + {name: "service-name-AZ", data: `{"agent":{"name":"go","version":"1.0"},"name":"ABCDEFGHIJKLMNOPQRSTUVWXYZ"}`}, + {name: "service-name-09 _-", data: `{"agent":{"name":"go","version":"1.0"},"name":"0123456789 -_"}`}, + {name: "service-name-invalid", errorKey: "patternAlphaNumericExt", + data: `{"agent":{"name":"go","version":"1.0"},"name":"⌘"}`}, + {name: "service-name-max", data: `{"agent":{"name":"go","version":"1.0"},"name":"` + modeldecodertest.BuildStringWith(1024, '-') + `"}`}, + {name: "service-name-max-exceeded", errorKey: "max", + data: `{"agent":{"name":"go","version":"1.0"},"name":"` + modeldecodertest.BuildStringWith(1025, '-') + `"}`}, + } + testValidation(t, "metadata", testcases, "service") + testValidation(t, "transaction", testcases, "context", "service") + testValidation(t, "span", testcases, "context", "service") +} + +func TestLabelValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "valid", data: `{"k1\".*":"v1.s*\"","k2":2.3,"k3":3,"k4":true,"k5":null}`}, + {name: "restricted-type", errorKey: "inputTypesVals", data: `{"k1":{"k2":"v1"}}`}, + {name: "restricted-type-slice", errorKey: "inputTypesVals", data: `{"k1":{"v1":[1,2,3]}}`}, + {name: "max-len", data: `{"k1":"` + modeldecodertest.BuildString(1024) + `"}`}, + {name: "max-len-exceeded", errorKey: "maxLengthVals", data: `{"k1":"` + modeldecodertest.BuildString(1025) + `"}`}, + } + testValidation(t, "metadata", testcases, "labels") + testValidation(t, "transaction", testcases, "context", 
"tags") + testValidation(t, "span", testcases, "context", "tags") + testValidation(t, "metricset", testcases, "tags") +} + +func TestSamplesValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "valid", data: `{"k.1\\":{"value": 34.5},"k.2.a":{"value":5}}`}, + {name: "key-asterisk", errorKey: "patternNoAsteriskQuote", data: `{"k1*":{"value": 34.5},"k.2.a":{"value":5}}`}, + {name: "key-quotemark", errorKey: "patternNoAsteriskQuote", data: `{"k1\"":{"value": 34.5}}`}, + } + testValidation(t, "metricset", testcases, "samples") +} + +func TestMaxLenValidationRules(t *testing.T) { + // this tests an arbitrary field to ensure the `max` rule on strings works as expected + testcases := []testcase{ + {name: "title-max-len", data: `{"pid":1,"title":"` + modeldecodertest.BuildString(1024) + `"}`}, + {name: "title-max-len-exceeded", errorKey: "max", + data: `{"pid":1,"title":"` + modeldecodertest.BuildString(1025) + `"}`}, } + testValidation(t, "metadata", testcases, "process") +} - t.Run("user", func(t *testing.T) { - for _, tc := range []testcase{ - {name: "id-string", data: `{"id":"user123"}`}, - {name: "id-int", data: `{"id":44}`}, - {name: "id-float", errorKey: "types", data: `{"id":45.6}`}, - {name: "id-bool", errorKey: "types", data: `{"id":true}`}, - {name: "id-string-max-len", data: `{"id":"` + strBuilder(1024) + `"}`}, - {name: "id-string-max-len", errorKey: "max", data: `{"id":"` + strBuilder(1025) + `"}`}, - } { - t.Run(tc.name, func(t *testing.T) { - testMetadata(t, "user", tc) - }) +func TestContextValidationRules(t *testing.T) { + t.Run("custom", func(t *testing.T) { + testcases := []testcase{ + {name: "custom", data: `{"custom":{"k\\1":{"v.1":123,"v*2":"value\\"},"k\"2":34,"k3":[{"a.1":1,"b*\"":2}]}}`}, } + testValidation(t, "transaction", testcases, "context") }) - t.Run("service", func(t *testing.T) { - for _, tc := range []testcase{ - {name: "name-valid-lower", data: `"name":"abcdefghijklmnopqrstuvwxyz"`}, - {name: "name-valid-upper", data: `"name":"ABCDEFGHIJKLMNOPQRSTUVWXYZ"`}, - {name: "name-valid-digits", data: `"name":"0123456789"`}, - {name: "name-valid-special", data: `"name":"_ -"`}, - {name: "name-asterisk", errorKey: "service.name", data: `"name":"abc*"`}, - {name: "name-dot", errorKey: "service.name", data: `"name":"abc."`}, - } { - t.Run(tc.name, func(t *testing.T) { - tc.data = `{"agent":{"name":"go","version":"1.0"},` + tc.data + `}` - testMetadata(t, "service", tc) - }) + t.Run("request", func(t *testing.T) { + testcases := []testcase{ + {name: "request-body-string", data: `{"request":{"method":"get","body":"value"}}`}, + {name: "request-body-object", data: `{"request":{"method":"get","body":{"a":"b"}}}`}, + {name: "request-body-array", errorKey: "body", + data: `{"request":{"method":"get","body":[1,2]}}`}, } + testValidation(t, "transaction", testcases, "context") }) +} + +func TestDurationValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "duration", data: `0.0`}, + {name: "duration", errorKey: "min", data: `-0.09`}, + } + testValidation(t, "transaction", testcases, "duration") +} + +func TestMarksValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "marks", data: `{"k.1*\\\"":{"v.1*\\\"":12.3}}`}, + } + testValidation(t, "transaction", testcases, "marks") +} + +func TestOutcomeValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "outcome-success", data: `"success"`}, + {name: "outcome-failure", data: `"failure"`}, + {name: "outcome-unknown", data: `"unknown"`}, + {name: "outcome-invalid", errorKey: 
"enum", data: `"anything"`}, + } + testValidation(t, "transaction", testcases, "outcome") + testValidation(t, "span", testcases, "outcome") +} - t.Run("labels", func(t *testing.T) { - for _, tc := range []testcase{ - {name: "valid", data: `{"k1":"v1","k2":2.3,"k3":3,"k4":true,"k5":null}`}, - {name: "restricted-type", errorKey: "typesVals", data: `{"k1":{"k2":"v1"}}`}, - {name: "key-dot", errorKey: "patternKeys", data: `{"k.1":"v1"}`}, - {name: "key-asterisk", errorKey: "patternKeys", data: `{"k*1":"v1"}`}, - {name: "key-quotemark", errorKey: "patternKeys", data: `{"k\"1":"v1"}`}, - {name: "max-len", data: `{"k1":"` + strBuilder(1024) + `"}`}, - {name: "max-len-exceeded", errorKey: "maxVals", data: `{"k1":"` + strBuilder(1025) + `"}`}, - } { - t.Run(tc.name, func(t *testing.T) { - testMetadata(t, "labels", tc) - }) +func TestURLValidationRules(t *testing.T) { + testcases := []testcase{ + {name: "port-string", data: `{"request":{"method":"get","url":{"port":"8200"}}}`}, + {name: "port-int", data: `{"request":{"method":"get","url":{"port":8200}}}`}, + {name: "port-invalid-string", errorKey: "targetType", + data: `{"request":{"method":"get","url":{"port":"invalid"}}}`}, + {name: "port-invalid-type", errorKey: "inputTypes", + data: `{"request":{"method":"get","url":{"port":[8200,8201]}}}`}, + {name: "port-invalid-type", errorKey: "inputTypes", + data: `{"request":{"method":"get","url":{"port":{"val":8200}}}}`}, + } + testValidation(t, "transaction", testcases, "context") +} + +// +// Test Reset() +// + +func TestReset(t *testing.T) { + addStr := func(s string) nullable.String { + ns := nullable.String{} + ns.Set(s) + return ns + } + decode := func(t *testing.T, inp string, out interface{}) { + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(&out)) + } + t.Run("struct", func(t *testing.T) { + var out metadataServiceNode + inputs := []string{`{"configured_name":"a"}`, `{"configured_name":"b"}`, `{}`} + expected := []metadataServiceNode{{Name: addStr("a")}, {Name: addStr("b")}, {}} + for i := 0; i < len(inputs); i++ { + out.Reset() + decode(t, inputs[i], &out) + assert.Equal(t, expected[i], out) } }) - - t.Run("max-len", func(t *testing.T) { - // check that `max` on strings is respected on an arbitrary field - for _, tc := range []testcase{ - {name: "title-max-len", data: `{"pid":1,"title":"` + strBuilder(1024) + `"}`}, - {name: "title-max-len-exceeded", errorKey: "max", - data: `{"pid":1,"title":"` + strBuilder(1025) + `"}`}, - } { - t.Run(tc.name, func(t *testing.T) { - testMetadata(t, "process", tc) - }) + t.Run("map", func(t *testing.T) { + var out metadata + inputs := []string{ + `{"labels":{"a":"1","b":"s","c":true}}}`, + `{"labels":{"a":"new"}}}`, + `{}`} + expected := []metadata{ + {Labels: common.MapStr{"a": "1", "b": "s", "c": true}}, + {Labels: common.MapStr{"a": "new"}}, + {Labels: common.MapStr{}}} + for i := 0; i < len(inputs); i++ { + out.Reset() + decode(t, inputs[i], &out) + assert.Equal(t, expected[i], out) + } + }) + t.Run("map-structs", func(t *testing.T) { + var out transaction + inputs := []string{ + `{"marks":{"group1":{"meas1":43.5,"meas2":23.5},"group2":{"a":1,"b":14}}}`, + `{"marks":{"group1":{"meas1":14}}}`, + `{}`, + } + expected := []transaction{ + {Marks: transactionMarks{Events: map[string]transactionMarkEvents{ + "group1": {Measurements: map[string]float64{"meas1": 43.5, "meas2": 23.5}}, + "group2": {Measurements: map[string]float64{"a": 1, "b": 14}}}}}, + {Marks: transactionMarks{Events: map[string]transactionMarkEvents{ + "group1": 
{Measurements: map[string]float64{"meas1": 14}}}}}, + {Marks: transactionMarks{Events: map[string]transactionMarkEvents{}}}} + for i := 0; i < len(inputs); i++ { + out.Reset() + decode(t, inputs[i], &out) + assert.Equal(t, expected[i], out) } }) + t.Run("slice", func(t *testing.T) { + var out metadataProcess + inputs := []string{ + `{"argv":["a","b"]}`, + `{"argv":["c"]}`, + `{}`} + expected := []metadataProcess{ + {Argv: []string{"a", "b"}}, + {Argv: []string{"c"}}, + {Argv: []string{}}} + for i := 0; i < len(inputs); i++ { + out.Reset() + decode(t, inputs[i], &out) + assert.Equal(t, expected[i], out) + } + }) + t.Run("slice-structs", func(t *testing.T) { + var out errorEvent + inputs := []string{ + `{"exception":{"message":"bang","cause":[{"message":"a","type":"runtime ex","cause":[{"message":"inner"}]},{"message":"b"}]},"log":{"message":"boom","stacktrace":[{"classname":"User::Common","filename":"a","post_context":["line4","line5"]},{"classname":"ABC","filename":"b"}]}}`, + `{"exception":{"message":"boom","cause":[{"message":"c","cause":[{"type":"a"}]}]},"log":{"message":"boom","stacktrace":[{"filename":"b"}]}}`, + `{}`} + expected := []errorEvent{ + {Exception: errorException{ + Message: addStr("bang"), + Cause: []errorException{ + {Message: addStr("a"), Type: addStr("runtime ex"), Cause: []errorException{{Message: addStr("inner")}}}, + {Message: addStr("b")}}}, + Log: errorLog{Message: addStr("boom"), Stacktrace: []stacktraceFrame{ + {Classname: addStr("User::Common"), Filename: addStr("a"), PostContext: []string{"line4", "line5"}}, + {Classname: addStr("ABC"), Filename: addStr("b")}}}}, + {Exception: errorException{ + Message: addStr("boom"), + Cause: []errorException{ + {Message: addStr("c"), Cause: []errorException{{Type: addStr("a")}}}}}, + Log: errorLog{Message: addStr("boom"), Stacktrace: []stacktraceFrame{ + {Filename: addStr("b"), PostContext: []string{}}}}}, + {Exception: errorException{Cause: []errorException{}}, Log: errorLog{Stacktrace: []stacktraceFrame{}}}} + for i := 0; i < len(inputs); i++ { + out.Reset() + decode(t, inputs[i], &out) + assert.Equal(t, expected[i], out) + } + }) +} + +// +// Test Required fields +// + +func TestErrorRequiredValidationRules(t *testing.T) { + // setup: create full struct with arbitrary values set + var event errorEvent + modeldecodertest.InitStructValues(&event) + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "context.request.method": nil, + "context.destination.service.resource": nil, + "id": nil, + "log.message": nil, + "parent_id": nil, //requiredIf + "trace_id": nil, //requiredIf + } + cb := assertRequiredFn(t, requiredKeys, event.validate) + modeldecodertest.SetZeroStructValue(&event, cb) +} + +func TestErrorRequiredOneOfValidationRules(t *testing.T) { + for _, tc := range []struct { + name string + setupFn func(event *errorEvent) + }{ + {name: "all", setupFn: func(e *errorEvent) { + e.Log = errorLog{} + e.Log.Message.Set("test message") + e.Exception = errorException{} + e.Exception.Message.Set("test message") + }}, + {name: "log", setupFn: func(e *errorEvent) { + e.Log = errorLog{} + e.Log.Message.Set("test message") + }}, + {name: "exception/message", setupFn: func(e *errorEvent) { + e.Exception = errorException{} + e.Exception.Message.Set("test message") + }}, + {name: "exception/type", setupFn: func(e *errorEvent) { + e.Exception = errorException{} + 
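// setting only Type must satisfy the 'message;type' one-of rule +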
e.Exception.Type.Set("test type") + }}, + {name: "exception/cause", + setupFn: func(e *errorEvent) { + exception := errorException{} + exception.Type.Set("test type") + cause := errorException{} + cause.Type.Set("cause type") + exception.Cause = []errorException{cause} + e.Exception = exception + }, + }, + {name: "*/stacktrace/classname", setupFn: func(e *errorEvent) { + e.Log = errorLog{} + e.Log.Message.Set("test message") + frame := stacktraceFrame{} + frame.Classname.Set("myClass") + e.Log.Stacktrace = []stacktraceFrame{frame} + }}, + {name: "*/stacktrace/filename", setupFn: func(e *errorEvent) { + e.Exception = errorException{} + e.Exception.Message.Set("test message") + frame := stacktraceFrame{} + frame.Filename.Set("myFile") + e.Exception.Stacktrace = []stacktraceFrame{frame} + }}, + } { + t.Run("valid/"+tc.name, func(t *testing.T) { + var event errorEvent + event.ID.Set("123") + tc.setupFn(&event) + require.NoError(t, event.validate()) + }) + } + + for _, tc := range []struct { + name string + err string + setupFn func(event *errorEvent) + }{ + {name: "error", + err: "requires at least one of the fields 'exception;log'", + setupFn: func(e *errorEvent) {}}, + {name: "exception", + err: "exception: requires at least one of the fields 'message;type'", + setupFn: func(e *errorEvent) { + exception := errorException{} + exception.Handled.Set(true) + e.Exception = exception + }, + }, + {name: "exception/cause", + err: "exception: cause: requires at least one of the fields 'message;type'", + setupFn: func(e *errorEvent) { + exception := errorException{} + exception.Type.Set("test type") + cause := errorException{} + cause.Code.Set("400") + exception.Cause = []errorException{cause} + e.Exception = exception + }, + }, + {name: "*/stacktrace", + err: "log: stacktrace: requires at least one of the fields 'classname;filename'", + setupFn: func(e *errorEvent) { + frame := stacktraceFrame{} + frame.LibraryFrame.Set(false) + log := errorLog{} + log.Message.Set("true") + log.Stacktrace = []stacktraceFrame{frame} + e.Log = log + }, + }, + } { + t.Run("invalid/"+tc.name, func(t *testing.T) { + var event errorEvent + event.ID.Set("123") + tc.setupFn(&event) + err := event.validate() + require.Error(t, err) + assert.Contains(t, err.Error(), tc.err) + }) + } +} + +func TestErrorRequiredIfAnyValidationRules(t *testing.T) { + validErrorEvent := func() errorEvent { + var event errorEvent + event.ID.Set("123") + event.Exception = errorException{} + event.Exception.Message.Set("test message") + return event + } + for _, tc := range []struct { + name string + setupFn func(event *errorEvent) + }{ + {name: "traceID-nil", setupFn: func(*errorEvent) {}}, + {name: "traceID-parentID-transactionID", setupFn: func(e *errorEvent) { + e.TraceID.Set("abcd") + e.ParentID.Set("xxx") + e.TransactionID.Set("xxx") + }}, + {name: "traceID-parentID", setupFn: func(e *errorEvent) { + e.TraceID.Set("abcd") + e.ParentID.Set("xxx") + }}, + } { + t.Run("valid/"+tc.name, func(t *testing.T) { + event := validErrorEvent() + tc.setupFn(&event) + require.NoError(t, event.validate()) + }) + } + + for _, tc := range []struct { + name string + err string + setupFn func(event *errorEvent) + }{ + {name: "traceID", err: "'parent_id' required", + setupFn: func(e *errorEvent) { e.TraceID.Set("xxx") }}, + {name: "parentID", err: "'trace_id' required", + setupFn: func(e *errorEvent) { e.ParentID.Set("xxx") }}, + {name: "transactionID", err: "'parent_id' required", + setupFn: func(e *errorEvent) { e.TransactionID.Set("xxx") }}, + {name: 
"transactionID-parentID", err: "'trace_id' required", + setupFn: func(e *errorEvent) { + e.TransactionID.Set("xxx") + e.ParentID.Set("xxx") + }}, + {name: "transactionID-traceID", err: "'parent_id' required", + setupFn: func(e *errorEvent) { + e.TransactionID.Set("xxx") + e.TraceID.Set("xxx") + }}, + } { + t.Run("invalid/"+tc.name, func(t *testing.T) { + event := validErrorEvent() + tc.setupFn(&event) + err := event.validate() + require.Error(t, err) + require.Contains(t, err.Error(), tc.err) + }) + } +} + +func TestMetadataRequiredValidationRules(t *testing.T) { + // setup: create full event struct with arbitrary values set + var event metadata + modeldecodertest.InitStructValues(&event) + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "cloud.provider": nil, + "process.pid": nil, + "service": nil, + "service.agent": nil, + "service.agent.name": nil, + "service.agent.version": nil, + "service.language.name": nil, + "service.runtime.name": nil, + "service.runtime.version": nil, + "service.name": nil, + } + cb := assertRequiredFn(t, requiredKeys, event.validate) + modeldecodertest.SetZeroStructValue(&event, cb) +} + +func TestMetricsetRequiredValidationRules(t *testing.T) { + // setup: create full struct with sample values set + var root metricsetRoot + s := `{"metricset":{"samples":{"a.b.":{"value":2048}},"timestamp":1496170421366000,"transaction":{"type":"request","name":"GET /"},"span":{"type":"db","subtype":"mysql"},"tags":{"a":"b"}}}` + modeldecodertest.DecodeData(t, strings.NewReader(s), "metricset", &root) + // test vanilla struct is valid + event := root.Metricset + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "samples": nil, + "samples.value": nil, + } + modeldecodertest.SetZeroStructValue(&event, func(key string) { + assertRequiredFn(t, requiredKeys, event.validate) + }) +} + +func TestSpanRequiredValidationRules(t *testing.T) { + // setup: create full struct with arbitrary values set + var event span + modeldecodertest.InitStructValues(&event) + event.Outcome.Set("failure") + // Composite.Count must be > 1 + event.Composite.Count.Set(2) + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "id": nil, + "context.destination.service.resource": nil, + "duration": nil, + "name": nil, + "parent_id": nil, + "trace_id": nil, + "type": nil, + "composite.count": nil, + "composite.sum": nil, + "composite.compression_strategy": nil, + } + cb := assertRequiredFn(t, requiredKeys, event.validate) + modeldecodertest.SetZeroStructValue(&event, cb) +} + +func TestTransactionRequiredValidationRules(t *testing.T) { + // setup: create full struct with arbitrary values set + var event transaction + modeldecodertest.InitStructValues(&event) + event.Outcome.Set("success") + // test vanilla struct is valid + require.NoError(t, event.validate()) + + // iterate through struct, remove every key one by one + // and test that validation behaves as expected + requiredKeys := map[string]interface{}{ + "duration": nil, + "id": nil, + "span_count": nil, + "span_count.started": nil, + "trace_id": nil, + "type": nil, + 
"context.request.method": nil, + "experience.longtask.count": nil, + "experience.longtask.sum": nil, + "experience.longtask.max": nil, + "session.id": nil, + } + cb := assertRequiredFn(t, requiredKeys, event.validate) + modeldecodertest.SetZeroStructValue(&event, cb) +} + +var regexpArrayAccessor = regexp.MustCompile(`\[.*\]\.`) - t.Run("required", func(t *testing.T) { - // setup: create full metadata struct with arbitrary values set - var metadata metadata - modeldecodertest.InitStructValues(&metadata) - // test vanilla struct is valid - require.NoError(t, metadata.validate()) - - // iterate through struct, remove every key one by one - // and test that validation behaves as expected - requiredKeys := map[string]interface{}{ - "cloud.provider": nil, - "process.pid": nil, - "service": nil, - "service.agent": nil, - "service.agent.name": nil, - "service.agent.version": nil, - "service.language.name": nil, - "service.runtime.name": nil, - "service.runtime.version": nil, - "service.name": nil, +func assertRequiredFn(t *testing.T, keys map[string]interface{}, validate func() error) func(key string) { + return func(key string) { + s := regexpArrayAccessor.ReplaceAllString(key, "") + err := validate() + if _, ok := keys[s]; ok { + require.Error(t, err, key) + for _, part := range strings.Split(s, ".") { + assert.Contains(t, err.Error(), part) + } + } else { + assert.NoError(t, err, key) } - modeldecodertest.SetZeroStructValue(&metadata, func(key string) { - err := metadata.validate() - if _, ok := requiredKeys[key]; ok { - require.Error(t, err, key) - assert.Contains(t, err.Error(), key) + } +} + +// +// Test Set() and Reset() +// + +func TestResetIsSet(t *testing.T) { + for name, root := range map[string]setter{ + "error": &errorRoot{}, + "metadata": &metadataRoot{}, + "metricset": &metricsetRoot{}, + "span": &spanRoot{}, + "transaction": &transactionRoot{}, + } { + t.Run(name, func(t *testing.T) { + r := testdataReader(t, testFileName(name)) + modeldecodertest.DecodeData(t, r, name, &root) + require.True(t, root.IsSet()) + // call Reset and ensure initial state, except for array capacity + root.Reset() + assert.False(t, root.IsSet()) + }) + } +} + +type setter interface { + IsSet() bool + Reset() +} + +type validator interface { + validate() error +} + +type testcase struct { + name string + errorKey string + data string +} + +func testdataReader(t *testing.T, typ string) io.Reader { + p := filepath.Join("..", "..", "..", "testdata", "intake-v2", fmt.Sprintf("%s.ndjson", typ)) + r, err := os.Open(p) + require.NoError(t, err) + return r +} + +func testFileName(eventType string) string { + if eventType == "metadata" { + return eventType + } + return eventType + "s" +} + +func testValidation(t *testing.T, eventType string, testcases []testcase, keys ...string) { + for _, tc := range testcases { + t.Run(tc.name+"/"+eventType, func(t *testing.T) { + var event validator + switch eventType { + case "error": + event = &errorEvent{} + case "metadata": + event = &metadata{} + case "metricset": + event = &metricset{} + case "span": + event = &span{} + case "transaction": + event = &transaction{} + } + r := testdataReader(t, testFileName(eventType)) + modeldecodertest.DecodeDataWithReplacement(t, r, eventType, tc.data, event, keys...) 
+ + // run validation and checks + err := event.validate() + if tc.errorKey == "" { + assert.NoError(t, err) } else { - assert.NoError(t, err, key) + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorKey) } }) - }) + } } diff --git a/model/modeldecoder/v2/span_test.go b/model/modeldecoder/v2/span_test.go new file mode 100644 index 00000000000..9861da88731 --- /dev/null +++ b/model/modeldecoder/v2/span_test.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" +) + +func TestResetSpanOnRelease(t *testing.T) { + inp := `{"span":{"name":"tr-a"}}` + root := fetchSpanRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(root)) + require.True(t, root.IsSet()) + releaseSpanRoot(root) + assert.False(t, root.IsSet()) +} + +func TestDecodeNestedSpan(t *testing.T) { + t.Run("decode", func(t *testing.T) { + defaultVal := modeldecodertest.DefaultValues() + _, eventBase := initializedInputMetadata(defaultVal) + input := modeldecoder.Input{Base: eventBase} + str := `{"span":{"duration":100,"id":"a-b-c","name":"s","parent_id":"parent-123","trace_id":"trace-ab","type":"db","start":143}}` + dec := decoder.NewJSONDecoder(strings.NewReader(str)) + var batch model.Batch + require.NoError(t, DecodeNestedSpan(dec, &input, &batch)) + require.Len(t, batch, 1) + require.NotNil(t, batch[0].Span) + defaultVal.Update(time.Time{}.Add(143 * time.Millisecond)) + modeldecodertest.AssertStructValues(t, &batch[0], isMetadataException, defaultVal) + + err := DecodeNestedSpan(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + var batch model.Batch + err := DecodeNestedSpan(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) +} + +func TestDecodeMapToSpanModel(t *testing.T) { + t.Run("set-metadata", func(t *testing.T) { + exceptions := func(key string) bool { return false } + var input span + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + _, out := initializedInputMetadata(defaultVal) + mapToSpanModel(&input, &out) + modeldecodertest.AssertStructValues(t, &out.Service, 
exceptions, defaultVal) + }) + + t.Run("span-values", func(t *testing.T) { + exceptions := func(key string) bool { + switch key { + case + // RepresentativeCount is tested further down in test 'sample-rate' + "RepresentativeCount", + + // Not set for spans: + "DestinationService.ResponseTime", + "DestinationService.ResponseTime.Count", + "DestinationService.ResponseTime.Sum", + "SelfTime", + "SelfTime.Count", + "SelfTime.Sum": + return true + } + for _, s := range []string{ + // stacktrace values are set when sourcemapping is applied + "Stacktrace.Original", + "Stacktrace.Sourcemap", + "Stacktrace.ExcludeFromGrouping"} { + if strings.HasPrefix(key, s) { + return true + } + } + return false + } + + var input span + var out1, out2 model.APMEvent + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToSpanModel(&input, &out1) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Span, exceptions, defaultVal) + + // reuse input model for different event + // ensure memory is not shared by reusing input model + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToSpanModel(&input, &out2) + input.Reset() + modeldecodertest.AssertStructValues(t, out2.Span, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Span, exceptions, defaultVal) + }) + + t.Run("outcome", func(t *testing.T) { + var input span + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // set from input, ignore status code + input.Outcome.Set("failure") + input.Context.HTTP.StatusCode.Set(http.StatusPermanentRedirect) + mapToSpanModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from other fields - success + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Set(http.StatusPermanentRedirect) + mapToSpanModel(&input, &out) + assert.Equal(t, "success", out.Event.Outcome) + // derive from other fields - failure + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Set(http.StatusBadRequest) + mapToSpanModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from other fields - unknown + input.Outcome.Reset() + input.Context.HTTP.StatusCode.Reset() + mapToSpanModel(&input, &out) + assert.Equal(t, "unknown", out.Event.Outcome) + }) + + t.Run("timestamp", func(t *testing.T) { + var input span + var out model.APMEvent + reqTime := time.Now().Add(time.Hour) + // add start to requestTime if eventTime is zero and start is given + defaultVal := modeldecodertest.DefaultValues() + defaultVal.Update(20.5, time.Time{}) + modeldecodertest.SetStructValues(&input, defaultVal) + out.Timestamp = reqTime + mapToSpanModel(&input, &out) + timestamp := reqTime.Add(time.Duration((20.5) * float64(time.Millisecond))) + assert.Equal(t, timestamp, out.Timestamp) + // leave event timestamp unmodified if eventTime is zero and start is not set + out = model.APMEvent{Timestamp: reqTime} + modeldecodertest.SetStructValues(&input, defaultVal) + input.Start.Reset() + mapToSpanModel(&input, &out) + require.Nil(t, out.Span.Start) + assert.Equal(t, reqTime, out.Timestamp) + }) + + t.Run("sample-rate", func(t *testing.T) { + var input span + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // sample rate is set to > 0 + input.SampleRate.Set(0.25) + mapToSpanModel(&input, &out) + assert.Equal(t, 4.0, out.Span.RepresentativeCount) + // sample rate is not set + 
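// for spans, an unset sample rate leaves RepresentativeCount at 0 (transactions, by contrast, default to 1) +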
out.Span.RepresentativeCount = 0.0 + input.SampleRate.Reset() + mapToSpanModel(&input, &out) + assert.Equal(t, 0.0, out.Span.RepresentativeCount) + // sample rate is set to 0 + input.SampleRate.Set(0) + mapToSpanModel(&input, &out) + assert.Equal(t, 0.0, out.Span.RepresentativeCount) + }) + + t.Run("type-subtype-action", func(t *testing.T) { + for _, tc := range []struct { + name string + inputType, inputSubtype, inputAction string + typ, subtype, action string + }{ + {name: "only-type", inputType: "xyz", + typ: "xyz"}, + {name: "derive-subtype", inputType: "x.y", + typ: "x", subtype: "y"}, + {name: "derive-subtype-action", inputType: "x.y.z.a", + typ: "x", subtype: "y", action: "z.a"}, + {name: "type-subtype", inputType: "x.y.z", inputSubtype: "a", + typ: "x.y.z", subtype: "a"}, + {name: "type-action", inputType: "x.y.z", inputAction: "b", + typ: "x.y.z", action: "b"}, + {name: "type-subtype-action", inputType: "x.y", inputSubtype: "a", inputAction: "b", + typ: "x.y", subtype: "a", action: "b"}, + } { + t.Run(tc.name, func(t *testing.T) { + var input span + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + input.Type.Set(tc.inputType) + if tc.inputSubtype != "" { + input.Subtype.Set(tc.inputSubtype) + } else { + input.Subtype.Reset() + } + if tc.inputAction != "" { + input.Action.Set(tc.inputAction) + } else { + input.Action.Reset() + } + var out model.APMEvent + mapToSpanModel(&input, &out) + assert.Equal(t, tc.typ, out.Span.Type) + assert.Equal(t, tc.subtype, out.Span.Subtype) + assert.Equal(t, tc.action, out.Span.Action) + }) + } + }) + + t.Run("http-headers", func(t *testing.T) { + var input span + input.Context.HTTP.Response.Headers.Set(http.Header{"a": []string{"b", "c"}}) + var out model.APMEvent + mapToSpanModel(&input, &out) + assert.Equal(t, common.MapStr{"a": []string{"b", "c"}}, out.HTTP.Response.Headers) + }) +} diff --git a/model/modeldecoder/v2/transaction_test.go b/model/modeldecoder/v2/transaction_test.go new file mode 100644 index 00000000000..44aaa993b8f --- /dev/null +++ b/model/modeldecoder/v2/transaction_test.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
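+ +// The tests below cover DecodeNestedTransaction (NDJSON decoding plus validation) and mapToTransactionModel (mapping the decoded input onto model.APMEvent, including metadata overwrites and derived fields such as event.outcome).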
+ +package v2 + +import ( + "encoding/json" + "net" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/decoder" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modeldecoder" + "github.com/elastic/apm-server/model/modeldecoder/modeldecodertest" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestResetTransactionOnRelease(t *testing.T) { + inp := `{"transaction":{"name":"tr-a"}}` + root := fetchTransactionRoot() + require.NoError(t, decoder.NewJSONDecoder(strings.NewReader(inp)).Decode(root)) + require.True(t, root.IsSet()) + releaseTransactionRoot(root) + assert.False(t, root.IsSet()) +} + +func TestDecodeNestedTransaction(t *testing.T) { + t.Run("decode", func(t *testing.T) { + now := time.Now() + input := modeldecoder.Input{} + str := `{"transaction":{"duration":100,"timestamp":1599996822281000,"id":"100","trace_id":"1","type":"request","span_count":{"started":2}}}` + dec := decoder.NewJSONDecoder(strings.NewReader(str)) + + var batch model.Batch + require.NoError(t, DecodeNestedTransaction(dec, &input, &batch)) + require.Len(t, batch, 1) + require.NotNil(t, batch[0].Transaction) + assert.Equal(t, "request", batch[0].Transaction.Type) + assert.Equal(t, "2020-09-13 11:33:42.281 +0000 UTC", batch[0].Timestamp.String()) + + input = modeldecoder.Input{Base: model.APMEvent{Timestamp: now}} + str = `{"transaction":{"duration":100,"id":"100","trace_id":"1","type":"request","span_count":{"started":2}}}` + dec = decoder.NewJSONDecoder(strings.NewReader(str)) + batch = model.Batch{} + require.NoError(t, DecodeNestedTransaction(dec, &input, &batch)) + // if no timestamp is provided, fall back to base event timestamp + assert.Equal(t, now, batch[0].Timestamp) + + err := DecodeNestedTransaction(decoder.NewJSONDecoder(strings.NewReader(`malformed`)), &input, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) + + t.Run("validate", func(t *testing.T) { + var batch model.Batch + err := DecodeNestedTransaction(decoder.NewJSONDecoder(strings.NewReader(`{}`)), &modeldecoder.Input{}, &batch) + require.Error(t, err) + assert.Contains(t, err.Error(), "validation") + }) +} + +func TestDecodeMapToTransactionModel(t *testing.T) { + gatewayIP := net.ParseIP("192.168.0.1") + randomIP := net.ParseIP("71.0.54.1") + + t.Run("metadata-overwrite", func(t *testing.T) { + // overwrite defined metadata with event metadata values + var input transaction + _, out := initializedInputMetadata(modeldecodertest.DefaultValues()) + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToTransactionModel(&input, &out) + input.Reset() + + // ensure event Metadata are updated where expected + otherVal = modeldecodertest.NonDefaultValues() + userAgent := strings.Join(otherVal.HTTPHeader.Values("User-Agent"), ", ") + assert.Equal(t, userAgent, out.UserAgent.Original) + // do not overwrite client.ip if already set in metadata + ip := modeldecodertest.DefaultValues().IP + assert.Equal(t, ip, out.Client.IP, out.Client.IP.String()) + assert.Equal(t, common.MapStr{ + "init0": "init", "init1": "init", "init2": "init", + "overwritten0": "overwritten", "overwritten1": "overwritten", + }, out.Labels) + //assert.Equal(t, tLabels, out.Transaction.Labels) + exceptions := func(key string) bool { return false } + modeldecodertest.AssertStructValues(t, &out.Service, 
exceptions, otherVal) + modeldecodertest.AssertStructValues(t, &out.User, exceptions, otherVal) + }) + + t.Run("client-ip-header", func(t *testing.T) { + var input transaction + var out model.APMEvent + input.Context.Request.Headers.Set(http.Header{}) + input.Context.Request.Socket.RemoteAddress.Set(randomIP.String()) + // from headers (case insensitive) + input.Context.Request.Headers.Val.Add("x-Real-ip", gatewayIP.String()) + mapToTransactionModel(&input, &out) + assert.Equal(t, gatewayIP.String(), out.Client.IP.String()) + // ignore if set in event already + out = model.APMEvent{ + Client: model.Client{IP: net.ParseIP("192.17.1.1")}, + } + mapToTransactionModel(&input, &out) + assert.Equal(t, "192.17.1.1", out.Client.IP.String()) + }) + + t.Run("client-ip-socket", func(t *testing.T) { + var input transaction + var out model.APMEvent + // set invalid headers + input.Context.Request.Headers.Set(http.Header{}) + input.Context.Request.Headers.Val.Add("x-Real-ip", "192.13.14:8097") + input.Context.Request.Socket.RemoteAddress.Set(randomIP.String()) + mapToTransactionModel(&input, &out) + // ensure client ip is populated from socket + assert.Equal(t, randomIP.String(), out.Client.IP.String()) + }) + + t.Run("overwrite-user", func(t *testing.T) { + // user should be populated by metadata or event specific, but not merged + var input transaction + _, out := initializedInputMetadata(modeldecodertest.DefaultValues()) + input.Context.User.Email.Set("test@user.com") + mapToTransactionModel(&input, &out) + assert.Equal(t, "test@user.com", out.User.Email) + assert.Zero(t, out.User.ID) + assert.Zero(t, out.User.Name) + }) + + t.Run("transaction-values", func(t *testing.T) { + exceptions := func(key string) bool { + // All the below exceptions are tested separately + switch key { + case + // Tested separately + "RepresentativeCount", + + // Not set for transaction events, tested in metricset decoding: + "AggregatedDuration", + "AggregatedDuration.Count", + "AggregatedDuration.Sum", + "BreakdownCount", + "DurationHistogram", + "DurationHistogram.Counts", + "DurationHistogram.Values", + "Root": + return true + } + return false + } + + var input transaction + var out1, out2 model.APMEvent + reqTime := time.Now().Add(time.Second) + out1.Timestamp = reqTime + defaultVal := modeldecodertest.DefaultValues() + modeldecodertest.SetStructValues(&input, defaultVal) + mapToTransactionModel(&input, &out1) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Transaction, exceptions, defaultVal) + + // leave event timestamp unmodified if eventTime is zero + out1.Timestamp = reqTime + defaultVal.Update(time.Time{}) + modeldecodertest.SetStructValues(&input, defaultVal) + mapToTransactionModel(&input, &out1) + defaultVal.Update(reqTime) + input.Reset() + modeldecodertest.AssertStructValues(t, out1.Transaction, exceptions, defaultVal) + + // ensure memory is not shared by reusing input model + out2.Timestamp = reqTime + otherVal := modeldecodertest.NonDefaultValues() + modeldecodertest.SetStructValues(&input, otherVal) + mapToTransactionModel(&input, &out2) + modeldecodertest.AssertStructValues(t, out2.Transaction, exceptions, otherVal) + modeldecodertest.AssertStructValues(t, out1.Transaction, exceptions, defaultVal) + }) + + t.Run("http-headers", func(t *testing.T) { + var input transaction + input.Context.Request.Headers.Set(http.Header{"a": []string{"b"}, "c": []string{"d", "e"}}) + input.Context.Response.Headers.Set(http.Header{"f": []string{"g"}}) + var out model.APMEvent + mapToTransactionModel(&input, 
&out) + assert.Equal(t, common.MapStr{"a": []string{"b"}, "c": []string{"d", "e"}}, out.HTTP.Request.Headers) + assert.Equal(t, common.MapStr{"f": []string{"g"}}, out.HTTP.Response.Headers) + }) + + t.Run("http-request-body", func(t *testing.T) { + var input transaction + input.Context.Request.Body.Set(map[string]interface{}{ + "a": json.Number("123.456"), + "b": nil, + "c": "d", + }) + var out model.APMEvent + mapToTransactionModel(&input, &out) + assert.Equal(t, map[string]interface{}{"a": common.Float(123.456), "c": "d"}, out.HTTP.Request.Body) + }) + + t.Run("page.URL", func(t *testing.T) { + var input transaction + var out model.APMEvent + input.Context.Page.URL.Set("https://my.site.test:9201") + mapToTransactionModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.URL.Full) + }) + + t.Run("page.referer", func(t *testing.T) { + var input transaction + var out model.APMEvent + input.Context.Page.Referer.Set("https://my.site.test:9201") + mapToTransactionModel(&input, &out) + assert.Equal(t, "https://my.site.test:9201", out.HTTP.Request.Referrer) + }) + + t.Run("sample-rate", func(t *testing.T) { + var input transaction + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // sample rate is set to > 0 + input.SampleRate.Set(0.25) + mapToTransactionModel(&input, &out) + assert.Equal(t, 4.0, out.Transaction.RepresentativeCount) + // sample rate is not set -> Representative Count should be 1 by default + out.Transaction.RepresentativeCount = 0.0 //reset to zero value + input.SampleRate.Reset() + mapToTransactionModel(&input, &out) + assert.Equal(t, 1.0, out.Transaction.RepresentativeCount) + // sample rate is set to 0 + out.Transaction.RepresentativeCount = 0.0 //reset to zero value + input.SampleRate.Set(0) + mapToTransactionModel(&input, &out) + assert.Equal(t, 0.0, out.Transaction.RepresentativeCount) + }) + + t.Run("outcome", func(t *testing.T) { + var input transaction + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + // set from input, ignore status code + input.Outcome.Set("failure") + input.Context.Response.StatusCode.Set(http.StatusBadRequest) + mapToTransactionModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from other fields - success + input.Outcome.Reset() + input.Context.Response.StatusCode.Set(http.StatusBadRequest) + mapToTransactionModel(&input, &out) + assert.Equal(t, "success", out.Event.Outcome) + // derive from other fields - failure + input.Outcome.Reset() + input.Context.Response.StatusCode.Set(http.StatusInternalServerError) + mapToTransactionModel(&input, &out) + assert.Equal(t, "failure", out.Event.Outcome) + // derive from other fields - unknown + input.Outcome.Reset() + input.Context.Response.StatusCode.Reset() + mapToTransactionModel(&input, &out) + assert.Equal(t, "unknown", out.Event.Outcome) + }) + + t.Run("session", func(t *testing.T) { + var input transaction + var out model.APMEvent + modeldecodertest.SetStructValues(&input, modeldecodertest.DefaultValues()) + input.Session.ID.Reset() + mapToTransactionModel(&input, &out) + assert.Equal(t, model.Session{}, out.Session) + + input.Session.ID.Set("session_id") + input.Session.Sequence.Set(123) + mapToTransactionModel(&input, &out) + assert.Equal(t, model.Session{ + ID: "session_id", + Sequence: 123, + }, out.Session) + }) +} diff --git a/model/modelprocessor/chained.go b/model/modelprocessor/chained.go new file mode 100644 index 00000000000..ef91c5702bf --- 
/dev/null +++ b/model/modelprocessor/chained.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + + "github.com/elastic/apm-server/model" +) + +// Chained is a chained model.BatchProcessor, calling each of +// the processors in the slice in series. +type Chained []model.BatchProcessor + +// ProcessBatch calls each of the processors in c in series. +func (c Chained) ProcessBatch(ctx context.Context, batch *model.Batch) error { + for _, p := range c { + if err := p.ProcessBatch(ctx, batch); err != nil { + return err + } + } + return nil +} diff --git a/model/modelprocessor/culprit.go b/model/modelprocessor/culprit.go new file mode 100644 index 00000000000..96479cb3319 --- /dev/null +++ b/model/modelprocessor/culprit.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + + "github.com/elastic/apm-server/model" +) + +// SetCulprit is a model.BatchProcessor that sets or updates the culprit for RUM +// errors, after source mapping and identifying library frames. +type SetCulprit struct{} + +// ProcessBatch sets or updates the culprit for RUM errors. 
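+// The culprit is taken from the first source-mapped, non-library stack frame, preferring the log stacktrace over the exception stacktrace: the frame's filename is used (or its classname when the filename is empty), with the function name appended as " in <function>" when set.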
+func (s SetCulprit) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + if event.Error != nil { + s.processError(ctx, event.Error) + } + } + return nil +} + +func (s SetCulprit) processError(ctx context.Context, event *model.Error) { + var culpritFrame *model.StacktraceFrame + if event.Log != nil { + culpritFrame = s.findSourceMappedNonLibraryFrame(event.Log.Stacktrace) + } + if culpritFrame == nil && event.Exception != nil { + culpritFrame = s.findSourceMappedNonLibraryFrame(event.Exception.Stacktrace) + } + if culpritFrame == nil { + return + } + culprit := culpritFrame.Filename + if culprit == "" { + culprit = culpritFrame.Classname + } + if culpritFrame.Function != "" { + culprit += " in " + culpritFrame.Function + } + event.Culprit = culprit +} + +func (s SetCulprit) findSourceMappedNonLibraryFrame(frames []*model.StacktraceFrame) *model.StacktraceFrame { + for _, frame := range frames { + if frame.SourcemapUpdated && !frame.LibraryFrame { + return frame + } + } + return nil +} diff --git a/model/modelprocessor/culprit_test.go b/model/modelprocessor/culprit_test.go new file mode 100644 index 00000000000..a514a427b97 --- /dev/null +++ b/model/modelprocessor/culprit_test.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
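+ +// The table-driven tests below check that an existing culprit is preserved unless a source-mapped, non-library frame is found, and that exception stacktraces are only consulted when the log stacktrace yields no candidate.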
+ +package modelprocessor_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetCulprit(t *testing.T) { + tests := []struct { + input model.Error + culprit string + }{{ + input: model.Error{}, + culprit: "", + }, { + input: model.Error{Culprit: "already_set"}, + culprit: "already_set", + }, { + input: model.Error{ + Culprit: "already_set", + Log: &model.Log{ + Stacktrace: model.Stacktrace{{SourcemapUpdated: false, Filename: "foo.go"}}, + }, + }, + culprit: "already_set", + }, { + input: model.Error{ + Culprit: "already_set", + Log: &model.Log{ + Stacktrace: model.Stacktrace{{SourcemapUpdated: true, LibraryFrame: true, Filename: "foo.go"}}, + }, + }, + culprit: "already_set", + }, { + input: model.Error{ + Culprit: "already_set", + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {SourcemapUpdated: true, LibraryFrame: true, Filename: "foo.go"}, + {SourcemapUpdated: true, LibraryFrame: false, Filename: "foo2.go"}, + }, + }, + }, + culprit: "foo2.go", + }, { + input: model.Error{ + Culprit: "already_set", + Log: &model.Log{ + Stacktrace: model.Stacktrace{{SourcemapUpdated: true, LibraryFrame: true, Filename: "foo.go"}}, + }, + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{{SourcemapUpdated: true, LibraryFrame: false, Filename: "foo2.go"}}, + }, + }, + culprit: "foo2.go", + }, { + input: model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {SourcemapUpdated: true, Classname: "AbstractFactoryManagerBean", Function: "toString"}, + }, + }, + }, + culprit: "AbstractFactoryManagerBean in toString", + }} + + for _, test := range tests { + batch := model.Batch{{Error: &test.input}} + processor := modelprocessor.SetCulprit{} + err := processor.ProcessBatch(context.Background(), &batch) + assert.NoError(t, err) + assert.Equal(t, test.culprit, batch[0].Error.Culprit) + } + +} diff --git a/model/modelprocessor/datastream.go b/model/modelprocessor/datastream.go new file mode 100644 index 00000000000..7add272b2ab --- /dev/null +++ b/model/modelprocessor/datastream.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + "fmt" + + "github.com/elastic/apm-server/datastreams" + "github.com/elastic/apm-server/model" +) + +// SetDataStream is a model.BatchProcessor that sets the data stream for events. +type SetDataStream struct { + Namespace string +} + +// ProcessBatch sets data stream fields for each event in b. 
+func (s *SetDataStream) ProcessBatch(ctx context.Context, b *model.Batch) error { + for i := range *b { + s.setDataStream(&(*b)[i]) + } + return nil +} + +func (s *SetDataStream) setDataStream(event *model.APMEvent) { + switch event.Processor { + case model.SpanProcessor, model.TransactionProcessor: + event.DataStream.Type = datastreams.TracesType + event.DataStream.Dataset = model.TracesDataset + case model.ErrorProcessor: + event.DataStream.Type = datastreams.LogsType + event.DataStream.Dataset = model.ErrorsDataset + case model.LogProcessor: + event.DataStream.Type = datastreams.LogsType + event.DataStream.Dataset = model.AppLogsDataset + case model.MetricsetProcessor: + event.DataStream.Type = datastreams.MetricsType + // Metrics that include well-defined transaction/span fields + // (i.e. breakdown metrics, transaction and span metrics) will + // be stored separately from application and runtime metrics. + event.DataStream.Dataset = model.InternalMetricsDataset + if event.Transaction == nil && event.Span == nil { + event.DataStream.Dataset = fmt.Sprintf( + "%s.%s", model.AppMetricsDataset, + datastreams.NormalizeServiceName(event.Service.Name), + ) + } + case model.ProfileProcessor: + event.DataStream.Type = datastreams.MetricsType + event.DataStream.Dataset = model.ProfilesDataset + } + event.DataStream.Namespace = s.Namespace +} diff --git a/model/modelprocessor/datastream_test.go b/model/modelprocessor/datastream_test.go new file mode 100644 index 00000000000..cc11f88b439 --- /dev/null +++ b/model/modelprocessor/datastream_test.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package modelprocessor_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetDataStream(t *testing.T) { + tests := []struct { + input model.APMEvent + output model.DataStream + }{{ + input: model.APMEvent{}, + output: model.DataStream{Namespace: "custom"}, + }, { + input: model.APMEvent{Processor: model.TransactionProcessor}, + output: model.DataStream{Type: "traces", Dataset: "apm", Namespace: "custom"}, + }, { + input: model.APMEvent{Processor: model.SpanProcessor}, + output: model.DataStream{Type: "traces", Dataset: "apm", Namespace: "custom"}, + }, { + input: model.APMEvent{Processor: model.ErrorProcessor}, + output: model.DataStream{Type: "logs", Dataset: "apm.error", Namespace: "custom"}, + }, { + input: model.APMEvent{Processor: model.LogProcessor}, + output: model.DataStream{Type: "logs", Dataset: "apm.app", Namespace: "custom"}, + }, { + input: model.APMEvent{ + Processor: model.MetricsetProcessor, + Service: model.Service{Name: "service-name"}, + Metricset: &model.Metricset{}, + Transaction: &model.Transaction{Name: "foo"}, + }, + output: model.DataStream{Type: "metrics", Dataset: "apm.internal", Namespace: "custom"}, + }, { + input: model.APMEvent{ + Processor: model.MetricsetProcessor, + Service: model.Service{Name: "service-name"}, + Metricset: &model.Metricset{}, + }, + output: model.DataStream{Type: "metrics", Dataset: "apm.app.service_name", Namespace: "custom"}, + }, { + input: model.APMEvent{ + Processor: model.ProfileProcessor, + ProfileSample: &model.ProfileSample{}, + }, + output: model.DataStream{Type: "metrics", Dataset: "apm.profiling", Namespace: "custom"}, + }} + + for _, test := range tests { + batch := model.Batch{test.input} + processor := modelprocessor.SetDataStream{Namespace: "custom"} + err := processor.ProcessBatch(context.Background(), &batch) + assert.NoError(t, err) + assert.Equal(t, test.output, batch[0].DataStream) + } + +} diff --git a/model/modelprocessor/environment.go b/model/modelprocessor/environment.go new file mode 100644 index 00000000000..cccb70a241d --- /dev/null +++ b/model/modelprocessor/environment.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + + "github.com/elastic/apm-server/model" +) + +// SetDefaultServiceEnvironment is a model.BatchProcessor that sets a default +// service.environment value for events without one already set. +type SetDefaultServiceEnvironment struct { + // DefaultServiceEnvironment is the default service.environment value + // to set for events without one already set.
+ DefaultServiceEnvironment string +} + +// ProcessBatch sets a default service.environment value for events without one already set. +func (s *SetDefaultServiceEnvironment) ProcessBatch(ctx context.Context, b *model.Batch) error { + for i := range *b { + event := &(*b)[i] + if event.Service.Environment == "" { + event.Service.Environment = s.DefaultServiceEnvironment + } + } + return nil +} diff --git a/model/modelprocessor/environment_test.go b/model/modelprocessor/environment_test.go new file mode 100644 index 00000000000..43e6cb2be8d --- /dev/null +++ b/model/modelprocessor/environment_test.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetDefaultServiceEnvironment(t *testing.T) { + nonEmptyServiceEnvironment := model.APMEvent{Service: model.Service{Environment: "nonempty"}} + defaultServiceEnvironment := model.APMEvent{Service: model.Service{Environment: "default"}} + + processor := modelprocessor.SetDefaultServiceEnvironment{ + DefaultServiceEnvironment: "default", + } + testProcessBatch(t, &processor, nonEmptyServiceEnvironment, nonEmptyServiceEnvironment) + testProcessBatch(t, &processor, model.APMEvent{}, defaultServiceEnvironment) +} + +func testProcessBatch(t *testing.T, processor model.BatchProcessor, in, out model.APMEvent) { + t.Helper() + + batch := &model.Batch{in} + err := processor.ProcessBatch(context.Background(), batch) + require.NoError(t, err) + + expected := &model.Batch{out} + assert.Equal(t, expected, batch) +} diff --git a/model/modelprocessor/errormessage.go b/model/modelprocessor/errormessage.go new file mode 100644 index 00000000000..17ce4c1ffc4 --- /dev/null +++ b/model/modelprocessor/errormessage.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
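+ +// SetErrorMessage prefers error.log.message over error.exception.message when both are present; the cases in errormessage_test.go pin down that precedence.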
+ +package modelprocessor + +import ( + "context" + + "github.com/elastic/apm-server/model" +) + +// SetErrorMessage is a model.BatchProcessor that sets the APMEvent.Message +// field for error events. +type SetErrorMessage struct{} + +// ProcessBatch sets the message for errors. +func (s SetErrorMessage) ProcessBatch(ctx context.Context, b *model.Batch) error { + for i := range *b { + event := &(*b)[i] + if event.Error != nil { + event.Message = s.setErrorMessage(event) + } + } + return nil +} + +func (s SetErrorMessage) setErrorMessage(event *model.APMEvent) string { + if event.Error.Log != nil && event.Error.Log.Message != "" { + return event.Error.Log.Message + } + if event.Error.Exception != nil && event.Error.Exception.Message != "" { + return event.Error.Exception.Message + } + return "" +} diff --git a/model/modelprocessor/errormessage_test.go b/model/modelprocessor/errormessage_test.go new file mode 100644 index 00000000000..2d75988242f --- /dev/null +++ b/model/modelprocessor/errormessage_test.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetErrorMessage(t *testing.T) { + tests := []struct { + input model.Error + message string + }{{ + input: model.Error{}, + message: "", + }, { + input: model.Error{Log: &model.Log{Message: "log_message"}}, + message: "log_message", + }, { + input: model.Error{Exception: &model.Exception{Message: "exception_message"}}, + message: "exception_message", + }, { + input: model.Error{ + Log: &model.Log{}, + Exception: &model.Exception{Message: "exception_message"}, + }, + message: "exception_message", + }, { + input: model.Error{ + Log: &model.Log{Message: "log_message"}, + Exception: &model.Exception{Message: "exception_message"}, + }, + message: "log_message", + }} + + for _, test := range tests { + batch := model.Batch{{Error: &test.input}} + processor := modelprocessor.SetErrorMessage{} + err := processor.ProcessBatch(context.Background(), &batch) + assert.NoError(t, err) + assert.Equal(t, test.message, batch[0].Message) + } + +} diff --git a/model/modelprocessor/eventcounter.go b/model/modelprocessor/eventcounter.go new file mode 100644 index 00000000000..5f02e915a54 --- /dev/null +++ b/model/modelprocessor/eventcounter.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + "sync" + + "github.com/elastic/beats/v7/libbeat/monitoring" + + "github.com/elastic/apm-server/model" +) + +// EventCounter is a model.BatchProcessor that counts the number of events processed, +// recording the counts as metrics in a monitoring.Registry. +// +// Metrics are named after the event type: `processor.<event>.transformations`. +// These metrics are used to populate the "Processed Events" graphs in Stack Monitoring. +type EventCounter struct { + registry *monitoring.Registry + + mu sync.RWMutex + eventCounters map[string]*monitoring.Int +} + +// NewEventCounter returns an EventCounter that counts events processed, recording +// them as `<event>.transformations` under the given registry. +func NewEventCounter(registry *monitoring.Registry) *EventCounter { + return &EventCounter{ + registry: registry, + eventCounters: make(map[string]*monitoring.Int), + } +} + +// ProcessBatch counts events in b, grouping by APMEvent.Processor.Event. +func (c *EventCounter) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + pe := event.Processor.Event + if pe == "" { + continue + } + c.mu.RLock() + eventCounter := c.eventCounters[pe] + c.mu.RUnlock() + if eventCounter == nil { + c.mu.Lock() + eventCounter = c.eventCounters[pe] + if eventCounter == nil { + // Metric may exist in the registry but not in our map, + // so first check if it exists before attempting to create. + name := "processor." + pe + ".transformations" + var ok bool + eventCounter, ok = c.registry.Get(name).(*monitoring.Int) + if !ok { + eventCounter = monitoring.NewInt(c.registry, name) + } + c.eventCounters[pe] = eventCounter + } + c.mu.Unlock() + } + eventCounter.Inc() + } + return nil +} diff --git a/model/modelprocessor/eventcounter_test.go b/model/modelprocessor/eventcounter_test.go new file mode 100644 index 00000000000..92b073e79af --- /dev/null +++ b/model/modelprocessor/eventcounter_test.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
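+ +// The snapshot below expects one counter per Processor.Event value; events with an empty Processor are skipped by EventCounter.ProcessBatch.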
+ +package modelprocessor_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/monitoring" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestEventCounter(t *testing.T) { + batch := model.Batch{ + {}, + {Processor: model.TransactionProcessor}, + {Processor: model.SpanProcessor}, + {Processor: model.TransactionProcessor}, + } + + expected := monitoring.MakeFlatSnapshot() + expected.Ints["processor.span.transformations"] = 1 + expected.Ints["processor.transaction.transformations"] = 2 + + registry := monitoring.NewRegistry() + processor := modelprocessor.NewEventCounter(registry) + err := processor.ProcessBatch(context.Background(), &batch) + assert.NoError(t, err) + snapshot := monitoring.CollectFlatSnapshot(registry, monitoring.Full, false) + assert.Equal(t, expected, snapshot) + +} diff --git a/model/modelprocessor/excludefromgrouping.go b/model/modelprocessor/excludefromgrouping.go new file mode 100644 index 00000000000..e8e29ac8c0b --- /dev/null +++ b/model/modelprocessor/excludefromgrouping.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + "regexp" + + "github.com/elastic/apm-server/model" +) + +// SetExcludeFromGrouping is a model.BatchProcessor that identifies stack frames +// to exclude from error grouping for RUM, using a configurable regular expression. +type SetExcludeFromGrouping struct { + Pattern *regexp.Regexp +} + +// ProcessBatch processes the stack traces of spans and errors in b, updating +// the exclude_from_grouping for stack frames based on whether they have a filename +// matching the regular expression. +func (s SetExcludeFromGrouping) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + switch { + case event.Span != nil: + s.processSpan(ctx, event.Span) + case event.Error != nil: + s.processError(ctx, event.Error) + } + } + return nil +} + +func (s SetExcludeFromGrouping) processSpan(ctx context.Context, event *model.Span) { + s.processStacktraceFrames(ctx, event.Stacktrace...) +} + +func (s SetExcludeFromGrouping) processError(ctx context.Context, event *model.Error) { + if event.Log != nil { + s.processStacktraceFrames(ctx, event.Log.Stacktrace...) + } + if event.Exception != nil { + s.processException(ctx, event.Exception) + } +} + +func (s SetExcludeFromGrouping) processException(ctx context.Context, exception *model.Exception) { + s.processStacktraceFrames(ctx, exception.Stacktrace...) 
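+ // recurse into nested causes so every frame in the exception tree is evaluated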
+ for _, cause := range exception.Cause { + s.processException(ctx, &cause) + } +} + +func (s SetExcludeFromGrouping) processStacktraceFrames(ctx context.Context, frames ...*model.StacktraceFrame) { + for _, frame := range frames { + frame.ExcludeFromGrouping = frame.Filename != "" && s.Pattern.MatchString(frame.Filename) + } +} diff --git a/model/modelprocessor/excludefromgrouping_test.go b/model/modelprocessor/excludefromgrouping_test.go new file mode 100644 index 00000000000..2399301ee2c --- /dev/null +++ b/model/modelprocessor/excludefromgrouping_test.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor_test + +import ( + "context" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetExcludeFromGrouping(t *testing.T) { + processor := modelprocessor.SetExcludeFromGrouping{ + Pattern: regexp.MustCompile("foo"), + } + + tests := []struct { + input, output model.Batch + }{{ + input: model.Batch{{Error: &model.Error{}}, {Transaction: &model.Transaction{}}}, + output: model.Batch{{Error: &model.Error{}}, {Transaction: &model.Transaction{}}}, + }, { + input: model.Batch{{ + Span: &model.Span{ + Stacktrace: model.Stacktrace{ + {Filename: "foo.go"}, + {Filename: "bar.go"}, + {}, + }, + }, + }}, + output: model.Batch{{ + Span: &model.Span{ + Stacktrace: model.Stacktrace{ + {ExcludeFromGrouping: true, Filename: "foo.go"}, + {Filename: "bar.go"}, + {}, + }, + }, + }}, + }, { + input: model.Batch{{ + Error: &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {Filename: "foo.go"}, + }, + }, + }, + }, { + Error: &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{ + {Filename: "foo.go"}, + }, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{ + {Filename: "foo.go"}, + }, + }}, + }, + }, + }}, + output: model.Batch{{ + Error: &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {ExcludeFromGrouping: true, Filename: "foo.go"}, + }, + }, + }, + }, { + Error: &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{ + {ExcludeFromGrouping: true, Filename: "foo.go"}, + }, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{ + {ExcludeFromGrouping: true, Filename: "foo.go"}, + }, + }}, + }, + }, + }}, + }} + + for _, test := range tests { + err := processor.ProcessBatch(context.Background(), &test.input) + assert.NoError(t, err) + assert.Equal(t, test.output, test.input) + } + +} diff --git a/model/modelprocessor/groupingkey.go b/model/modelprocessor/groupingkey.go new file mode 100644 index 00000000000..dec0fd15bcd --- /dev/null +++ b/model/modelprocessor/groupingkey.go @@ -0,0 +1,130 @@ 
+// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + "crypto/md5" + "encoding/hex" + "hash" + "io" + + "github.com/elastic/apm-server/model" +) + +// SetGroupingKey is a model.BatchProcessor that sets the grouping key for errors +// by hashing their stack frames. +type SetGroupingKey struct{} + +// ProcessBatch sets the grouping key for errors. +func (s SetGroupingKey) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + if event.Error != nil { + s.processError(ctx, event.Error) + } + } + return nil +} + +func (s SetGroupingKey) processError(ctx context.Context, event *model.Error) { + hash := md5.New() + var updated bool + if event.Exception != nil { + if s.hashExceptionTree(event.Exception, hash, s.hashExceptionType) { + updated = true + } + } + if event.Log != nil { + if s.maybeWriteString(event.Log.ParamMessage, hash) { + updated = true + } + } + var haveExceptionStacktrace bool + if event.Exception != nil { + haveExceptionStacktrace = s.hashExceptionTree(event.Exception, hash, s.hashExceptionStacktrace) + updated = updated || haveExceptionStacktrace + } + if !haveExceptionStacktrace && event.Log != nil { + if s.hashStacktrace(event.Log.Stacktrace, hash) { + updated = true + } + } + if !updated { + if event.Exception != nil { + updated = s.hashExceptionTree(event.Exception, hash, s.hashExceptionMessage) + } + if !updated && event.Log != nil { + s.maybeWriteString(event.Log.Message, hash) + } + } + event.GroupingKey = hex.EncodeToString(hash.Sum(nil)) +} + +func (s SetGroupingKey) hashExceptionTree(e *model.Exception, out hash.Hash, f func(*model.Exception, hash.Hash) bool) bool { + updated := f(e, out) + for _, cause := range e.Cause { + if s.hashExceptionTree(&cause, out, f) { + updated = true + } + } + return updated +} + +func (s SetGroupingKey) hashExceptionType(e *model.Exception, out hash.Hash) bool { + return s.maybeWriteString(e.Type, out) +} + +func (s SetGroupingKey) hashExceptionMessage(e *model.Exception, out hash.Hash) bool { + return s.maybeWriteString(e.Message, out) +} + +func (s SetGroupingKey) hashExceptionStacktrace(e *model.Exception, out hash.Hash) bool { + return s.hashStacktrace(e.Stacktrace, out) +} + +func (s SetGroupingKey) hashStacktrace(stacktrace model.Stacktrace, out hash.Hash) bool { + var updated bool + for _, frame := range stacktrace { + if frame.ExcludeFromGrouping { + continue + } + switch { + case frame.Module != "": + io.WriteString(out, frame.Module) + updated = true + case frame.Filename != "": + io.WriteString(out, frame.Filename) + updated = true + case frame.Classname != "": + io.WriteString(out, frame.Classname) + updated = true + } + if s.maybeWriteString(frame.Function, out) { + updated = true + } + } + return 
updated
+}
+
+func (SetGroupingKey) maybeWriteString(s string, out hash.Hash) bool {
+	if s == "" {
+		return false
+	}
+	io.WriteString(out, s)
+	return true
+}
diff --git a/model/modelprocessor/groupingkey_test.go b/model/modelprocessor/groupingkey_test.go
new file mode 100644
index 00000000000..d6339d0aad6
--- /dev/null
+++ b/model/modelprocessor/groupingkey_test.go
@@ -0,0 +1,132 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package modelprocessor_test
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/hex"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/apm-server/model"
+	"github.com/elastic/apm-server/model/modelprocessor"
+)
+
+func TestSetGroupingKey(t *testing.T) {
+	tests := map[string]struct {
+		input       model.Error
+		groupingKey string
+	}{
+		"empty": {
+			input:       model.Error{},
+			groupingKey: hashStrings( /*empty*/ ),
+		},
+		"exception_type_log_parammessage": {
+			input: model.Error{
+				Exception: &model.Exception{
+					Type: "exception_type",
+				},
+				Log: &model.Log{
+					ParamMessage: "log_parammessage",
+				},
+			},
+			groupingKey: hashStrings("exception_type", "log_parammessage"),
+		},
+		"exception_stacktrace": {
+			input: model.Error{
+				Exception: &model.Exception{
+					Stacktrace: model.Stacktrace{
+						{Module: "module", Filename: "filename", Classname: "classname", Function: "func_1"},
+						{Filename: "filename", Classname: "classname", Function: "func_2"},
+						{ExcludeFromGrouping: true, Function: "func_3"},
+					},
+					Cause: []model.Exception{{
+						Stacktrace: model.Stacktrace{
+							{Classname: "classname", Function: "func_4"},
+						},
+						Cause: []model.Exception{{
+							Stacktrace: model.Stacktrace{
+								{Function: "func_5"},
+							},
+						}},
+					}, {
+						Stacktrace: model.Stacktrace{
+							{Function: "func_6"},
+						},
+					}},
+				},
+				Log: &model.Log{Stacktrace: model.Stacktrace{{Filename: "abc"}}}, // ignored
+			},
+			groupingKey: hashStrings(
+				"module", "func_1", "filename", "func_2", "classname", "func_4", "func_5", "func_6",
+			),
+		},
+		"log_stacktrace": {
+			input: model.Error{
+				Log: &model.Log{
+					Stacktrace: model.Stacktrace{{Function: "function"}},
+				},
+			},
+			groupingKey: hashStrings("function"),
+		},
+		"exception_message": {
+			input: model.Error{
+				Exception: &model.Exception{
+					Message: "message_1",
+					Cause: []model.Exception{{
+						Message: "message_2",
+						Cause: []model.Exception{
+							{Message: "message_3"},
+						},
+					}, {
+						Message: "message_4",
+					}},
+				},
+				Log: &model.Log{Message: "log_message"}, // ignored
+			},
+			groupingKey: hashStrings("message_1", "message_2", "message_3", "message_4"),
+		},
+		"log_message": {
+			input: model.Error{
+				Log: &model.Log{Message: "log_message"}, // used as the final fallback
+			},
+			groupingKey: hashStrings("log_message"),
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			batch := 
model.Batch{{Error: &test.input}}
+			processor := modelprocessor.SetGroupingKey{}
+			err := processor.ProcessBatch(context.Background(), &batch)
+			assert.NoError(t, err)
+			assert.Equal(t, test.groupingKey, batch[0].Error.GroupingKey)
+		})
+	}
+
+}
+
+func hashStrings(s ...string) string {
+	md5 := md5.New()
+	for _, s := range s {
+		md5.Write([]byte(s))
+	}
+	return hex.EncodeToString(md5.Sum(nil))
+}
diff --git a/model/modelprocessor/hostname.go b/model/modelprocessor/hostname.go
new file mode 100644
index 00000000000..244852d442b
--- /dev/null
+++ b/model/modelprocessor/hostname.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package modelprocessor
+
+import (
+	"context"
+
+	"github.com/elastic/apm-server/model"
+)
+
+// SetHostHostname is a model.BatchProcessor that sets the final
+// host.name and host.hostname values, according to whether the
+// event originated from within Kubernetes or not.
+type SetHostHostname struct{}
+
+// ProcessBatch sets or overrides the host.name and host.hostname fields for events.
+func (SetHostHostname) ProcessBatch(ctx context.Context, b *model.Batch) error {
+	for i := range *b {
+		setHostHostname(&(*b)[i])
+	}
+	return nil
+}
+
+func setHostHostname(event *model.APMEvent) {
+	switch {
+	case event.Kubernetes.NodeName != "":
+		// kubernetes.node.name is set: set host.hostname to its value.
+		event.Host.Hostname = event.Kubernetes.NodeName
+	case event.Kubernetes.PodName != "" || event.Kubernetes.PodUID != "" || event.Kubernetes.Namespace != "":
+		// kubernetes.* is set, but kubernetes.node.name is not: don't set host.hostname at all.
+		event.Host.Hostname = ""
+	default:
+		// Otherwise use the originally specified host.hostname value.
+	}
+	if event.Host.Name == "" {
+		event.Host.Name = event.Host.Hostname
+	}
+}
diff --git a/model/modelprocessor/hostname_test.go b/model/modelprocessor/hostname_test.go
new file mode 100644
index 00000000000..9232f107b0b
--- /dev/null
+++ b/model/modelprocessor/hostname_test.go
@@ -0,0 +1,72 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor_test + +import ( + "testing" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetHostHostname(t *testing.T) { + withConfiguredHostname := model.APMEvent{ + Host: model.Host{ + Name: "configured_hostname", + Hostname: "detected_hostname", + }, + } + withDetectedHostname := model.APMEvent{ + Host: model.Host{ + Hostname: "detected_hostname", + }, + } + withKubernetesPodName := withDetectedHostname + withKubernetesPodName.Kubernetes.PodName = "kubernetes.pod.name" + withKubernetesNodeName := withKubernetesPodName + withKubernetesNodeName.Kubernetes.NodeName = "kubernetes.node.name" + + processor := modelprocessor.SetHostHostname{} + + testProcessBatch(t, processor, withConfiguredHostname, withConfiguredHostname) // unchanged + testProcessBatch(t, processor, withDetectedHostname, + eventWithHostName( + eventWithHostHostname(withDetectedHostname, "detected_hostname"), + "detected_hostname", + ), + ) + testProcessBatch(t, processor, withKubernetesPodName, + eventWithHostHostname(withKubernetesPodName, ""), + ) + testProcessBatch(t, processor, withKubernetesNodeName, + eventWithHostName( + eventWithHostHostname(withKubernetesNodeName, "kubernetes.node.name"), + "kubernetes.node.name", + ), + ) +} + +func eventWithHostHostname(in model.APMEvent, detectedHostname string) model.APMEvent { + in.Host.Hostname = detectedHostname + return in +} + +func eventWithHostName(in model.APMEvent, configuredHostname string) model.APMEvent { + in.Host.Name = configuredHostname + return in +} diff --git a/model/modelprocessor/libraryframe.go b/model/modelprocessor/libraryframe.go new file mode 100644 index 00000000000..fa51727ff0a --- /dev/null +++ b/model/modelprocessor/libraryframe.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + "regexp" + + "github.com/elastic/apm-server/model" +) + +// SetLibraryFrame is a model.BatchProcessor that identifies stack frames +// from library code for RUM, using a configurable regular expression. +type SetLibraryFrame struct { + Pattern *regexp.Regexp +} + +// ProcessBatch processes the stack traces of spans and errors in b, updating +// the library frame flag for stack frames based on whether they have a filename +// matching the regular expression. 
+func (s SetLibraryFrame) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + switch { + case event.Span != nil: + s.processSpan(ctx, event.Span) + case event.Error != nil: + s.processError(ctx, event.Error) + } + } + return nil +} + +func (s SetLibraryFrame) processSpan(ctx context.Context, event *model.Span) { + s.processStacktraceFrames(ctx, event.Stacktrace...) +} + +func (s SetLibraryFrame) processError(ctx context.Context, event *model.Error) { + if event.Log != nil { + s.processStacktraceFrames(ctx, event.Log.Stacktrace...) + } + if event.Exception != nil { + s.processException(ctx, event.Exception) + } +} + +func (s SetLibraryFrame) processException(ctx context.Context, exception *model.Exception) { + s.processStacktraceFrames(ctx, exception.Stacktrace...) + for _, cause := range exception.Cause { + s.processException(ctx, &cause) + } +} + +func (s SetLibraryFrame) processStacktraceFrames(ctx context.Context, frames ...*model.StacktraceFrame) { + for _, frame := range frames { + frame.Original.LibraryFrame = frame.LibraryFrame + frame.LibraryFrame = frame.Filename != "" && s.Pattern.MatchString(frame.Filename) || + frame.AbsPath != "" && s.Pattern.MatchString(frame.AbsPath) + } +} diff --git a/model/modelprocessor/libraryframe_test.go b/model/modelprocessor/libraryframe_test.go new file mode 100644 index 00000000000..dcf3d8f6098 --- /dev/null +++ b/model/modelprocessor/libraryframe_test.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
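Because `SetGroupingKey` skips frames flagged with `ExcludeFromGrouping`, it only produces the intended keys when `SetExcludeFromGrouping` has already run. A minimal sketch of that ordering, assuming a plain sequential loop over `model.BatchProcessor` values; the `vendor/` pattern and `main` wrapper are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"regexp"

	"github.com/elastic/apm-server/model"
	"github.com/elastic/apm-server/model/modelprocessor"
)

func main() {
	processors := []model.BatchProcessor{
		// Must run first, so that vendored frames are flagged...
		modelprocessor.SetExcludeFromGrouping{Pattern: regexp.MustCompile("vendor/")}, // hypothetical pattern
		// ...and therefore skipped when the grouping key is hashed.
		modelprocessor.SetGroupingKey{},
	}
	batch := model.Batch{{
		Error: &model.Error{
			Exception: &model.Exception{
				Stacktrace: model.Stacktrace{
					{Filename: "vendor/lib.go", Function: "helper"},
					{Filename: "main.go", Function: "main"},
				},
			},
		},
	}}
	for _, p := range processors {
		if err := p.ProcessBatch(context.Background(), &batch); err != nil {
			panic(err)
		}
	}
	// The key is the MD5 of "main.go" and "main" only; the vendor frame is ignored.
	fmt.Println(batch[0].Error.GroupingKey)
}
```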
+ +package modelprocessor_test + +import ( + "context" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetLibraryFrames(t *testing.T) { + processor := modelprocessor.SetLibraryFrame{ + Pattern: regexp.MustCompile("foo"), + } + + tests := []struct { + input, output model.Batch + }{{ + input: model.Batch{{Error: &model.Error{}}, {Transaction: &model.Transaction{}}}, + output: model.Batch{{Error: &model.Error{}}, {Transaction: &model.Transaction{}}}, + }, { + input: model.Batch{{ + Span: &model.Span{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go"}, + {LibraryFrame: false, AbsPath: "foobar.go"}, + {LibraryFrame: true, Filename: "bar.go"}, + {LibraryFrame: true}, + }, + }, + }}, + output: model.Batch{{ + Span: &model.Span{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go", Original: model.Original{LibraryFrame: true}}, + {LibraryFrame: true, AbsPath: "foobar.go", Original: model.Original{LibraryFrame: false}}, + {LibraryFrame: false, Filename: "bar.go", Original: model.Original{LibraryFrame: true}}, + {LibraryFrame: false, Original: model.Original{LibraryFrame: true}}, + }, + }, + }}, + }, { + input: model.Batch{{ + Error: &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go"}, + {LibraryFrame: false, AbsPath: "foobar.go"}, + {LibraryFrame: true, Filename: "bar.go"}, + {LibraryFrame: true}, + }, + }, + }, + }, { + Error: &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go"}, + }, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go"}, + }, + }}, + }, + }, + }}, + output: model.Batch{{ + Error: &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go", Original: model.Original{LibraryFrame: true}}, + {LibraryFrame: true, AbsPath: "foobar.go", Original: model.Original{LibraryFrame: false}}, + {LibraryFrame: false, Filename: "bar.go", Original: model.Original{LibraryFrame: true}}, + {LibraryFrame: false, Original: model.Original{LibraryFrame: true}}, + }, + }, + }, + }, { + Error: &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go", Original: model.Original{LibraryFrame: true}}, + }, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{ + {LibraryFrame: true, Filename: "foo.go", Original: model.Original{LibraryFrame: true}}, + }, + }}, + }, + }, + }}, + }} + + for _, test := range tests { + err := processor.ProcessBatch(context.Background(), &test.input) + assert.NoError(t, err) + assert.Equal(t, test.output, test.input) + } + +} diff --git a/model/modelprocessor/metricsetname.go b/model/modelprocessor/metricsetname.go new file mode 100644 index 00000000000..0e09eb3006e --- /dev/null +++ b/model/modelprocessor/metricsetname.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package modelprocessor
+
+import (
+	"context"
+
+	"github.com/elastic/apm-server/model"
+)
+
+const (
+	spanBreakdownMetricsetName        = "span_breakdown"
+	transactionBreakdownMetricsetName = "transaction_breakdown"
+	appMetricsetName                  = "app"
+)
+
+// SetMetricsetName is a model.BatchProcessor that sets a name for
+// metricsets containing well-known agent metrics, such as breakdown
+// metrics.
+type SetMetricsetName struct{}
+
+// ProcessBatch sets the name for metricsets. Well-defined metrics (breakdowns)
+// will be given a specific name, while all other metrics will be given the name
+// "app".
+func (SetMetricsetName) ProcessBatch(ctx context.Context, b *model.Batch) error {
+	for _, event := range *b {
+		ms := event.Metricset
+		if ms == nil || ms.Name != "" {
+			continue
+		}
+		ms.Name = appMetricsetName
+		if event.Transaction == nil {
+			// Not a breakdown metricset.
+			continue
+		}
+		ms.Name = transactionBreakdownMetricsetName
+		if event.Span != nil && event.Span.SelfTime.Count > 0 {
+			ms.Name = spanBreakdownMetricsetName
+		}
+	}
+	return nil
+}
diff --git a/model/modelprocessor/metricsetname_test.go b/model/modelprocessor/metricsetname_test.go
new file mode 100644
index 00000000000..3b213504d99
--- /dev/null
+++ b/model/modelprocessor/metricsetname_test.go
@@ -0,0 +1,74 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
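A sketch making the naming rules concrete: a bare metricset becomes "app", one carried alongside a transaction becomes "transaction_breakdown", and one that also has span self-time becomes "span_breakdown". The `main` wrapper is an assumption; the inputs mirror the test below:

```go
package main

import (
	"context"
	"fmt"

	"github.com/elastic/apm-server/model"
	"github.com/elastic/apm-server/model/modelprocessor"
)

func main() {
	batch := model.Batch{
		{Metricset: &model.Metricset{}}, // plain app metrics
		{ // breakdown metrics reported against a transaction type
			Metricset:   &model.Metricset{},
			Transaction: &model.Transaction{Type: "request"},
		},
		{ // breakdown metrics with span self-time
			Metricset:   &model.Metricset{},
			Transaction: &model.Transaction{Type: "request"},
			Span:        &model.Span{SelfTime: model.AggregatedDuration{Count: 1}},
		},
	}
	if err := (modelprocessor.SetMetricsetName{}).ProcessBatch(context.Background(), &batch); err != nil {
		panic(err)
	}
	for _, event := range batch {
		// Prints: app, transaction_breakdown, span_breakdown.
		fmt.Println(event.Metricset.Name)
	}
}
```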
+
+package modelprocessor_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/apm-server/model"
+	"github.com/elastic/apm-server/model/modelprocessor"
+)
+
+func TestSetMetricsetName(t *testing.T) {
+	tests := []struct {
+		event model.APMEvent
+		name  string
+	}{{
+		event: model.APMEvent{Metricset: &model.Metricset{}},
+		name:  "app",
+	}, {
+		event: model.APMEvent{
+			Metricset: &model.Metricset{Name: "already_set"},
+		},
+		name: "already_set",
+	}, {
+		event: model.APMEvent{
+			Metricset: &model.Metricset{
+				Samples: map[string]model.MetricsetSample{},
+			},
+			Transaction: &model.Transaction{
+				Type:           "request",
+				BreakdownCount: 1,
+			},
+		},
+		name: "transaction_breakdown",
+	}, {
+		event: model.APMEvent{
+			Metricset:   &model.Metricset{},
+			Transaction: &model.Transaction{Type: "request"},
+			Span: &model.Span{
+				SelfTime: model.AggregatedDuration{
+					Count: 1,
+				},
+			},
+		},
+		name: "span_breakdown",
+	}}
+
+	for _, test := range tests {
+		batch := model.Batch{test.event}
+		processor := modelprocessor.SetMetricsetName{}
+		err := processor.ProcessBatch(context.Background(), &batch)
+		assert.NoError(t, err)
+		assert.Equal(t, test.name, batch[0].Metricset.Name)
+	}
+
+}
diff --git a/model/modelprocessor/nodename.go b/model/modelprocessor/nodename.go
new file mode 100644
index 00000000000..f31541118a9
--- /dev/null
+++ b/model/modelprocessor/nodename.go
@@ -0,0 +1,51 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package modelprocessor
+
+import (
+	"context"
+
+	"github.com/elastic/apm-server/model"
+)
+
+// SetServiceNodeName is a model.BatchProcessor that sets the service
+// node name value for events without one already set.
+//
+// SetServiceNodeName should be called after SetHostHostname, to
+// ensure host.name is set.
+type SetServiceNodeName struct{}
+
+// ProcessBatch sets a default service.node.name for events without one already set.
+func (SetServiceNodeName) ProcessBatch(ctx context.Context, b *model.Batch) error {
+	for i := range *b {
+		setServiceNodeName(&(*b)[i])
+	}
+	return nil
+}
+
+func setServiceNodeName(event *model.APMEvent) {
+	if event.Service.Node.Name != "" {
+		// Already set.
+		return
+	}
+	nodeName := event.Container.ID
+	if nodeName == "" {
+		nodeName = event.Host.Name
+	}
+	event.Service.Node.Name = nodeName
+}
diff --git a/model/modelprocessor/nodename_test.go b/model/modelprocessor/nodename_test.go
new file mode 100644
index 00000000000..c0172f39249
--- /dev/null
+++ b/model/modelprocessor/nodename_test.go
@@ -0,0 +1,55 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor_test + +import ( + "testing" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/model/modelprocessor" +) + +func TestSetServiceNodeName(t *testing.T) { + withServiceNodeName := model.APMEvent{ + Service: model.Service{ + Node: model.ServiceNode{ + Name: "node_name", + }, + }, + } + withConfiguredHostname := model.APMEvent{ + Host: model.Host{Name: "configured_hostname"}, + } + withContainerID := withConfiguredHostname + withContainerID.Container.ID = "container_id" + + processor := modelprocessor.SetServiceNodeName{} + + testProcessBatch(t, processor, withServiceNodeName, withServiceNodeName) // unchanged + testProcessBatch(t, processor, withConfiguredHostname, + eventWithServiceNodeName(withConfiguredHostname, "configured_hostname"), + ) + testProcessBatch(t, processor, withContainerID, + eventWithServiceNodeName(withContainerID, "container_id"), + ) +} + +func eventWithServiceNodeName(in model.APMEvent, nodeName string) model.APMEvent { + in.Service.Node.Name = nodeName + return in +} diff --git a/model/modelprocessor/nop.go b/model/modelprocessor/nop.go new file mode 100644 index 00000000000..55902b20641 --- /dev/null +++ b/model/modelprocessor/nop.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package modelprocessor + +import ( + "context" + + "github.com/elastic/apm-server/model" +) + +// Nop is a no-op model.BatchProcessor. +type Nop struct{} + +// ProcessBatch does nothing -- just returns nil. +func (Nop) ProcessBatch(ctx context.Context, batch *model.Batch) error { + return nil +} diff --git a/model/network.go b/model/network.go new file mode 100644 index 00000000000..4e52e2192fd --- /dev/null +++ b/model/network.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package model
+
+import "github.com/elastic/beats/v7/libbeat/common"
+
+type Network struct {
+	// Connection holds information about a network connection.
+	Connection NetworkConnection
+
+	// Carrier holds information about a connection carrier.
+	Carrier NetworkCarrier
+}
+
+type NetworkConnection struct {
+	// Type holds the connection type category,
+	// e.g. "wifi", "wired", and "cell".
+	Type string
+
+	// Subtype holds more details of the connection type,
+	// specific to the connection type category.
+	//
+	// For example, if Type is "cell" then Subtype
+	// may hold the cell technology, e.g. "LTE" or "GPRS".
+	Subtype string
+}
+
+type NetworkCarrier struct {
+	// Name holds the carrier's name.
+	Name string
+
+	// MCC holds the carrier's mobile country code.
+	MCC string
+
+	// MNC holds the carrier's mobile network code.
+	MNC string
+
+	// ICC holds the carrier's ISO 3166-1 alpha-2 country code.
+	ICC string
+}
+
+func (n *Network) fields() common.MapStr {
+	var network mapStr
+	network.maybeSetMapStr("connection", n.Connection.fields())
+	network.maybeSetMapStr("carrier", n.Carrier.fields())
+	return common.MapStr(network)
+}
+
+func (c *NetworkConnection) fields() common.MapStr {
+	var connection mapStr
+	connection.maybeSetString("type", c.Type)
+	connection.maybeSetString("subtype", c.Subtype)
+	return common.MapStr(connection)
+}
+
+func (c *NetworkCarrier) fields() common.MapStr {
+	var carrier mapStr
+	carrier.maybeSetString("mcc", c.MCC)
+	carrier.maybeSetString("mnc", c.MNC)
+	carrier.maybeSetString("icc", c.ICC)
+	carrier.maybeSetString("name", c.Name)
+	return common.MapStr(carrier)
+}
diff --git a/model/network_test.go b/model/network_test.go
new file mode 100644
index 00000000000..42bf620c8a1
--- /dev/null
+++ b/model/network_test.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
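One behaviour worth noting: empty sub-objects are omitted entirely rather than serialized as `{}`, which is what the empty-`Network` case in the test below relies on. A test-style sketch under that assumption (the test name is hypothetical; it would live in package `model`, since `fields()` is unexported):

```go
package model

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/elastic/beats/v7/libbeat/common"
)

// Hypothetical test: a carrier-only Network emits no "connection" key at all,
// because the zero-valued NetworkConnection produces an empty sub-map.
func TestNetworkCarrierOnly(t *testing.T) {
	n := Network{Carrier: NetworkCarrier{Name: "Vodafone"}}
	assert.Equal(t, common.MapStr{
		"carrier": common.MapStr{"name": "Vodafone"},
	}, n.fields())
}
```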
+ +package model + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestNetworkTransform(t *testing.T) { + tests := []struct { + Network Network + Output common.MapStr + }{ + { + Network: Network{}, + Output: nil, + }, + { + Network: Network{ + Connection: NetworkConnection{ + Type: "cell", + Subtype: "LTE", + }, + Carrier: NetworkCarrier{ + Name: "Vodafone", + MCC: "234", + MNC: "03", + ICC: "UK", + }, + }, + Output: common.MapStr{ + "connection": common.MapStr{ + "type": "cell", + "subtype": "LTE", + }, + "carrier": common.MapStr{ + "name": "Vodafone", + "mcc": "234", + "mnc": "03", + "icc": "UK", + }, + }, + }, + } + + for _, test := range tests { + output := test.Network.fields() + assert.Equal(t, test.Output, output) + } +} diff --git a/model/observer.go b/model/observer.go new file mode 100644 index 00000000000..85a2bf010ec --- /dev/null +++ b/model/observer.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Observer describes a special network, security, or application device used to detect, +// observe, or create network, security, or application-related events and metrics. +// +// https://www.elastic.co/guide/en/ecs/current/ecs-observer.html +type Observer struct { + EphemeralID string + Hostname string + ID string + Name string + Type string + Version string + VersionMajor int +} + +func (o *Observer) Fields() common.MapStr { + var fields mapStr + fields.maybeSetString("ephemeral_id", o.EphemeralID) + fields.maybeSetString("hostname", o.Hostname) + fields.maybeSetString("id", o.ID) + fields.maybeSetString("name", o.Name) + fields.maybeSetString("type", o.Type) + fields.maybeSetString("version", o.Version) + if o.VersionMajor > 0 { + fields.set("version_major", o.VersionMajor) + } + return common.MapStr(fields) +} diff --git a/model/observer_test.go b/model/observer_test.go new file mode 100644 index 00000000000..d76c9a30f97 --- /dev/null +++ b/model/observer_test.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package model
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+)
+
+func TestObserverFields(t *testing.T) {
+	tests := []struct {
+		Observer Observer
+		Fields   common.MapStr
+	}{
+		{
+			Observer: Observer{},
+			Fields:   nil,
+		},
+		{
+			Observer: Observer{
+				EphemeralID: "observer_ephemeral_id",
+				Hostname:    "observer_hostname",
+				ID:          "observer_id",
+				Name:        "observer_name",
+				Type:        "observer_type",
+				Version:     "observer_version",
+			},
+			Fields: common.MapStr{
+				"ephemeral_id": "observer_ephemeral_id",
+				"hostname":     "observer_hostname",
+				"id":           "observer_id",
+				"name":         "observer_name",
+				"type":         "observer_type",
+				"version":      "observer_version",
+			},
+		},
+	}
+
+	for _, test := range tests {
+		assert.Equal(t, test.Fields, test.Observer.Fields())
+	}
+}
diff --git a/model/os.go b/model/os.go
new file mode 100644
index 00000000000..bcd2b12b562
--- /dev/null
+++ b/model/os.go
@@ -0,0 +1,45 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package model
+
+import (
+	"github.com/elastic/beats/v7/libbeat/common"
+)
+
+// OS holds information about the operating system.
+type OS struct {
+	// Platform holds the operating system platform, e.g. centos, ubuntu, windows.
+	Platform string
+
+	// Full holds the full operating system name, including the version or code name.
+	Full string
+
+	// Type categorizes the operating system into one of the broad commercial families.
+	//
+	// If specified, Type must be one of the following (lowercase): linux, macos, unix, windows.
+	// If the OS you’re dealing with is not in the list, the field should not be populated.
+	Type string
+}
+
+func (o *OS) fields() common.MapStr {
+	var fields mapStr
+	fields.maybeSetString("platform", o.Platform)
+	fields.maybeSetString("full", o.Full)
+	fields.maybeSetString("type", o.Type)
+	return common.MapStr(fields)
+}
diff --git a/model/parent.go b/model/parent.go
new file mode 100644
index 00000000000..e7264a67237
--- /dev/null
+++ b/model/parent.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Parent holds information about the parent of a trace event. +type Parent struct { + // ID holds the ID of the parent event. + ID string +} + +func (p *Parent) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("id", p.ID) + return common.MapStr(fields) +} diff --git a/model/process.go b/model/process.go index 8a5f3695f01..370a4622a3e 100644 --- a/model/process.go +++ b/model/process.go @@ -22,10 +22,12 @@ import ( ) type Process struct { - Pid int - Ppid *int - Title string - Argv []string + Pid int + Ppid *int + Title string + Argv []string + CommandLine string + Executable string } func (p *Process) fields() common.MapStr { @@ -40,5 +42,7 @@ func (p *Process) fields() common.MapStr { proc.set("args", p.Argv) } proc.maybeSetString("title", p.Title) + proc.maybeSetString("command_line", p.CommandLine) + proc.maybeSetString("executable", p.Executable) return common.MapStr(proc) } diff --git a/model/process_test.go b/model/process_test.go index afa4231d8ba..bffb9bb132f 100644 --- a/model/process_test.go +++ b/model/process_test.go @@ -29,6 +29,8 @@ import ( func TestProcessTransform(t *testing.T) { processTitle := "node" + commandLine := "node run.js" + executablePath := "/usr/bin/node" argv := []string{ "node", "server.js", @@ -44,16 +46,20 @@ func TestProcessTransform(t *testing.T) { }, { Process: Process{ - Pid: 123, - Ppid: tests.IntPtr(456), - Title: processTitle, - Argv: argv, + Pid: 123, + Ppid: tests.IntPtr(456), + Title: processTitle, + Argv: argv, + CommandLine: commandLine, + Executable: executablePath, }, Output: common.MapStr{ - "pid": 123, - "ppid": 456, - "title": processTitle, - "args": argv, + "pid": 123, + "ppid": 456, + "title": processTitle, + "args": argv, + "command_line": commandLine, + "executable": executablePath, }, }, } diff --git a/model/processor.go b/model/processor.go new file mode 100644 index 00000000000..fb5116b25e1 --- /dev/null +++ b/model/processor.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
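Like the other optional process fields, the new `command_line` and `executable` keys should only be emitted when the corresponding fields are non-empty, since both go through `maybeSetString`. A test-style sketch under that assumption (hypothetical test name, in package `model`):

```go
package model

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/elastic/beats/v7/libbeat/common"
)

// Hypothetical test: Ppid, Title, and Argv are unset, so ppid/title/args
// are omitted and only pid plus the new fields appear.
func TestProcessCommandLineExecutable(t *testing.T) {
	p := Process{Pid: 123, CommandLine: "node run.js", Executable: "/usr/bin/node"}
	assert.Equal(t, common.MapStr{
		"pid":          123,
		"command_line": "node run.js",
		"executable":   "/usr/bin/node",
	}, p.fields())
}
```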
+ +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Processor identifies an event type, and is used for routing events +// to the appropriate data stream or index. +// +// TODO(axw) this should be replaced with ECS event categorisation fields. +type Processor struct { + Name string + Event string +} + +func (p *Processor) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("name", p.Name) + fields.maybeSetString("event", p.Event) + return common.MapStr(fields) +} diff --git a/model/profile.go b/model/profile.go index 666ca477976..a5b08d696ce 100644 --- a/model/profile.go +++ b/model/profile.go @@ -18,125 +18,60 @@ package model import ( - "context" - "fmt" "time" - "github.com/cespare/xxhash/v2" - "github.com/gofrs/uuid" - "github.com/google/pprof/profile" - - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" ) const ( - profileProcessorName = "profile" - profileDocType = "profile" + ProfilesDataset = "apm.profiling" ) -var profileProcessorEntry = common.MapStr{ - "name": profileProcessorName, - "event": profileDocType, -} +// ProfileProcessor is the Processor value that should be assigned to profile events. +var ProfileProcessor = Processor{Name: "profile", Event: "profile"} -// PprofProfile represents a resource profile. -type PprofProfile struct { - Metadata Metadata - Profile *profile.Profile +// ProfileSample holds a profiling sample. +type ProfileSample struct { + Duration time.Duration + ProfileID string + Stack []ProfileSampleStackframe + Values map[string]int64 } -// Transform transforms a Profile into a sequence of beat.Events: one per profile sample. -func (pp PprofProfile) Transform(ctx context.Context, _ *transform.Config) []beat.Event { - // Precompute value field names for use in each event. - // TODO(axw) limit to well-known value names? - profileTimestamp := time.Unix(0, pp.Profile.TimeNanos) - valueFieldNames := make([]string, len(pp.Profile.SampleType)) - for i, sampleType := range pp.Profile.SampleType { - sampleUnit := normalizeUnit(sampleType.Unit) - valueFieldNames[i] = sampleType.Type + "." + sampleUnit - } +// ProfileSampleStackframe holds details of a stack frame for a profile sample. +type ProfileSampleStackframe struct { + ID string + Function string + Filename string + Line int64 +} - // Generate a unique profile ID shared by all samples in the profile. - // If we can't generate a UUID for whatever reason, omit the profile ID. 
- var profileID string - if uuid, err := uuid.NewV4(); err == nil { - profileID = fmt.Sprintf("%x", uuid) +func (p *ProfileSample) setFields(fields *mapStr) { + var profileFields mapStr + profileFields.maybeSetString("id", p.ProfileID) + if p.Duration > 0 { + profileFields.set("duration", int64(p.Duration)) } - samples := make([]beat.Event, len(pp.Profile.Sample)) - for i, sample := range pp.Profile.Sample { - profileFields := common.MapStr{} - if profileID != "" { - profileFields["id"] = profileID - } - if pp.Profile.DurationNanos > 0 { - profileFields["duration"] = pp.Profile.DurationNanos - } - if len(sample.Location) > 0 { - hash := xxhash.New() - stack := make([]common.MapStr, len(sample.Location)) - for i := len(sample.Location) - 1; i >= 0; i-- { - loc := sample.Location[i] - line := loc.Line[0] // aggregated at function level - - // NOTE(axw) Currently we hash the function names so that - // we can aggregate stacks across multiple builds, or where - // binaries are not reproducible. - // - // If we decide to identify stack traces and frames using - // function addresses, then need to subtract the mapping's - // start address to eliminate the effects of ASLR, i.e. - // - // var buf [8]byte - // binary.BigEndian.PutUint64(buf[:], loc.Address-loc.Mapping.Start) - // hash.Write(buf[:]) - - hash.WriteString(line.Function.Name) - fields := common.MapStr{ - "id": fmt.Sprintf("%x", hash.Sum(nil)), - "function": line.Function.Name, - } - if line.Function.Filename != "" { - utility.Set(fields, "filename", line.Function.Filename) - if line.Line > 0 { - utility.Set(fields, "line", line.Line) - } - } - stack[i] = fields + if len(p.Stack) > 0 { + stackFields := make([]common.MapStr, len(p.Stack)) + for i, frame := range p.Stack { + frameFields := mapStr{ + "id": frame.ID, + "function": frame.Function, } - utility.Set(profileFields, "stack", stack) - utility.Set(profileFields, "top", stack[0]) - } - for i, v := range sample.Value { - utility.Set(profileFields, valueFieldNames[i], v) - } - event := beat.Event{ - Timestamp: profileTimestamp, - Fields: common.MapStr{ - "processor": profileProcessorEntry, - profileDocType: profileFields, - }, - } - pp.Metadata.Set(event.Fields) - if len(sample.Label) > 0 { - labels := make(common.MapStr) - for k, v := range sample.Label { - utility.Set(labels, k, v) + if frameFields.maybeSetString("filename", frame.Filename) { + if frame.Line > 0 { + frameFields.set("line", frame.Line) + } } - utility.DeepUpdate(event.Fields, "labels", labels) + stackFields[i] = common.MapStr(frameFields) } - samples[i] = event + profileFields.set("stack", stackFields) + profileFields.set("top", stackFields[0]) } - return samples -} - -func normalizeUnit(unit string) string { - switch unit { - case "nanoseconds": - unit = "ns" + for k, v := range p.Values { + profileFields.set(k, v) } - return unit + fields.set("profile", common.MapStr(profileFields)) } diff --git a/model/profile/_meta/fields.yml b/model/profile/_meta/fields.yml index 4821e008654..ed00ae70e88 100644 --- a/model/profile/_meta/fields.yml +++ b/model/profile/_meta/fields.yml @@ -2,6 +2,685 @@ title: APM Profile description: Profiling-specific data for APM. fields: + - name: processor.name + type: keyword + description: Processor name. + overwrite: true + + - name: processor.event + type: keyword + description: Processor event. + overwrite: true + + - name: timestamp + type: group + fields: + - name: us + type: long + count: 1 + description: > + Timestamp of the event in microseconds since Unix epoch. 
+ overwrite: true + + - name: labels + type: object + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 + dynamic: true + overwrite: true + description: > + A flat mapping of user-defined labels with string, boolean or number values. + + - name: service + type: group + dynamic: false + description: > + Service fields. + fields: + - name: name + type: keyword + description: > + Immutable name of the service emitting this event. + overwrite: true + + - name: version + type: keyword + description: > + Version of the service emitting this event. + overwrite: true + + - name: environment + type: keyword + description: > + Service environment. + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Unique meaningful name of the service node. + overwrite: true + + - name: language + type: group + fields: + + - name: name + type: keyword + description: > + Name of the programming language used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the programming language used. + overwrite: true + + - name: runtime + type: group + fields: + + - name: name + type: keyword + description: > + Name of the runtime used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the runtime used. + overwrite: true + + - name: framework + type: group + fields: + + - name: name + type: keyword + description: > + Name of the framework used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the framework used. + overwrite: true + + - name: agent + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + Name of the agent used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the agent used. + overwrite: true + + - name: ephemeral_id + type: keyword + description: > + The Ephemeral ID identifies a running process. + overwrite: true + + - name: container + type: group + dynamic: false + title: Container + description: > + Container fields are used for meta information about the specific container + that is the source of information. These fields help correlate data based + containers from any runtime. + fields: + + - name: id + type: keyword + description: > + Unique container id. + overwrite: true + + - name: network + type: group + dynamic: false + description: > + Optional network fields + fields: + + - name: connection + type: group + description: > + Network connection details + fields: + + - name: type + type: keyword + description: > + Network connection type, eg. "wifi", "cell" + overwrite: true + + - name: subtype + type: keyword + description: > + Detailed network connection sub-type, e.g. "LTE", "CDMA" + overwrite: true + + - name: carrier + type: group + description: > + Network operator + fields: + + - name: name + type: keyword + overwrite: true + description: > + Carrier name, eg. Vodafone, T-Mobile, etc. + + - name: mcc + type: keyword + overwrite: true + description: > + Mobile country code + + - name: mnc + type: keyword + overwrite: true + description: > + Mobile network code + + - name: icc + type: keyword + overwrite: true + description: > + ISO country code, eg. 
US + + - name: kubernetes + type: group + dynamic: false + title: Kubernetes + description: > + Kubernetes metadata reported by agents + fields: + + - name: namespace + type: keyword + description: > + Kubernetes namespace + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Kubernetes node name + overwrite: true + + - name: pod + type: group + fields: + + - name: name + type: keyword + description: > + Kubernetes pod name + overwrite: true + + - name: uid + type: keyword + description: > + Kubernetes Pod UID + overwrite: true + + - name: host + type: group + dynamic: false + description: > + Optional host fields. + fields: + + - name: architecture + type: keyword + description: > + The architecture of the host the event was recorded on. + overwrite: true + + - name: hostname + type: keyword + description: > + The hostname of the host the event was recorded on. + overwrite: true + + - name: name + type: keyword + description: > + Name of the host the event was recorded on. + It can contain same information as host.hostname or a name specified by the user. + overwrite: true + + - name: ip + type: ip + description: > + IP of the host that records the event. + overwrite: true + + - name: os + title: Operating System + group: 2 + description: > + The OS fields contain information about the operating system. + type: group + fields: + - name: platform + type: keyword + description: > + The platform of the host the event was recorded on. + overwrite: true + + - name: process + type: group + dynamic: false + description: > + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: > + Process arguments. + May be filtered to protect sensitive information. + overwrite: true + + - name: pid + type: long + description: > + Numeric process ID of the service process. + overwrite: true + + - name: ppid + type: long + description: > + Numeric ID of the service's parent process. + overwrite: true + + - name: title + type: keyword + description: > + Service process title. + overwrite: true + + - name: observer + type: group + dynamic: false + fields: + + - name: listening + type: keyword + overwrite: true + description: > + Address the server is listening on. + + - name: hostname + type: keyword + overwrite: true + description: > + Hostname of the APM Server. + + - name: version + type: keyword + overwrite: true + description: > + APM Server version. + + - name: version_major + type: byte + overwrite: true + description: > + Major version number of the observer + + - name: type + type: keyword + overwrite: true + description: > + The type will be set to `apm-server`. + + - name: id + type: keyword + overwrite: true + description: > + Unique identifier of the APM Server. + + - name: ephemeral_id + type: keyword + overwrite: true + description: > + Ephemeral identifier of the APM Server. + + - name: user + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + The username of the logged in user. + overwrite: true + + - name: id + type: keyword + description: > + Identifier of the logged in user. + overwrite: true + + - name: email + type: keyword + description: > + Email of the logged in user. + overwrite: true + + - name: client + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Client domain. 
+ overwrite: true + + - name: ip + type: ip + description: > + IP address of the client of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the client. + overwrite: true + + - name: source + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Source domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the source of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the source. + overwrite: true + + - name: destination + title: Destination + group: 2 + description: 'Destination fields describe details about the destination of a packet/event. + + Destination fields are usually populated in conjunction with source fields.' + type: group + fields: + - name: address + level: extended + type: keyword + ignore_above: 1024 + description: 'Some event destination addresses are defined ambiguously. The + event will sometimes list an IP, a domain or a unix socket. You should always + store the raw address in the `.address` field. + Then it should be duplicated to `.ip` or `.domain`, depending on which one + it is.' + overwrite: true + + - name: ip + level: core + type: ip + description: 'IP addess of the destination. + Can be one of multiple IPv4 or IPv6 addresses.' + overwrite: true + + - name: port + level: core + type: long + format: string + description: Port of the destination. + overwrite: true + + - name: user_agent + dynamic: false + title: User agent + description: > + The user_agent fields normally come from a browser request. They often + show up in web service logs coming from the parsed user agent string. + type: group + overwrite: true + fields: + + - name: original + type: keyword + description: > + Unparsed version of the user_agent. + example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" + overwrite: true + + multi_fields: + - name: text + type: text + description: > + Software agent acting in behalf of a user, eg. a web browser / OS combination. + overwrite: true + + - name: name + type: keyword + overwrite: true + example: Safari + description: > + Name of the user agent. + + - name: version + type: keyword + overwrite: true + description: > + Version of the user agent. + example: 12.0 + + - name: device + type: group + overwrite: true + title: Device + description: > + Information concerning the device. + fields: + + - name: name + type: keyword + overwrite: true + example: iPhone + description: > + Name of the device. + + - name: os + type: group + overwrite: true + title: Operating System + description: > + The OS fields contain information about the operating system. + fields: + + - name: platform + type: keyword + overwrite: true + description: > + Operating system platform (such centos, ubuntu, windows). + example: darwin + + - name: name + type: keyword + overwrite: true + example: "Mac OS X" + description: > + Operating system name, without the version. + + - name: full + type: keyword + overwrite: true + example: "Mac OS Mojave" + description: > + Operating system name, including the version or code name. 
+ + - name: family + type: keyword + overwrite: true + example: "debian" + description: > + OS family (such as redhat, debian, freebsd, windows). + + - name: version + type: keyword + overwrite: true + example: "10.14.1" + description: > + Operating system version as a raw string. + + - name: kernel + type: keyword + overwrite: true + example: "4.4.0-112-generic" + description: > + Operating system kernel version as a raw string. + + - name: cloud + title: Cloud + group: 2 + type: group + description: > + Cloud metadata reported by agents + fields: + - name: account + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account name + overwrite: true + - name: availability_zone + level: extended + type: keyword + ignore_above: 1024 + description: Cloud availability zone name + example: us-east1-a + overwrite: true + - name: instance + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine name + overwrite: true + - name: machine + type: group + dynamic: false + fields: + - name: type + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine type + example: t2.medium + overwrite: true + - name: project + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project name + overwrite: true + - name: provider + level: extended + type: keyword + ignore_above: 1024 + description: Cloud provider name + example: gcp + overwrite: true + - name: region + level: extended + type: keyword + ignore_above: 1024 + description: Cloud region name + example: us-east1 + overwrite: true + - name: service + type: group + dynamic: false + fields: + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: > + Cloud service name, intended to distinguish services running on + different platforms within a provider. + overwrite: true + - name: profile type: group dynamic: false @@ -11,15 +690,14 @@ count: 1 description: > Unique ID for the profile. - All samples within a profile will have the same profile ID. - name: duration type: long + unit: nanos count: 1 description: > - Duration of the profile, in microseconds. - + Duration of the profile, in nanoseconds. All samples within a profile will have the same duration. To aggregate durations, you should first group by the profile ID. @@ -28,10 +706,21 @@ fields: - name: ns type: long + unit: nanos count: 1 description: > Amount of CPU time profiled, in nanoseconds. + - name: wall + type: group + fields: + - name: us + type: long + unit: micros + count: 1 + description: > + Amount of wall time profiled, in microseconds. 
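
One thing to watch in the hunk above: profile.duration is now indexed in nanoseconds (unit: nanos), while the new wall-clock field wall.us stays in microseconds (unit: micros), alongside cpu.ns in nanoseconds. A small sketch of the corresponding Go conversions; the values intentionally match the updated profile_test.go below:

package main

import (
	"fmt"
	"time"
)

func main() {
	profileDuration := 10 * time.Second // -> profile.duration (nanoseconds)
	wall := 789 * time.Microsecond      // -> profile.wall.us (microseconds)

	fmt.Println(profileDuration.Nanoseconds()) // 10000000000
	fmt.Println(int64(profileDuration))        // same value: a time.Duration is already in ns
	fmt.Println(wall.Microseconds())           // 789
}
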
+ - name: samples type: group fields: diff --git a/model/profile_test.go b/model/profile_test.go index 0a021193e84..3d24f11a3a9 100644 --- a/model/profile_test.go +++ b/model/profile_test.go @@ -22,110 +22,73 @@ import ( "testing" "time" - pprof_profile "github.com/google/pprof/profile" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/transform" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" ) -func TestPprofProfileTransform(t *testing.T) { - serviceName, env := "myService", "staging" - service := model.Service{ - Name: serviceName, - Environment: env, - } - +func TestProfileSampleTransform(t *testing.T) { timestamp := time.Unix(123, 456) - pp := model.PprofProfile{ - Metadata: model.Metadata{Service: service}, - Profile: &pprof_profile.Profile{ - TimeNanos: timestamp.UnixNano(), - DurationNanos: int64(10 * time.Second), - SampleType: []*pprof_profile.ValueType{ - {Type: "cpu", Unit: "nanoseconds"}, - {Type: "inuse_space", Unit: "bytes"}, - }, - Sample: []*pprof_profile.Sample{{ - Value: []int64{123, 456}, - Label: map[string][]string{ - "key1": []string{"abc", "def"}, - "key2": []string{"ghi"}, - }, - Location: []*pprof_profile.Location{{ - Line: []pprof_profile.Line{{ - Function: &pprof_profile.Function{Name: "foo", Filename: "foo.go"}, - Line: 1, - }}, - }, { - Line: []pprof_profile.Line{{ - Function: &pprof_profile.Function{Name: "bar", Filename: "bar.go"}, - }}, - }}, - }, { - Value: []int64{123, 456}, - Label: map[string][]string{ - "key1": []string{"abc", "def"}, - "key2": []string{"ghi"}, - }, - Location: []*pprof_profile.Location{{ - Line: []pprof_profile.Line{{ - Function: &pprof_profile.Function{Name: "foo", Filename: "foo.go"}, - Line: 1, - }}, - }, { - Line: []pprof_profile.Line{{ - Function: &pprof_profile.Function{Name: "bar", Filename: "bar.go"}, - }}, - }}, - }}, + sample := model.ProfileSample{ + Duration: 10 * time.Second, + ProfileID: "profile_id", + Stack: []model.ProfileSampleStackframe{{ + ID: "foo_id", + Function: "foo", + Filename: "foo.go", + Line: 1, + }, { + ID: "bar_id", + Function: "bar", + Filename: "bar.go", + }}, + Values: map[string]int64{ + "samples.count": 1, + "cpu.ns": 123, + "wall.us": 789, + "inuse_space.bytes": 456, }, } - output := pp.Transform(context.Background(), &transform.Config{}) + batch := &model.Batch{{ + Timestamp: timestamp, + ProfileSample: &sample, + }, { + Timestamp: timestamp, + ProfileSample: &sample, + }} + output := batch.Transform(context.Background()) require.Len(t, output, 2) assert.Equal(t, output[0], output[1]) - if profileMap, ok := output[0].Fields["profile"].(common.MapStr); ok { - assert.NotZero(t, profileMap["id"]) - profileMap["id"] = "random" - } - assert.Equal(t, beat.Event{ Timestamp: timestamp, Fields: common.MapStr{ - "processor": common.MapStr{"event": "profile", "name": "profile"}, - "service": common.MapStr{ - "name": "myService", - "environment": "staging", - }, - "labels": common.MapStr{ - "key1": []string{"abc", "def"}, - "key2": []string{"ghi"}, - }, "profile": common.MapStr{ - "id": "random", + "id": "profile_id", "duration": int64(10 * time.Second), "cpu.ns": int64(123), + "wall.us": int64(789), "inuse_space.bytes": int64(456), + "samples.count": int64(1), "top": common.MapStr{ "function": "foo", "filename": "foo.go", "line": int64(1), - "id": "98430081820ed765", + "id": "foo_id", }, "stack": []common.MapStr{{ 
"function": "foo", "filename": "foo.go", "line": int64(1), - "id": "98430081820ed765", + "id": "foo_id", }, { "function": "bar", "filename": "bar.go", - "id": "48a37c90ad27a659", + "id": "bar_id", }}, }, }, diff --git a/model/service.go b/model/service.go index 373cb207ace..b0d7e4761be 100644 --- a/model/service.go +++ b/model/service.go @@ -29,7 +29,6 @@ type Service struct { Language Language Runtime Runtime Framework Framework - Agent Agent Node ServiceNode } @@ -51,19 +50,12 @@ type Framework struct { Version string } -//Agent has an optional version, name and an ephemeral id -type Agent struct { - Name string - Version string - EphemeralID string -} - type ServiceNode struct { Name string } //Fields transforms a service instance into a common.MapStr -func (s *Service) Fields(containerID, hostName string) common.MapStr { +func (s *Service) Fields() common.MapStr { if s == nil { return nil } @@ -72,7 +64,7 @@ func (s *Service) Fields(containerID, hostName string) common.MapStr { svc.maybeSetString("name", s.Name) svc.maybeSetString("version", s.Version) svc.maybeSetString("environment", s.Environment) - if node := s.Node.fields(containerID, hostName); node != nil { + if node := s.Node.fields(); node != nil { svc.set("node", node) } @@ -100,31 +92,9 @@ func (s *Service) Fields(containerID, hostName string) common.MapStr { return common.MapStr(svc) } -//AgentFields transforms all agent related information of a service into a common.MapStr -func (s *Service) AgentFields() common.MapStr { - if s == nil { - return nil - } - return s.Agent.fields() -} - -func (n *ServiceNode) fields(containerID, hostName string) common.MapStr { +func (n *ServiceNode) fields() common.MapStr { if n.Name != "" { return common.MapStr{"name": n.Name} } - if containerID != "" { - return common.MapStr{"name": containerID} - } - if hostName != "" { - return common.MapStr{"name": hostName} - } return nil } - -func (a *Agent) fields() common.MapStr { - var agent mapStr - agent.maybeSetString("name", a.Name) - agent.maybeSetString("version", a.Version) - agent.maybeSetString("ephemeral_id", a.EphemeralID) - return common.MapStr(agent) -} diff --git a/model/service_test.go b/model/service_test.go index e555c0546e9..08ce79a1110 100644 --- a/model/service_test.go +++ b/model/service_test.go @@ -26,37 +26,22 @@ import ( ) var ( - version, environment = "5.1.3", "staging" - langName, langVersion = "ecmascript", "8" - rtName, rtVersion = "node", "8.0.0" - fwName, fwVersion = "Express", "1.2.3" - agentName, agentVersion = "elastic-node", "1.0.0" + version, environment = "5.1.3", "staging" + langName, langVersion = "ecmascript", "8" + rtName, rtVersion = "node", "8.0.0" + fwName, fwVersion = "Express", "1.2.3" ) func TestServiceTransform(t *testing.T) { serviceName, serviceNodeName := "myService", "abc" tests := []struct { - Service Service - ContainerID, HostName string - Fields common.MapStr - AgentFields common.MapStr + Service Service + Fields common.MapStr }{ { - Service: Service{}, - AgentFields: nil, - Fields: nil, - }, { - Service: Service{}, - ContainerID: "foo", - HostName: "bar", - AgentFields: nil, - Fields: common.MapStr{"node": common.MapStr{"name": "foo"}}, - }, { - Service: Service{}, - HostName: "bar", - AgentFields: nil, - Fields: common.MapStr{"node": common.MapStr{"name": "bar"}}, + Service: Service{}, + Fields: nil, }, { Service: Service{ @@ -75,18 +60,8 @@ func TestServiceTransform(t *testing.T) { Name: fwName, Version: fwVersion, }, - Agent: Agent{ - Name: agentName, - Version: agentVersion, - }, Node: 
ServiceNode{Name: serviceNodeName}, }, - ContainerID: "foo", - HostName: "bar", - AgentFields: common.MapStr{ - "name": "elastic-node", - "version": "1.0.0", - }, Fields: common.MapStr{ "name": "myService", "version": "5.1.3", @@ -109,7 +84,6 @@ func TestServiceTransform(t *testing.T) { } for _, test := range tests { - assert.Equal(t, test.Fields, test.Service.Fields(test.ContainerID, test.HostName)) - assert.Equal(t, test.AgentFields, test.Service.AgentFields()) + assert.Equal(t, test.Fields, test.Service.Fields()) } } diff --git a/model/session.go b/model/session.go new file mode 100644 index 00000000000..e148a97c4d5 --- /dev/null +++ b/model/session.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import "github.com/elastic/beats/v7/libbeat/common" + +// Session holds information about a group of related transactions, such as +// a sequence of web interactions. +type Session struct { + // ID holds a session ID for grouping a set of related transactions. + ID string + + // Sequence holds an optional sequence number for a transaction + // within a session. Sequence is ignored if it is zero or if + // ID is empty. + Sequence int +} + +func (s *Session) fields() common.MapStr { + if s.ID == "" { + return nil + } + out := common.MapStr{"id": s.ID} + if s.Sequence > 0 { + out["sequence"] = s.Sequence + } + return out +} diff --git a/model/source.go b/model/source.go new file mode 100644 index 00000000000..e56596edd15 --- /dev/null +++ b/model/source.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "net" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// Source holds information about the source of a network exchange. +type Source struct { + // Domain holds the client's domain (FQDN). + Domain string + + // IP holds the client's IP address. + IP net.IP + + // Port holds the client's IP port. 
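+ // A zero or negative port is omitted from the emitted fields.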
+ Port int +} + +func (s *Source) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("domain", s.Domain) + if s.IP != nil { + fields.set("ip", s.IP.String()) + } + if s.Port > 0 { + fields.set("port", s.Port) + } + return common.MapStr(fields) +} diff --git a/model/sourcemap.go b/model/sourcemap.go deleted file mode 100644 index ce2de4de9a2..00000000000 --- a/model/sourcemap.go +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package model - -import ( - "context" - "time" - - "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/libbeat/monitoring" - - logs "github.com/elastic/apm-server/log" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" -) - -const ( - sourcemapProcessorName = "sourcemap" - sourcemapDocType = "sourcemap" -) - -var ( - // TODO(axw) SourcemapMetrics should be unexported, but it's - // being used for the decoder in processor/asset. We should - // give that its own metrics registry, and unexport this one. - - SourcemapMetrics = monitoring.Default.NewRegistry("apm-server.processor.sourcemap") - sourcemapCounter = monitoring.NewInt(SourcemapMetrics, "counter") - sourcemapProcessorEntry = common.MapStr{"name": sourcemapProcessorName, "event": sourcemapDocType} -) - -type Sourcemap struct { - ServiceName string - ServiceVersion string - Sourcemap string - BundleFilepath string -} - -func (pa *Sourcemap) Transform(ctx context.Context, cfg *transform.Config) []beat.Event { - sourcemapCounter.Inc() - if pa == nil { - return nil - } - - if cfg.RUM.SourcemapStore == nil { - logp.NewLogger(logs.Sourcemap).Error("Sourcemap Accessor is nil, cache cannot be invalidated.") - } else { - cfg.RUM.SourcemapStore.Added(ctx, pa.ServiceName, pa.ServiceVersion, pa.BundleFilepath) - } - - ev := beat.Event{ - Fields: common.MapStr{ - "processor": sourcemapProcessorEntry, - sourcemapDocType: common.MapStr{ - "bundle_filepath": utility.UrlPath(pa.BundleFilepath), - "service": common.MapStr{"name": pa.ServiceName, "version": pa.ServiceVersion}, - "sourcemap": pa.Sourcemap, - }, - }, - Timestamp: time.Now(), - } - return []beat.Event{ev} -} diff --git a/model/sourcemap/generated/schema/payload.go b/model/sourcemap/generated/schema/payload.go deleted file mode 100644 index 654207a71f2..00000000000 --- a/model/sourcemap/generated/schema/payload.go +++ /dev/null @@ -1,48 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const PayloadSchema = `{ - "$id": "docs/spec/sourcemaps/sourcemap-metadata.json", - "title": "Sourcemap Metadata", - "description": "Sourcemap Metadata", - "type": "object", - "properties": { - "bundle_filepath": { - "description": "relative path of the minified bundle file", - "type": "string", - "maxLength": 1024, - "minLength": 1 - }, - "service_version": { - "description": "Version of the service emitting this event", - "type": "string", - "maxLength": 1024, - "minLength": 1 - }, - "service_name": { - "description": "Immutable name of the service emitting this event", - "type": "string", - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024, - "minLength": 1 - } - }, - "required": ["bundle_filepath", "service_name", "service_version"] -} -` diff --git a/model/sourcemap_test.go b/model/sourcemap_test.go deleted file mode 100644 index 7921a53177d..00000000000 --- a/model/sourcemap_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package model_test - -import ( - "context" - "net/http" - "testing" - "time" - - "go.uber.org/zap/zapcore" - - s "github.com/go-sourcemap/sourcemap" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - - "github.com/elastic/apm-server/elasticsearch/estest" - logs "github.com/elastic/apm-server/log" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder" - "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/apm-server/transform" -) - -func getStr(data common.MapStr, key string) string { - rs, _ := data.GetValue(key) - return rs.(string) -} - -func TestTransform(t *testing.T) { - p := model.Sourcemap{ - ServiceName: "myService", - ServiceVersion: "1.0", - BundleFilepath: "/my/path", - Sourcemap: "mysmap", - } - - events := p.Transform(context.Background(), &transform.Config{}) - assert.Len(t, events, 1) - event := events[0] - - assert.WithinDuration(t, time.Now(), event.Timestamp, time.Second) - output := event.Fields["sourcemap"].(common.MapStr) - - assert.Equal(t, "/my/path", getStr(output, "bundle_filepath")) - assert.Equal(t, "myService", getStr(output, "service.name")) - assert.Equal(t, "1.0", getStr(output, "service.version")) - assert.Equal(t, "mysmap", getStr(output, "sourcemap")) -} - -func TestParseSourcemaps(t *testing.T) { - fileBytes, err := loader.LoadDataAsBytes("../testdata/sourcemap/bundle.js.map") - assert.NoError(t, err) - parser, err := s.Parse("", fileBytes) - assert.NoError(t, err) - - source, _, _, _, ok := parser.Source(1, 9) - assert.True(t, ok) - assert.Equal(t, "webpack:///bundle.js", source) -} - -func TestInvalidateCache(t *testing.T) { - // load sourcemap from file and decode - // - // TODO(axw) this should be done without decoding, - // or moved to a separate integration test package. - data, err := loader.LoadData("../testdata/sourcemap/payload.json") - assert.NoError(t, err) - decoded, err := modeldecoder.DecodeSourcemap(data) - require.NoError(t, err) - event := decoded.(*model.Sourcemap) - - t.Run("withSourcemapStore", func(t *testing.T) { - // collect logs - require.NoError(t, logp.DevelopmentSetup(logp.ToObserverOutput())) - - // create sourcemap store - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, http.StatusOK, nil)) - require.NoError(t, err) - store, err := sourcemap.NewStore(client, "foo", time.Minute) - require.NoError(t, err) - - // transform with sourcemap store - event.Transform(context.Background(), &transform.Config{RUM: transform.RUMConfig{SourcemapStore: store}}) - - logCollection := logp.ObserverLogs().TakeAll() - assert.Equal(t, 2, len(logCollection)) - - // first sourcemap was added - for i, entry := range logCollection { - assert.Equal(t, logs.Sourcemap, entry.LoggerName) - assert.Equal(t, zapcore.DebugLevel, entry.Level) - if i == 0 { - assert.Contains(t, entry.Message, "Added id service_1_js/bundle.js. Cache now has 1 entries.") - } else { - assert.Contains(t, entry.Message, "Removed id service_1_js/bundle.js. 
Cache now has 0 entries.") - } - } - - }) - - t.Run("noSourcemapStore", func(t *testing.T) { - // collect logs - require.NoError(t, logp.DevelopmentSetup(logp.ToObserverOutput())) - - // transform with no sourcemap store - event.Transform(context.Background(), &transform.Config{RUM: transform.RUMConfig{}}) - - logCollection := logp.ObserverLogs().TakeAll() - assert.Equal(t, 1, len(logCollection)) - for _, entry := range logCollection { - assert.Equal(t, logs.Sourcemap, entry.LoggerName) - assert.Equal(t, zapcore.ErrorLevel, entry.Level) - assert.Contains(t, entry.Message, "cache cannot be invalidated") - } - - }) -} diff --git a/model/span.go b/model/span.go index 41f9899579c..2fdc1412858 100644 --- a/model/span.go +++ b/model/span.go @@ -18,68 +18,50 @@ package model import ( - "context" - "net" "time" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/monitoring" - - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" -) - -const ( - spanDocType = "span" ) var ( - spanMetrics = monitoring.Default.NewRegistry("apm-server.processor.span") - spanTransformations = monitoring.NewInt(spanMetrics, "transformations") - spanStacktraceCounter = monitoring.NewInt(spanMetrics, "stacktraces") - spanFrameCounter = monitoring.NewInt(spanMetrics, "frames") - spanProcessorEntry = common.MapStr{"name": "transaction", "event": spanDocType} + // SpanProcessor is the Processor value that should be assigned to span events. + SpanProcessor = Processor{Name: "transaction", Event: "span"} ) type Span struct { - Metadata Metadata - ID string - TransactionID string - ParentID string - ChildIDs []string - TraceID string + ID string + + // Name holds the span name: "SELECT FROM table_name", etc. + Name string - Timestamp time.Time + // Type holds the span type: "external", "db", etc. + Type string + + // Subtype holds the span subtype: "http", "sql", etc. + Subtype string + + // Action holds the span action: "query", "execute", etc. + Action string + + // Start holds the span's offset from the transaction timestamp in milliseconds. + // + // TODO(axw) drop in 8.0. See https://github.com/elastic/apm-server/issues/6000) + Start *float64 + + // SelfTime holds the aggregated span durations, for breakdown metrics. + SelfTime AggregatedDuration Message *Message - Name string - Outcome string - Start *float64 - Duration float64 - Service *Service Stacktrace Stacktrace Sync *bool - Labels common.MapStr - - Type string - Subtype *string - Action *string DB *DB - HTTP *HTTP - Destination *Destination DestinationService *DestinationService + Composite *Composite - // RUM records whether or not this is a RUM span, - // and should have its stack frames sourcemapped. - RUM bool - - Experimental interface{} - - // RepresentativeCount, if positive, holds the approximate number of - // spans that this span represents for aggregation. This will only be set - // when the sampling rate is known. + // RepresentativeCount holds the approximate number of spans that + // this span represents for aggregation. This will only be set when + // the sampling rate is known. // // This may be used for scaling metrics; it is not indexed. 
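+ // For example, with a sampling rate of 0.25 a sampled span stands in for roughly four spans, so RepresentativeCount would be 4.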
RepresentativeCount float64 @@ -87,171 +69,103 @@ type Span struct { // DB contains information related to a database query of a span event type DB struct { - Instance *string - Statement *string - Type *string - UserName *string - Link *string + Instance string + Statement string + Type string + UserName string + Link string RowsAffected *int } -// HTTP contains information about the outgoing http request information of a span event -// -// TODO(axw) combine this and "Http", which is used by transaction and error, into one type. -type HTTP struct { - URL *string - StatusCode *int - Method *string - Response *MinimalResp -} - -// Destination contains contextual data about the destination of a span, such as address and port -type Destination struct { - Address *string - Port *int -} - // DestinationService contains information about the destination service of a span event type DestinationService struct { - Type *string - Name *string - Resource *string -} + Type string // Deprecated + Name string // Deprecated + Resource string -func (db *DB) fields() common.MapStr { - if db == nil { - return nil - } - var fields = common.MapStr{} - utility.Set(fields, "instance", db.Instance) - utility.Set(fields, "statement", db.Statement) - utility.Set(fields, "type", db.Type) - utility.Set(fields, "rows_affected", db.RowsAffected) - if db.UserName != nil { - utility.Set(fields, "user", common.MapStr{"name": db.UserName}) - } - utility.Set(fields, "link", db.Link) - return fields + // ResponseTime holds aggregated span durations for the destination service resource. + ResponseTime AggregatedDuration } -func (http *HTTP) fields() common.MapStr { - if http == nil { - return nil - } - var fields = common.MapStr{} - if http.URL != nil { - utility.Set(fields, "url", common.MapStr{"original": http.URL}) - } - response := http.Response.Fields() - if http.StatusCode != nil { - if response == nil { - response = common.MapStr{"status_code": *http.StatusCode} - } else if http.Response.StatusCode == nil { - response["status_code"] = *http.StatusCode - } - } - utility.Set(fields, "response", response) - utility.Set(fields, "method", http.Method) - return fields +// Composite holds details on a group of spans compressed into one. 
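+// Count holds the number of compressed spans the composite represents, Sum holds their total duration in milliseconds (indexed as composite.sum.us, in microseconds), and CompressionStrategy records the strategy that was used to merge them.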
+type Composite struct { + Count int + Sum float64 // milliseconds + CompressionStrategy string } -func (d *Destination) fields() common.MapStr { - if d == nil { +func (db *DB) fields() common.MapStr { + if db == nil { return nil } - var fields = common.MapStr{} - if d.Address != nil { - address := *d.Address - fields["address"] = address - if ip := net.ParseIP(address); ip != nil { - fields["ip"] = address - } + var fields, user mapStr + fields.maybeSetString("instance", db.Instance) + fields.maybeSetString("statement", db.Statement) + fields.maybeSetString("type", db.Type) + fields.maybeSetString("link", db.Link) + fields.maybeSetIntptr("rows_affected", db.RowsAffected) + if user.maybeSetString("name", db.UserName) { + fields.set("user", common.MapStr(user)) } - utility.Set(fields, "port", d.Port) - return fields + return common.MapStr(fields) } func (d *DestinationService) fields() common.MapStr { if d == nil { return nil } - var fields = common.MapStr{} - utility.Set(fields, "type", d.Type) - utility.Set(fields, "name", d.Name) - utility.Set(fields, "resource", d.Resource) - return fields + var fields mapStr + fields.maybeSetString("type", d.Type) + fields.maybeSetString("name", d.Name) + fields.maybeSetString("resource", d.Resource) + fields.maybeSetMapStr("response_time", d.ResponseTime.fields()) + return common.MapStr(fields) } -func (e *Span) Transform(ctx context.Context, cfg *transform.Config) []beat.Event { - spanTransformations.Inc() - if frames := len(e.Stacktrace); frames > 0 { - spanStacktraceCounter.Inc() - spanFrameCounter.Add(int64(frames)) - } - - fields := common.MapStr{ - "processor": spanProcessorEntry, - spanDocType: e.fields(ctx, cfg), - } - - // first set the generic metadata - e.Metadata.Set(fields) - - // then add event specific information - utility.DeepUpdate(fields, "service", e.Service.Fields("", "")) - utility.DeepUpdate(fields, "agent", e.Service.AgentFields()) - // merges with metadata labels, overrides conflicting keys - utility.DeepUpdate(fields, "labels", e.Labels) - utility.AddID(fields, "parent", e.ParentID) - if e.ChildIDs != nil { - utility.Set(fields, "child", common.MapStr{"id": e.ChildIDs}) +func (c *Composite) fields() common.MapStr { + if c == nil { + return nil } - utility.AddID(fields, "trace", e.TraceID) - utility.AddID(fields, "transaction", e.TransactionID) - utility.Set(fields, "experimental", e.Experimental) - utility.Set(fields, "destination", e.Destination.fields()) - utility.Set(fields, "timestamp", utility.TimeAsMicros(e.Timestamp)) - utility.DeepUpdate(fields, "event.outcome", e.Outcome) + var fields mapStr + sumDuration := time.Duration(c.Sum * float64(time.Millisecond)) + fields.set("sum", common.MapStr{"us": int(sumDuration.Microseconds())}) + fields.set("count", c.Count) + fields.set("compression_strategy", c.CompressionStrategy) - return []beat.Event{ - { - Fields: fields, - Timestamp: e.Timestamp, - }, - } + return common.MapStr(fields) } -func (e *Span) fields(ctx context.Context, cfg *transform.Config) common.MapStr { - if e == nil { - return nil +func (e *Span) setFields(fields *mapStr, apmEvent *APMEvent) { + var span mapStr + span.maybeSetString("name", e.Name) + span.maybeSetString("type", e.Type) + span.maybeSetString("id", e.ID) + span.maybeSetString("subtype", e.Subtype) + span.maybeSetString("action", e.Action) + span.maybeSetBool("sync", e.Sync) + if e.Start != nil { + start := time.Duration(*e.Start * float64(time.Millisecond)) + span.set("start", common.MapStr{"us": int(start.Microseconds())}) } - fields := 
common.MapStr{} - if e.ID != "" { - utility.Set(fields, "id", e.ID) + if apmEvent.Processor == SpanProcessor { + // TODO(axw) set `event.duration` in 8.0, and remove this field. + // See https://github.com/elastic/apm-server/issues/5999 + span.set("duration", common.MapStr{"us": int(apmEvent.Event.Duration.Microseconds())}) } - utility.Set(fields, "subtype", e.Subtype) - utility.Set(fields, "action", e.Action) - - // common - utility.Set(fields, "name", e.Name) - utility.Set(fields, "type", e.Type) - utility.Set(fields, "sync", e.Sync) - - if e.Start != nil { - utility.Set(fields, "start", utility.MillisAsMicros(*e.Start)) + span.maybeSetMapStr("db", e.DB.fields()) + span.maybeSetMapStr("message", e.Message.Fields()) + span.maybeSetMapStr("composite", e.Composite.fields()) + if destinationServiceFields := e.DestinationService.fields(); len(destinationServiceFields) > 0 { + destinationMap, ok := span["destination"].(common.MapStr) + if !ok { + destinationMap = make(common.MapStr) + span.set("destination", destinationMap) + } + destinationMap["service"] = destinationServiceFields } - - utility.Set(fields, "duration", utility.MillisAsMicros(e.Duration)) - - utility.Set(fields, "db", e.DB.fields()) - utility.Set(fields, "http", e.HTTP.fields()) - utility.DeepUpdate(fields, "destination.service", e.DestinationService.fields()) - - utility.Set(fields, "message", e.Message.Fields()) - - // TODO(axw) we should be using a merged service object, combining - // the stream metadata and event-specific service info. - st := e.Stacktrace.transform(ctx, cfg, e.RUM, &e.Metadata.Service) - utility.Set(fields, "stacktrace", st) - return fields + if st := e.Stacktrace.transform(); len(st) > 0 { + span.set("stacktrace", st) + } + span.maybeSetMapStr("self_time", e.SelfTime.fields()) + fields.maybeSetMapStr("span", common.MapStr(span)) } diff --git a/model/span/_meta/fields.yml b/model/span/_meta/fields.yml index 9e460c9f569..84935c8b14f 100644 --- a/model/span/_meta/fields.yml +++ b/model/span/_meta/fields.yml @@ -2,9 +2,752 @@ title: APM Span description: Span-specific data for APM. fields: - - name: view spans - format: url - label_template: "View Spans" + - name: processor.name + type: keyword + description: Processor name. + overwrite: true + + - name: processor.event + type: keyword + description: Processor event. + overwrite: true + + - name: timestamp + type: group + fields: + - name: us + type: long + count: 1 + description: > + Timestamp of the event in microseconds since Unix epoch. + overwrite: true + + - name: labels + type: object + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 + dynamic: true + overwrite: true + description: > + A flat mapping of user-defined labels with string, boolean or number values. + + - name: service + type: group + dynamic: false + description: > + Service fields. + fields: + - name: name + type: keyword + description: > + Immutable name of the service emitting this event. + overwrite: true + + - name: version + type: keyword + description: > + Version of the service emitting this event. + overwrite: true + + - name: environment + type: keyword + description: > + Service environment. + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Unique meaningful name of the service node. + overwrite: true + + - name: language + type: group + fields: + + - name: name + type: keyword + description: > + Name of the programming language used. 
+ overwrite: true + + - name: version + type: keyword + description: > + Version of the programming language used. + overwrite: true + + - name: runtime + type: group + fields: + + - name: name + type: keyword + description: > + Name of the runtime used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the runtime used. + overwrite: true + + - name: framework + type: group + fields: + + - name: name + type: keyword + description: > + Name of the framework used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the framework used. + overwrite: true + + - name: transaction + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The transaction ID. + overwrite: true + - name: sampled + type: boolean + description: > + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. + overwrite: true + - name: type + type: keyword + description: > + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) + overwrite: true + - name: name + type: keyword + multi_fields: + - name: text + type: text + description: > + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + overwrite: true + + - name: trace + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the trace to which the event belongs. + overwrite: true + + - name: parent + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the parent event. + overwrite: true + + - name: agent + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + Name of the agent used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the agent used. + overwrite: true + + - name: ephemeral_id + type: keyword + description: > + The Ephemeral ID identifies a running process. + overwrite: true + + - name: container + type: group + dynamic: false + title: Container + description: > + Container fields are used for meta information about the specific container + that is the source of information. These fields help correlate data based on + containers from any runtime. + fields: + + - name: id + type: keyword + description: > + Unique container id. + overwrite: true + + - name: kubernetes + type: group + dynamic: false + title: Kubernetes + description: > + Kubernetes metadata reported by agents + fields: + + - name: namespace + type: keyword + description: > + Kubernetes namespace + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Kubernetes node name + overwrite: true + + - name: pod + type: group + fields: + + - name: name + type: keyword + description: > + Kubernetes pod name + overwrite: true + + - name: uid + type: keyword + description: > + Kubernetes Pod UID + overwrite: true + + - name: network + type: group + dynamic: false + description: > + Optional network fields + fields: + + - name: connection + type: group + description: > + Network connection details + fields: + + - name: type + type: keyword + description: > + Network connection type, eg. "wifi", "cell" + overwrite: true + + - name: subtype + type: keyword + description: > + Detailed network connection sub-type, e.g.
"LTE", "CDMA" + overwrite: true + + - name: carrier + type: group + description: > + Network operator + fields: + + - name: name + type: keyword + overwrite: true + description: > + Carrier name, eg. Vodafone, T-Mobile, etc. + + - name: mcc + type: keyword + overwrite: true + description: > + Mobile country code + + - name: mnc + type: keyword + overwrite: true + description: > + Mobile network code + + - name: icc + type: keyword + overwrite: true + description: > + ISO country code, eg. US + + - name: host + type: group + dynamic: false + description: > + Optional host fields. + fields: + + - name: architecture + type: keyword + description: > + The architecture of the host the event was recorded on. + overwrite: true + + - name: hostname + type: keyword + description: > + The hostname of the host the event was recorded on. + overwrite: true + + - name: name + type: keyword + description: > + Name of the host the event was recorded on. + It can contain same information as host.hostname or a name specified by the user. + overwrite: true + + - name: ip + type: ip + description: > + IP of the host that records the event. + overwrite: true + + - name: os + title: Operating System + group: 2 + description: > + The OS fields contain information about the operating system. + type: group + fields: + - name: platform + type: keyword + description: > + The platform of the host the event was recorded on. + overwrite: true + + - name: process + type: group + dynamic: false + description: > + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: > + Process arguments. + May be filtered to protect sensitive information. + overwrite: true + + - name: pid + type: long + description: > + Numeric process ID of the service process. + overwrite: true + + - name: ppid + type: long + description: > + Numeric ID of the service's parent process. + overwrite: true + + - name: title + type: keyword + description: > + Service process title. + overwrite: true + + - name: observer + type: group + dynamic: false + fields: + + - name: listening + type: keyword + overwrite: true + description: > + Address the server is listening on. + + - name: hostname + type: keyword + overwrite: true + description: > + Hostname of the APM Server. + + - name: version + type: keyword + overwrite: true + description: > + APM Server version. + + - name: version_major + type: byte + overwrite: true + description: > + Major version number of the observer + + - name: type + type: keyword + overwrite: true + description: > + The type will be set to `apm-server`. + + - name: id + type: keyword + overwrite: true + description: > + Unique identifier of the APM Server. + + - name: ephemeral_id + type: keyword + overwrite: true + description: > + Ephemeral identifier of the APM Server. + + - name: user + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + The username of the logged in user. + overwrite: true + + - name: domain + type: keyword + description: > + Domain of the logged in user. + overwrite: true + + - name: id + type: keyword + description: > + Identifier of the logged in user. + overwrite: true + + - name: email + type: keyword + description: > + Email of the logged in user. + overwrite: true + + - name: client + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Client domain. 
+ overwrite: true + + - name: ip + type: ip + description: > + IP address of the client of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the client. + overwrite: true + + - name: source + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Source domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the source of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the source. + overwrite: true + + - name: destination + title: Destination + group: 2 + description: 'Destination fields describe details about the destination of a packet/event. + + Destination fields are usually populated in conjunction with source fields.' + type: group + fields: + - name: address + level: extended + type: keyword + ignore_above: 1024 + description: 'Some event destination addresses are defined ambiguously. The + event will sometimes list an IP, a domain or a unix socket. You should always + store the raw address in the `.address` field. + Then it should be duplicated to `.ip` or `.domain`, depending on which one + it is.' + overwrite: true + + - name: ip + level: core + type: ip + description: 'IP address of the destination. + Can be one of multiple IPv4 or IPv6 addresses.' + overwrite: true + + - name: port + level: core + type: long + format: string + description: Port of the destination. + overwrite: true + + - name: user_agent + dynamic: false + title: User agent + description: > + The user_agent fields normally come from a browser request. They often + show up in web service logs coming from the parsed user agent string. + type: group + overwrite: true + fields: + + - name: original + type: keyword + description: > + Unparsed version of the user_agent. + example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" + overwrite: true + + multi_fields: + - name: text + type: text + description: > + Software agent acting on behalf of a user, e.g. a web browser / OS combination. + overwrite: true + + - name: name + type: keyword + overwrite: true + example: Safari + description: > + Name of the user agent. + + - name: version + type: keyword + overwrite: true + description: > + Version of the user agent. + example: 12.0 + + - name: device + type: group + overwrite: true + title: Device + description: > + Information concerning the device. + fields: + + - name: name + type: keyword + overwrite: true + example: iPhone + description: > + Name of the device. + + - name: os + type: group + overwrite: true + title: Operating System + description: > + The OS fields contain information about the operating system. + fields: + + - name: platform + type: keyword + overwrite: true + description: > + Operating system platform (such as centos, ubuntu, windows). + example: darwin + + - name: name + type: keyword + overwrite: true + example: "Mac OS X" + description: > + Operating system name, without the version. + + - name: full + type: keyword + overwrite: true + example: "Mac OS Mojave" + description: > + Operating system name, including the version or code name.
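
The destination.address description above encodes the same rule the removed Destination.fields() in model/span.go applied for the ip case: keep the raw value in .address and duplicate it to .ip or .domain depending on what it parses as. A sketch of the complete rule with a hypothetical destinationFields helper (not part of this patch):

package main

import (
	"fmt"
	"net"
)

// destinationFields is a hypothetical helper illustrating the rule above:
// always store the raw address, then duplicate it into ip or domain.
func destinationFields(address string, port int) map[string]interface{} {
	fields := map[string]interface{}{}
	if address != "" {
		fields["address"] = address
		if net.ParseIP(address) != nil {
			fields["ip"] = address
		} else {
			fields["domain"] = address
		}
	}
	if port > 0 {
		fields["port"] = port
	}
	return fields
}

func main() {
	fmt.Println(destinationFields("elastic.co", 443)) // map[address:elastic.co domain:elastic.co port:443]
	fmt.Println(destinationFields("127.0.0.1", 80))   // map[address:127.0.0.1 ip:127.0.0.1 port:80]
}
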
+ + - name: family + type: keyword + overwrite: true + example: "debian" + description: > + OS family (such as redhat, debian, freebsd, windows). + + - name: version + type: keyword + overwrite: true + example: "10.14.1" + description: > + Operating system version as a raw string. + + - name: kernel + type: keyword + overwrite: true + example: "4.4.0-112-generic" + description: > + Operating system kernel version as a raw string. + + - name: cloud + title: Cloud + group: 2 + type: group + description: > + Cloud metadata reported by agents + fields: + - name: account + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account name + overwrite: true + - name: availability_zone + level: extended + type: keyword + ignore_above: 1024 + description: Cloud availability zone name + example: us-east1-a + overwrite: true + - name: instance + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine name + overwrite: true + - name: machine + type: group + dynamic: false + fields: + - name: type + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine type + example: t2.medium + overwrite: true + - name: project + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project name + overwrite: true + - name: provider + level: extended + type: keyword + ignore_above: 1024 + description: Cloud provider name + example: gcp + overwrite: true + - name: region + level: extended + type: keyword + ignore_above: 1024 + description: Cloud region name + example: us-east1 + overwrite: true + - name: service + type: group + dynamic: false + fields: + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: > + Cloud service name, intended to distinguish services running on + different platforms within a provider. + overwrite: true + + - name: event + type: group + fields: + + - name: outcome + level: core + type: keyword + ignore_above: 1024 + description: > + `event.outcome` simply denotes whether the event represents a success or a + failure from the perspective of the entity that produced the event. + example: success + overwrite: true - name: child type: group @@ -13,15 +756,29 @@ - name: id type: keyword description: > - The ID(s)s of the child event(s). + The ID(s) of the child event(s). - name: span type: group dynamic: false fields: + - name: type + type: keyword + count: 1 + description: > + Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', 'cache', etc). + overwrite: true + + - name: subtype + type: keyword + count: 1 + description: > + A further sub-division of the type (e.g. postgresql, elasticsearch) + overwrite: true - name: id type: keyword + overwrite: true description: > The ID of the span stored as hex encoded string. @@ -91,11 +848,13 @@ type: keyword description: > Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type. 
+ DEPRECATED: this field will be removed in a future release - name: name type: keyword description: > Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq') + DEPRECATED: this field will be removed in a future release - name: resource type: keyword @@ -124,3 +883,28 @@ type: long description: > Age of a message in milliseconds. + + - name: composite + type: group + dynamic: false + fields: + + - name: count + type: long + description: > + Number of compressed spans the composite span represents. + + - name: sum + type: group + fields: + + - name: us + type: long + count: 1 + description: > + Sum of the durations of the compressed spans, in microseconds. + + - name: compression_strategy + type: keyword + description: > + The compression strategy that was used. diff --git a/model/span/generated/schema/rum_v3_span.go b/model/span/generated/schema/rum_v3_span.go deleted file mode 100644 index e4488df7195..00000000000 --- a/model/span/generated/schema/rum_v3_span.go +++ /dev/null @@ -1,364 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const RUMV3Schema = `{ - "$id": "docs/spec/spans/rum_v3_span.json", - "description": "An event captured by an agent occurring in a monitored service", - "allOf": [ - { - "properties": { - "id": { - "description": "Hex encoded 64 random bits ID of the span.", - "type": "string", - "maxLength": 1024 - }, - "pi": { - "description": "Index of the parent span in the list. Absent when the parent is a transaction.", - "type": ["integer", "null"], - "maxLength": 1024 - }, - "s": { - "type": [ - "number", - "null" - ], - "description": "Offset relative to the transaction's timestamp identifying the start of the span, in milliseconds" - }, - "sr": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "t": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - }, - "su": { - "type": [ - "string", - "null" - ], - "description": "A further sub-division of the type (e.g. postgresql, elasticsearch)", - "maxLength": 1024 - }, - "ac": { - "type": [ - "string", - "null" - ], - "description": "The specific kind of event within the sub-type represented by the span (e.g. query, connect)", - "maxLength": 1024 - }, - "o": { - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. 
This field can be used for calculating error rates.", - "description": "The outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. This field can be used for calculating error rates for outgoing requests." - }, - "c": { - "type": [ - "object", - "null" - ], - "description": "Any other arbitrary data captured by the agent, optionally provided by the user", - "properties": { - "dt": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data about the destination for spans", - "properties": { - "ad": { - "type": [ - "string", - "null" - ], - "description": "Destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') or IPv6 (e.g. '::1')", - "maxLength": 1024 - }, - "po": { - "type": [ - "integer", - "null" - ], - "description": "Destination network port (e.g. 443)" - }, - "se": { - "description": "Destination service context", - "type": [ - "object", - "null" - ], - "properties": { - "t": { - "description": "Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "n": { - "description": "Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "rc": { - "description": "Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "t", - "n", - "rc" - ] - } - } - }, - "h": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data of the related http request.", - "properties": { - "url": { - "type": [ - "string", - "null" - ], - "description": "The raw url of the correlating http request." - }, - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "mt": { - "type": [ - "string", - "null" - ], - "maxLength": 1024, - "description": "The method of the http request." - } - } - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "se": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. 
\"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - } - } - } - } - }, - "d": { - "type": "number", - "description": "Duration of the span in milliseconds", - "minimum": 0 - }, - "n": { - "type": "string", - "description": "Generic designation of a span in the scope of a transaction", - "maxLength": 1024 - }, - "st": { - "type": [ - "array", - "null" - ], - "description": "List of stack frames with variable attributes (eg: lineno, filename, etc)", - "items": { - "$id": "docs/spec/rum_v3_stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "ap": { - "description": "The absolute path of the file involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "co": { - "description": "Column number", - "type": [ - "integer", - "null" - ] - }, - "cli": { - "description": "The line of code part of the stack frame", - "type": [ - "string", - "null" - ] - }, - "f": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": [ - "string", - "null" - ] - }, - "cn": { - "description": "The classname of the code involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "fn": { - "description": "The function involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "li": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": [ - "integer", - "null" - ] - }, - "mo": { - "description": "The module to which frame belongs to", - "type": [ - "string", - "null" - ] - }, - "poc": { - "description": "The lines of code after the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - }, - "prc": { - "description": "The lines of code before the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": [ - "f" - ] - }, - "minItems": 0 - }, - "sy": { - "type": [ - "boolean", - "null" - ], - "description": "Indicates whether the span was executed synchronously or asynchronously." - } - }, - "required": [ - "d", - "n", - "t", - "id" - ] - }, - { - "required": [ - "s" - ], - "properties": { - "s": { - "type": "number" - } - } - } - ] -} -` diff --git a/model/span/generated/schema/span.go b/model/span/generated/schema/span.go deleted file mode 100644 index 06f1f6239fe..00000000000 --- a/model/span/generated/schema/span.go +++ /dev/null @@ -1,434 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const ModelSchema = `{ - "$id": "docs/spec/spans/span.json", - "type": "object", - "description": "An event captured by an agent occurring in a monitored service", - "allOf": [ - { "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - } }, - { "$id": "docs/spec/span_type.json", - "title": "Span Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - } - } }, - { "$id": "docs/spec/span_subtype.json", - "title": "Span Subtype", - "type": ["object"], - "properties": { - "subtype": { - "type": ["string", "null"], - "description": "A further sub-division of the type (e.g. postgresql, elasticsearch)", - "maxLength": 1024 - } - } }, - { - "properties": { - "id": { - "description": "Hex encoded 64 random bits ID of the span.", - "type": "string", - "maxLength": 1024 - }, - "transaction_id": { - "type": ["string", "null"], - "description": "Hex encoded 64 random bits ID of the correlated transaction.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span.", - "type": "string", - "maxLength": 1024 - }, - "child_ids": { - "description": "List of successor transactions and/or spans.", - "type": ["array", "null"], - "minItems": 0, - "maxLength": 1024, - "items": { - "type": "string", - "maxLength": 1024 - } - }, - "start": { - "type": ["number", "null"], - "description": "Offset relative to the transaction's timestamp identifying the start of the span, in milliseconds" - }, - "sample_rate": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "action": { - "type": ["string", "null"], - "description": "The specific kind of event within the sub-type represented by the span (e.g. query, connect)", - "maxLength": 1024 - }, - "outcome": { - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates.", - "description": "The outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. This field can be used for calculating error rates for outgoing requests." 
-    },
-    "context": {
-      "type": ["object", "null"],
-      "description": "Any other arbitrary data captured by the agent, optionally provided by the user",
-      "properties": {
-        "destination": {
-          "type": ["object", "null"],
-          "description": "An object containing contextual data about the destination for spans",
-          "properties": {
-            "address": {
-              "type": ["string", "null"],
-              "description": "Destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') or IPv6 (e.g. '::1')",
-              "maxLength": 1024
-            },
-            "port": {
-              "type": ["integer", "null"],
-              "description": "Destination network port (e.g. 443)"
-            },
-            "service": {
-              "description": "Destination service context",
-              "type": ["object", "null"],
-              "properties": {
-                "type": {
-                  "description": "Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type.",
-                  "type": ["string", "null"],
-                  "maxLength": 1024
-                },
-                "name": {
-                  "description": "Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq')",
-                  "type": ["string", "null"],
-                  "maxLength": 1024
-                },
-                "resource": {
-                  "description": "Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name')",
-                  "type": ["string", "null"],
-                  "maxLength": 1024
-                }
-              },
-              "required": ["type", "name", "resource"]
-            }
-          }
-        },
-        "db": {
-          "type": ["object", "null"],
-          "description": "An object containing contextual data for database spans",
-          "properties": {
-            "instance": {
-              "type": ["string", "null"],
-              "description": "Database instance name"
-            },
-            "link": {
-              "type": ["string", "null"],
-              "maxLength": 1024,
-              "description": "Database link"
-            },
-            "statement": {
-              "type": ["string", "null"],
-              "description": "A database statement (e.g. query) for the given database type"
-            },
-            "type": {
-              "type": ["string", "null"],
-              "description": "Database type. For any SQL database, \"sql\". For others, the lower-case database category, e.g. \"cassandra\", \"hbase\", or \"redis\""
-            },
-            "user": {
-              "type": ["string", "null"],
-              "description": "Username for accessing database"
-            },
-            "rows_affected": {
-              "type": ["integer", "null"],
-              "description": "Number of rows affected by the SQL statement (if applicable)"
-            }
-          }
-        },
-        "http": {
-          "type": ["object", "null"],
-          "description": "An object containing contextual data of the related http request.",
-          "properties": {
-            "url": {
-              "type": ["string", "null"],
-              "description": "The raw url of the correlating http request."
-            },
-            "status_code": {
-              "type": ["integer", "null"],
-              "description": "Deprecated: Use span.context.http.response.status_code instead."
-            },
-            "method": {
-              "type": ["string", "null"],
-              "maxLength": 1024,
-              "description": "The method of the http request."
-            },
-            "response": {
-              "$id": "docs/spec/http_response.json",
-              "title": "HTTP response object",
-              "description": "HTTP response object, used by error, span and transaction documents",
-              "type": ["object", "null"],
-              "properties": {
-                "status_code": {
-                  "type": ["integer", "null"],
-                  "description": "The status code of the http request."
-                },
-                "transfer_size": {
-                  "type": ["number", "null"],
-                  "description": "Total size of the payload."
-                },
-                "encoded_body_size": {
-                  "type": ["number", "null"],
-                  "description": "The encoded size of the payload."
-                },
-                "decoded_body_size": {
-                  "type": ["number", "null"],
-                  "description": "The decoded size of the payload."
- }, - "headers": { - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } - } - } - }, - "tags": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "service": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - } - } - }, - "message": { - "$id": "docs/spec/message.json", - "title": "Message", - "description": "Details related to message receiving and publishing if the captured event integrates with a messaging system", - "type": ["object", "null"], - "properties": { - "queue": { - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the message queue where the message is received.", - "type": ["string","null"], - "maxLength": 1024 - } - } - }, - "age": { - "type": ["object", "null"], - "properties": { - "ms": { - "description": "The age of the message in milliseconds. If the instrumented messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. 
If a timestamp is not available, agents should omit this field.",
-                  "type": ["integer", "null"]
-                }
-              }
-            },
-            "body": {
-              "description": "message body, similar to an http request body",
-              "type": ["string", "null"]
-            },
-            "headers": {
-              "description": "message headers, similar to http request headers",
-              "type": ["object", "null"],
-              "patternProperties": {
-                "[.*]*$": {
-                  "type": ["string", "array", "null"],
-                  "items": {
-                    "type": ["string"]
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    },
-    "duration": {
-      "type": "number",
-      "description": "Duration of the span in milliseconds",
-      "minimum": 0
-    },
-    "name": {
-      "type": "string",
-      "description": "Generic designation of a span in the scope of a transaction",
-      "maxLength": 1024
-    },
-    "stacktrace": {
-      "type": ["array", "null"],
-      "description": "List of stack frames with variable attributes (eg: lineno, filename, etc)",
-      "items": {
-        "$id": "docs/spec/stacktrace_frame.json",
-        "title": "Stacktrace",
-        "type": "object",
-        "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame",
-        "properties": {
-          "abs_path": {
-            "description": "The absolute path of the file involved in the stack frame",
-            "type": ["string", "null"]
-          },
-          "colno": {
-            "description": "Column number",
-            "type": ["integer", "null"]
-          },
-          "context_line": {
-            "description": "The line of code part of the stack frame",
-            "type": ["string", "null"]
-          },
-          "filename": {
-            "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming",
-            "type": ["string", "null"]
-          },
-          "classname": {
-            "description": "The classname of the code involved in the stack frame",
-            "type": ["string", "null"]
-          },
-          "function": {
-            "description": "The function involved in the stack frame",
-            "type": ["string", "null"]
-          },
-          "library_frame": {
-            "description": "A boolean, indicating if this frame is from a library or user code",
-            "type": ["boolean", "null"]
-          },
-          "lineno": {
-            "description": "The line number of code part of the stack frame, used e.g. to do error checksumming",
-            "type": ["integer", "null"]
-          },
-          "module": {
-            "description": "The module to which frame belongs to",
-            "type": ["string", "null"]
-          },
-          "post_context": {
-            "description": "The lines of code after the stack frame",
-            "type": ["array", "null"],
-            "minItems": 0,
-            "items": {
-              "type": "string"
-            }
-          },
-          "pre_context": {
-            "description": "The lines of code before the stack frame",
-            "type": ["array", "null"],
-            "minItems": 0,
-            "items": {
-              "type": "string"
-            }
-          },
-          "vars": {
-            "description": "Local variables for this stack frame",
-            "type": ["object", "null"],
-            "properties": {}
-          }
-        },
-        "anyOf": [
-          { "required": ["filename"], "properties": {"filename": { "type": "string" }} },
-          { "required": ["classname"], "properties": {"classname": { "type": "string" }} }
-        ]
-      },
-      "minItems": 0
-    },
-    "sync": {
-      "type": ["boolean", "null"],
-      "description": "Indicates whether the span was executed synchronously or asynchronously."
- } - }, - "required": ["duration", "name", "type", "id","trace_id", "parent_id"] - }, - { "anyOf":[ - {"required": ["timestamp"], "properties": {"timestamp": { "type": "integer" }}}, - {"required": ["start"], "properties": {"start": { "type": "number" }}} - ] - } - ] -} -` diff --git a/model/span_test.go b/model/span_test.go index d1e2a85ced6..e62d6f6e6af 100644 --- a/model/span_test.go +++ b/model/span_test.go @@ -25,28 +25,25 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/transform" ) +func TestSpanTransformEmpty(t *testing.T) { + var event APMEvent + event.Span = &Span{} + beatEvent := event.BeatEvent(context.Background()) + assert.Empty(t, beatEvent.Fields) +} + func TestSpanTransform(t *testing.T) { path := "test/path" start := 0.65 - serviceName, serviceVersion, env := "myService", "1.2", "staging" - service := Service{Name: serviceName, Version: serviceVersion, Environment: env} - hexID, parentID, traceID := "0147258369012345", "abcdef0123456789", "01234567890123456789abcdefa" + hexID := "0147258369012345" subtype := "amqp" action := "publish" - timestamp := time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, - time.FixedZone("+0100", 3600)) + duration := time.Millisecond * 1500 + timestamp := time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, time.FixedZone("+0100", 3600)) timestampUs := timestamp.UnixNano() / 1000 - method, statusCode, url := "get", 200, "http://localhost" instance, statement, dbType, user, rowsAffected := "db01", "select *", "sql", "jane", 5 - metadataLabels := common.MapStr{"label.a": "a", "label.b": "b", "c": 1} - metadata := Metadata{Service: service, Labels: metadataLabels} - address, port := "127.0.0.1", 8080 destServiceType, destServiceName, destServiceResource := "db", "elasticsearch", "elasticsearch" tests := []struct { @@ -54,76 +51,37 @@ func TestSpanTransform(t *testing.T) { Output common.MapStr Msg string }{ - { - Msg: "Span without a Stacktrace", - Span: Span{Timestamp: timestamp, Metadata: metadata}, - Output: common.MapStr{ - "processor": common.MapStr{"event": "span", "name": "transaction"}, - "service": common.MapStr{"name": serviceName, "environment": env, "version": serviceVersion}, - "span": common.MapStr{ - "duration": common.MapStr{"us": 0}, - "name": "", - "type": "", - }, - "event": common.MapStr{"outcome": ""}, - "labels": metadataLabels, - "timestamp": common.MapStr{"us": timestampUs}, - }, - }, - { - Msg: "Span with outcome", - Span: Span{Timestamp: timestamp, Metadata: metadata, Outcome: "success"}, - Output: common.MapStr{ - "processor": common.MapStr{"event": "span", "name": "transaction"}, - "service": common.MapStr{"name": serviceName, "environment": env, "version": serviceVersion}, - "span": common.MapStr{ - "duration": common.MapStr{"us": 0}, - "name": "", - "type": "", - }, - "timestamp": common.MapStr{"us": timestampUs}, - "labels": metadataLabels, - "event": common.MapStr{"outcome": "success"}, - }, - }, { Msg: "Full Span", Span: Span{ - Metadata: metadata, ID: hexID, - TraceID: traceID, - ParentID: parentID, Name: "myspan", Type: "myspantype", - Subtype: &subtype, - Action: &action, - Timestamp: timestamp, + Subtype: subtype, + Action: action, Start: &start, - Outcome: "unknown", RepresentativeCount: 5, - Duration: 1.20, - RUM: true, - Stacktrace: Stacktrace{{AbsPath: &path}}, - Labels: common.MapStr{"label.a": 12}, - HTTP: 
&HTTP{Method: &method, StatusCode: &statusCode, URL: &url}, + Stacktrace: Stacktrace{{AbsPath: path}}, DB: &DB{ - Instance: &instance, - Statement: &statement, - Type: &dbType, - UserName: &user, - RowsAffected: &rowsAffected}, - Destination: &Destination{Address: &address, Port: &port}, + Instance: instance, + Statement: statement, + Type: dbType, + UserName: user, + RowsAffected: &rowsAffected, + }, DestinationService: &DestinationService{ - Type: &destServiceType, - Name: &destServiceName, - Resource: &destServiceResource, + Type: destServiceType, + Name: destServiceName, + Resource: destServiceResource, }, - Message: &Message{QueueName: tests.StringPtr("users")}, + Message: &Message{QueueName: "users"}, + Composite: &Composite{Count: 10, Sum: 1.1, CompressionStrategy: "exact_match"}, }, Output: common.MapStr{ + "processor": common.MapStr{"name": "transaction", "event": "span"}, "span": common.MapStr{ "id": hexID, - "duration": common.MapStr{"us": 1200}, + "duration": common.MapStr{"us": int(duration.Microseconds())}, "name": "myspan", "start": common.MapStr{"us": 650}, "type": "myspantype", @@ -132,10 +90,7 @@ func TestSpanTransform(t *testing.T) { "stacktrace": []common.MapStr{{ "exclude_from_grouping": false, "abs_path": path, - "sourcemap": common.MapStr{ - "error": "Colno mandatory for sourcemapping.", - "updated": false, - }}}, + }}, "db": common.MapStr{ "instance": instance, "statement": statement, @@ -143,11 +98,6 @@ func TestSpanTransform(t *testing.T) { "user": common.MapStr{"name": user}, "rows_affected": rowsAffected, }, - "http": common.MapStr{ - "url": common.MapStr{"original": url}, - "response": common.MapStr{"status_code": statusCode}, - "method": "get", - }, "destination": common.MapStr{ "service": common.MapStr{ "type": destServiceType, @@ -156,24 +106,73 @@ func TestSpanTransform(t *testing.T) { }, }, "message": common.MapStr{"queue": common.MapStr{"name": "users"}}, + "composite": common.MapStr{ + "count": 10, + "sum": common.MapStr{"us": 1100}, + "compression_strategy": "exact_match", + }, }, - "labels": common.MapStr{"label.a": 12, "label.b": "b", "c": 1}, - "processor": common.MapStr{"event": "span", "name": "transaction"}, - "service": common.MapStr{"name": serviceName, "environment": env, "version": serviceVersion}, - "timestamp": common.MapStr{"us": timestampUs}, - "trace": common.MapStr{"id": traceID}, - "parent": common.MapStr{"id": parentID}, - "destination": common.MapStr{"address": address, "ip": address, "port": port}, - "event": common.MapStr{"outcome": "unknown"}, + "timestamp": common.MapStr{"us": int(timestampUs)}, }, }, } for _, test := range tests { - output := test.Span.Transform(context.Background(), &transform.Config{ - RUM: transform.RUMConfig{SourcemapStore: &sourcemap.Store{}}, - }) - fields := output[0].Fields - assert.Equal(t, test.Output, fields, test.Msg) + event := APMEvent{ + Processor: SpanProcessor, + Span: &test.Span, + Timestamp: timestamp, + Event: Event{Duration: duration}, + } + output := event.BeatEvent(context.Background()) + assert.Equal(t, test.Output, output.Fields, test.Msg) + } +} + +func TestSpanHTTPFields(t *testing.T) { + event := APMEvent{ + Processor: SpanProcessor, + Span: &Span{}, + HTTP: HTTP{ + Version: "2.0", + Request: &HTTPRequest{ + Method: "get", + }, + Response: &HTTPResponse{ + StatusCode: 200, + }, + }, + URL: URL{Original: "http://localhost"}, } + + output := event.BeatEvent(context.Background()) + assert.Equal(t, common.MapStr{ + "processor": common.MapStr{ + "name": "transaction", + "event": "span", + 
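// Note: span documents keep processor.name "transaction" (as shown in every expected output in this file); processor.event ("span") is what distinguishes them from transaction documents.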
}, + "http": common.MapStr{ + "version": event.HTTP.Version, + "request": common.MapStr{ + "method": event.HTTP.Request.Method, + }, + "response": common.MapStr{ + "status_code": event.HTTP.Response.StatusCode, + }, + }, + "url": common.MapStr{ + "original": event.URL.Original, + }, + "span": common.MapStr{ + "duration": common.MapStr{"us": 0}, + "http.url.original": event.URL.Original, + "http": common.MapStr{ + "version": event.HTTP.Version, + "method": event.HTTP.Request.Method, + "response": common.MapStr{ + "status_code": event.HTTP.Response.StatusCode, + }, + }, + }, + }, output.Fields) } diff --git a/model/stacktrace.go b/model/stacktrace.go index ba15c8c2a52..422e944ff05 100644 --- a/model/stacktrace.go +++ b/model/stacktrace.go @@ -18,76 +18,102 @@ package model import ( - "context" - "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - - logs "github.com/elastic/apm-server/log" - - "github.com/elastic/apm-server/transform" ) type Stacktrace []*StacktraceFrame -func (st *Stacktrace) transform(ctx context.Context, cfg *transform.Config, rum bool, service *Service) []common.MapStr { - if st == nil { +type StacktraceFrame struct { + AbsPath string + Filename string + Classname string + Lineno *int + Colno *int + ContextLine string + Module string + Function string + LibraryFrame bool + Vars common.MapStr + PreContext []string + PostContext []string + + ExcludeFromGrouping bool + + SourcemapUpdated bool + SourcemapError string + Original Original +} + +type Original struct { + AbsPath string + Filename string + Classname string + Lineno *int + Colno *int + Function string + LibraryFrame bool +} + +func (st Stacktrace) transform() []common.MapStr { + if len(st) == 0 { return nil } - // source map algorithm: - // apply source mapping frame by frame - // if no source map could be found, set updated to false and set sourcemap error - // otherwise use source map library for mapping and update - // - filename: only if it was found - // - function: - // * should be moved down one stack trace frame, - // * the function name of the first frame is set to - // * if one frame is not found in the source map, this frame is left out and - // the function name from the previous frame is used - // * if a mapping could be applied but no function name is found, the - // function name for the next frame is set to - // - colno - // - lineno - // - abs_path is set to the cleaned abs_path - // - sourcmeap.updated is set to true + frames := make([]common.MapStr, len(st)) + for i, frame := range st { + frames[i] = frame.transform() + } + return frames +} + +func (s *StacktraceFrame) transform() common.MapStr { + var m mapStr + m.maybeSetString("filename", s.Filename) + m.maybeSetString("classname", s.Classname) + m.maybeSetString("abs_path", s.AbsPath) + m.maybeSetString("module", s.Module) + m.maybeSetString("function", s.Function) + m.maybeSetMapStr("vars", s.Vars) - if !rum || cfg.RUM.SourcemapStore == nil { - return st.transformFrames(cfg, rum, noSourcemapping) + if s.LibraryFrame { + m.set("library_frame", s.LibraryFrame) } - if service == nil || service.Name == "" || service.Version == "" { - return st.transformFrames(cfg, rum, noSourcemapping) + m.set("exclude_from_grouping", s.ExcludeFromGrouping) + + var context mapStr + if len(s.PreContext) > 0 { + context.set("pre", s.PreContext) } + if len(s.PostContext) > 0 { + context.set("post", s.PostContext) + } + m.maybeSetMapStr("context", common.MapStr(context)) - var errMsg string - 
var sourcemapErrorSet = map[string]interface{}{} - logger := logp.NewLogger(logs.Stacktrace) - fct := "" - return st.transformFrames(cfg, rum, func(frame *StacktraceFrame) { - fct, errMsg = frame.applySourcemap(ctx, cfg.RUM.SourcemapStore, service, fct) - if errMsg == "" || !logger.IsDebug() { - return - } - if _, ok := sourcemapErrorSet[errMsg]; !ok { - logger.Debug(errMsg) - sourcemapErrorSet[errMsg] = nil - } - }) -} + var line mapStr + line.maybeSetIntptr("number", s.Lineno) + line.maybeSetIntptr("column", s.Colno) + line.maybeSetString("context", s.ContextLine) + m.maybeSetMapStr("line", common.MapStr(line)) -func (st *Stacktrace) transformFrames(cfg *transform.Config, rum bool, apply func(*StacktraceFrame)) []common.MapStr { - frameCount := len(*st) - if frameCount == 0 { - return nil + var sm mapStr + if s.SourcemapUpdated { + sm.set("updated", true) } + sm.maybeSetString("error", s.SourcemapError) + m.maybeSetMapStr("sourcemap", common.MapStr(sm)) - var fr *StacktraceFrame - frames := make([]common.MapStr, frameCount) - for idx := frameCount - 1; idx >= 0; idx-- { - fr = (*st)[idx] - apply(fr) - frames[idx] = fr.transform(cfg, rum) + var orig mapStr + if s.Original.LibraryFrame { + orig.set("library_frame", s.Original.LibraryFrame) } - return frames -} + if s.SourcemapUpdated { + orig.maybeSetString("filename", s.Original.Filename) + orig.maybeSetString("classname", s.Original.Classname) + orig.maybeSetString("abs_path", s.Original.AbsPath) + orig.maybeSetString("function", s.Original.Function) + orig.maybeSetIntptr("colno", s.Original.Colno) + orig.maybeSetIntptr("lineno", s.Original.Lineno) + } + m.maybeSetMapStr("original", common.MapStr(orig)) -func noSourcemapping(_ *StacktraceFrame) {} + return common.MapStr(m) +} diff --git a/model/stacktrace_frame.go b/model/stacktrace_frame.go deleted file mode 100644 index a0cea3963d5..00000000000 --- a/model/stacktrace_frame.go +++ /dev/null @@ -1,225 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package model - -import ( - "context" - "fmt" - "regexp" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" -) - -const ( - errMsgSourcemapColumnMandatory = "Colno mandatory for sourcemapping." - errMsgSourcemapLineMandatory = "Lineno mandatory for sourcemapping." - errMsgSourcemapPathMandatory = "AbsPath mandatory for sourcemapping." 
-) - -type StacktraceFrame struct { - AbsPath *string - Filename *string - Classname *string - Lineno *int - Colno *int - ContextLine *string - Module *string - Function *string - LibraryFrame *bool - Vars common.MapStr - PreContext []string - PostContext []string - - ExcludeFromGrouping bool - - SourcemapUpdated *bool - SourcemapError *string - Original Original -} - -type Original struct { - AbsPath *string - Filename *string - Classname *string - Lineno *int - Colno *int - Function *string - LibraryFrame *bool - - sourcemapCopied bool -} - -func (s *StacktraceFrame) transform(cfg *transform.Config, rum bool) common.MapStr { - m := common.MapStr{} - utility.Set(m, "filename", s.Filename) - utility.Set(m, "classname", s.Classname) - utility.Set(m, "abs_path", s.AbsPath) - utility.Set(m, "module", s.Module) - utility.Set(m, "function", s.Function) - utility.Set(m, "vars", s.Vars) - if rum && cfg.RUM.LibraryPattern != nil { - s.setLibraryFrame(cfg.RUM.LibraryPattern) - } - utility.Set(m, "library_frame", s.LibraryFrame) - - if rum && cfg.RUM.ExcludeFromGrouping != nil { - s.setExcludeFromGrouping(cfg.RUM.ExcludeFromGrouping) - } - utility.Set(m, "exclude_from_grouping", s.ExcludeFromGrouping) - - context := common.MapStr{} - utility.Set(context, "pre", s.PreContext) - utility.Set(context, "post", s.PostContext) - utility.Set(m, "context", context) - - line := common.MapStr{} - utility.Set(line, "number", s.Lineno) - utility.Set(line, "column", s.Colno) - utility.Set(line, "context", s.ContextLine) - utility.Set(m, "line", line) - - sm := common.MapStr{} - utility.Set(sm, "updated", s.SourcemapUpdated) - utility.Set(sm, "error", s.SourcemapError) - utility.Set(m, "sourcemap", sm) - - orig := common.MapStr{} - utility.Set(orig, "library_frame", s.Original.LibraryFrame) - if s.SourcemapUpdated != nil && *(s.SourcemapUpdated) { - utility.Set(orig, "filename", s.Original.Filename) - utility.Set(orig, "classname", s.Original.Classname) - utility.Set(orig, "abs_path", s.Original.AbsPath) - utility.Set(orig, "function", s.Original.Function) - utility.Set(orig, "colno", s.Original.Colno) - utility.Set(orig, "lineno", s.Original.Lineno) - } - utility.Set(m, "original", orig) - - return m -} - -func (s *StacktraceFrame) IsLibraryFrame() bool { - return s.LibraryFrame != nil && *s.LibraryFrame -} - -func (s *StacktraceFrame) IsSourcemapApplied() bool { - return s.SourcemapUpdated != nil && *s.SourcemapUpdated -} - -func (s *StacktraceFrame) setExcludeFromGrouping(pattern *regexp.Regexp) { - s.ExcludeFromGrouping = s.Filename != nil && pattern.MatchString(*s.Filename) -} - -func (s *StacktraceFrame) setLibraryFrame(pattern *regexp.Regexp) { - s.Original.LibraryFrame = s.LibraryFrame - libraryFrame := (s.Filename != nil && pattern.MatchString(*s.Filename)) || - (s.AbsPath != nil && pattern.MatchString(*s.AbsPath)) - s.LibraryFrame = &libraryFrame -} - -func (s *StacktraceFrame) applySourcemap(ctx context.Context, store *sourcemap.Store, service *Service, prevFunction string) (function string, errMsg string) { - function = prevFunction - - var valid bool - if valid, errMsg = s.validForSourcemapping(); !valid { - s.updateError(errMsg) - return - } - - s.setOriginalSourcemapData() - - path := utility.CleanUrlPath(*s.Original.AbsPath) - mapper, err := store.Fetch(ctx, service.Name, service.Version, path) - if err != nil { - errMsg = err.Error() - return - } - if mapper == nil { - errMsg = fmt.Sprintf("No Sourcemap available for ServiceName %s, ServiceVersion %s, Path %s.", - service.Name, service.Version, 
path) - s.updateError(errMsg) - return - } - - file, fct, line, col, ctxLine, preCtx, postCtx, ok := sourcemap.Map(mapper, *s.Original.Lineno, *s.Original.Colno) - if !ok { - errMsg = fmt.Sprintf("No Sourcemap found for Lineno %v, Colno %v", *s.Original.Lineno, *s.Original.Colno) - s.updateError(errMsg) - return - } - - if file != "" { - s.Filename = &file - } - - s.Colno = &col - s.Lineno = &line - s.AbsPath = &path - s.updateSmap(true) - s.Function = &prevFunction - s.ContextLine = &ctxLine - s.PreContext = preCtx - s.PostContext = postCtx - - if fct != "" { - function = fct - return - } - function = "" - return -} - -func (s *StacktraceFrame) validForSourcemapping() (bool, string) { - if s.Colno == nil { - return false, errMsgSourcemapColumnMandatory - } - if s.Lineno == nil { - return false, errMsgSourcemapLineMandatory - } - if s.AbsPath == nil { - return false, errMsgSourcemapPathMandatory - } - return true, "" -} - -func (s *StacktraceFrame) setOriginalSourcemapData() { - if s.Original.sourcemapCopied { - return - } - s.Original.Colno = s.Colno - s.Original.AbsPath = s.AbsPath - s.Original.Function = s.Function - s.Original.Lineno = s.Lineno - s.Original.Filename = s.Filename - s.Original.Classname = s.Classname - - s.Original.sourcemapCopied = true -} - -func (s *StacktraceFrame) updateError(errMsg string) { - s.SourcemapError = &errMsg - s.updateSmap(false) -} - -func (s *StacktraceFrame) updateSmap(updated bool) { - s.SourcemapUpdated = &updated -} diff --git a/model/stacktrace_frame_test.go b/model/stacktrace_frame_test.go deleted file mode 100644 index 9d5cda5d1de..00000000000 --- a/model/stacktrace_frame_test.go +++ /dev/null @@ -1,432 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package model - -import ( - "context" - "fmt" - "regexp" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/elasticsearch" - "github.com/elastic/apm-server/tests" - - "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/sourcemap" - "github.com/elastic/apm-server/sourcemap/test" - "github.com/elastic/apm-server/transform" -) - -func TestStacktraceFrameTransform(t *testing.T) { - filename, classname := "some file", "foo" - lineno := 1 - colno := 55 - path := "~/./some/abs_path" - context := "context" - fct := "some function" - module := "some_module" - libraryFrame := true - tests := []struct { - StFrame StacktraceFrame - Output common.MapStr - Msg string - }{ - { - StFrame: StacktraceFrame{Filename: &filename, Lineno: &lineno}, - Output: common.MapStr{ - "filename": filename, - "line": common.MapStr{"number": lineno}, - "exclude_from_grouping": false, - }, - Msg: "Minimal StacktraceFrame", - }, - { - StFrame: StacktraceFrame{ - AbsPath: &path, - Filename: &filename, - Classname: &classname, - Lineno: &lineno, - Colno: &colno, - ContextLine: &context, - Module: &module, - Function: &fct, - LibraryFrame: &libraryFrame, - Vars: map[string]interface{}{"k1": "v1", "k2": "v2"}, - PreContext: []string{"prec1", "prec2"}, - PostContext: []string{"postc1", "postc2"}, - }, - Output: common.MapStr{ - "abs_path": "~/./some/abs_path", - "filename": "some file", - "classname": "foo", - "function": "some function", - "module": "some_module", - "library_frame": true, - "vars": common.MapStr{"k1": "v1", "k2": "v2"}, - "context": common.MapStr{ - "pre": []string{"prec1", "prec2"}, - "post": []string{"postc1", "postc2"}, - }, - "line": common.MapStr{ - "number": 1, - "column": 55, - "context": "context", - }, - "exclude_from_grouping": false, - }, - Msg: "Full StacktraceFrame", - }, - } - - for idx, test := range tests { - output := test.StFrame.transform(&transform.Config{}, true) - assert.Equal(t, test.Output, output, fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) - } -} - -func TestSourcemap_Apply(t *testing.T) { - - name, version, col, line, path := "myservice", "2.1.4", 10, 15, "/../a/path" - validService := func() *Service { - return &Service{Name: name, Version: version} - } - validFrame := func() *StacktraceFrame { - return &StacktraceFrame{Colno: &col, Lineno: &line, AbsPath: &path} - } - - t.Run("frame", func(t *testing.T) { - for name, tc := range map[string]struct { - frame *StacktraceFrame - - expectedErrorMsg string - }{ - "noColumn": { - frame: &StacktraceFrame{}, - expectedErrorMsg: "Colno mandatory"}, - "noLine": { - frame: &StacktraceFrame{Colno: &col}, - expectedErrorMsg: "Lineno mandatory"}, - "noPath": { - frame: &StacktraceFrame{Colno: &col, Lineno: &line}, - expectedErrorMsg: "AbsPath mandatory", - }, - } { - t.Run(name, func(t *testing.T) { - function, msg := tc.frame.applySourcemap(context.Background(), &sourcemap.Store{}, validService(), "foo") - assert.Equal(t, "foo", function) - assert.Contains(t, msg, tc.expectedErrorMsg) - assert.Equal(t, new(bool), tc.frame.SourcemapUpdated) - require.NotNil(t, tc.frame.SourcemapError) - assert.Contains(t, *tc.frame.SourcemapError, msg) - assert.Zero(t, tc.frame.Original) - }) - } - }) - - t.Run("errorPerFrame", func(t *testing.T) { - for name, tc := range map[string]struct { - store *sourcemap.Store - expectedErrorMsg string - }{ - "noSourcemap": {store: 
testSourcemapStore(t, test.ESClientWithSourcemapNotFound(t)), - expectedErrorMsg: "No Sourcemap available"}, - "noMapping": {store: testSourcemapStore(t, test.ESClientWithValidSourcemap(t)), - expectedErrorMsg: "No Sourcemap found for Lineno", - }, - } { - t.Run(name, func(t *testing.T) { - frame := validFrame() - function, msg := frame.applySourcemap(context.Background(), tc.store, validService(), "xyz") - assert.Equal(t, "xyz", function) - require.Contains(t, msg, tc.expectedErrorMsg) - assert.NotZero(t, frame.SourcemapError) - assert.Equal(t, new(bool), frame.SourcemapUpdated) - }) - } - }) - - t.Run("mappingError", func(t *testing.T) { - for name, tc := range map[string]struct { - store *sourcemap.Store - expectedErrorMsg string - }{ - "ESUnavailable": {store: testSourcemapStore(t, test.ESClientUnavailable(t)), - expectedErrorMsg: "client error"}, - "invalidSourcemap": {store: testSourcemapStore(t, test.ESClientWithInvalidSourcemap(t)), - expectedErrorMsg: "Could not parse Sourcemap."}, - "unsupportedSourcemap": {store: testSourcemapStore(t, test.ESClientWithUnsupportedSourcemap(t)), - expectedErrorMsg: "only 3rd version is supported"}, - } { - t.Run(name, func(t *testing.T) { - frame := validFrame() - function, msg := frame.applySourcemap(context.Background(), tc.store, validService(), "xyz") - assert.Equal(t, "xyz", function) - require.Contains(t, msg, tc.expectedErrorMsg) - assert.NotZero(t, msg) - assert.Zero(t, frame.SourcemapUpdated) - assert.Zero(t, frame.SourcemapError) - }) - } - }) - - t.Run("mapping", func(t *testing.T) { - - for name, tc := range map[string]struct { - origCol, origLine int - origPath string - - function, file, path, ctxLine string - preCtx, postCtx []string - col, line int - }{ - "withFunction": {origCol: 67, origLine: 1, origPath: "/../a/path", - function: "exports", file: "", path: "/a/path", ctxLine: " \t\t\texports: {},", col: 0, line: 13, - preCtx: []string{" \t\tif(installedModules[moduleId])", " \t\t\treturn installedModules[moduleId].exports;", "", " \t\t// Create a new module (and put it into the cache)", " \t\tvar module = installedModules[moduleId] = {"}, - postCtx: []string{" \t\t\tid: moduleId,", " \t\t\tloaded: false", " \t\t};", "", " \t\t// Execute the module function"}}, - "withFilename": {origCol: 7, origLine: 1, origPath: "/../a/path", - function: "", file: "webpack:///bundle.js", path: "/a/path", - ctxLine: "/******/ (function(modules) { // webpackBootstrap", preCtx: []string{}, - postCtx: []string{"/******/ \t// The module cache", "/******/ \tvar installedModules = {};", "/******/", "/******/ \t// The require function", "/******/ \tfunction __webpack_require__(moduleId) {"}, - col: 9, line: 1}, - "withoutFilename": {origCol: 23, origLine: 1, origPath: "/../a/path", - function: "__webpack_require__", file: "", path: "/a/path", ctxLine: " \tfunction __webpack_require__(moduleId) {", - preCtx: []string{" \t// The module cache", " \tvar installedModules = {};", "", " \t// The require function"}, - postCtx: []string{"", " \t\t// Check if module is in cache", " \t\tif(installedModules[moduleId])", " \t\t\treturn installedModules[moduleId].exports;", ""}, - col: 0, line: 5}, - } { - t.Run(name, func(t *testing.T) { - frame := &StacktraceFrame{Colno: &tc.origCol, Lineno: &tc.origLine, AbsPath: &tc.origPath} - - prevFunction := "xyz" - function, msg := frame.applySourcemap(context.Background(), testSourcemapStore(t, test.ESClientWithValidSourcemap(t)), validService(), prevFunction) - require.Empty(t, msg) - assert.Zero(t, frame.SourcemapError) - 
updated := true - assert.Equal(t, &updated, frame.SourcemapUpdated) - - assert.Equal(t, tc.function, function) - assert.Equal(t, prevFunction, *frame.Function) - assert.Equal(t, tc.col, *frame.Colno) - assert.Equal(t, tc.line, *frame.Lineno) - assert.Equal(t, tc.path, *frame.AbsPath) - assert.Equal(t, tc.ctxLine, *frame.ContextLine) - assert.Equal(t, tc.preCtx, frame.PreContext) - assert.Equal(t, tc.postCtx, frame.PostContext) - if tc.file == "" { - assert.Nil(t, frame.Filename) - } else { - assert.Equal(t, tc.file, *frame.Filename) - } - assert.NotZero(t, frame.Original) - }) - } - }) -} - -func TestIsLibraryFrame(t *testing.T) { - assert.False(t, (&StacktraceFrame{}).IsLibraryFrame()) - assert.False(t, (&StacktraceFrame{LibraryFrame: new(bool)}).IsLibraryFrame()) - libFrame := true - assert.True(t, (&StacktraceFrame{LibraryFrame: &libFrame}).IsLibraryFrame()) -} - -func TestIsSourcemapApplied(t *testing.T) { - assert.False(t, (&StacktraceFrame{}).IsSourcemapApplied()) - - fr := StacktraceFrame{SourcemapUpdated: new(bool)} - assert.False(t, fr.IsSourcemapApplied()) - - libFrame := true - fr = StacktraceFrame{SourcemapUpdated: &libFrame} - assert.True(t, fr.IsSourcemapApplied()) -} - -func TestExcludeFromGroupingKey(t *testing.T) { - tests := []struct { - fr StacktraceFrame - pattern string - exclude bool - }{ - { - fr: StacktraceFrame{}, - pattern: "", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/webpack")}, - pattern: "", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/webpack")}, - pattern: "/webpack/tmp", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("")}, - pattern: "^/webpack", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/webpack")}, - pattern: "^/webpack", - exclude: true, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/webpack/test/e2e/general-usecase/app.e2e-bundle.js")}, - pattern: "^/webpack", - exclude: true, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/filename")}, - pattern: "^/webpack", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("/filename/a")}, - pattern: "^/webpack", - exclude: false, - }, - { - fr: StacktraceFrame{Filename: tests.StringPtr("webpack")}, - pattern: "^/webpack", - exclude: false, - }, - } - - for idx, test := range tests { - var excludePattern *regexp.Regexp - if test.pattern != "" { - excludePattern = regexp.MustCompile(test.pattern) - } - - out := test.fr.transform(&transform.Config{ - RUM: transform.RUMConfig{ExcludeFromGrouping: excludePattern}, - }, true) - exclude := out["exclude_from_grouping"] - assert.Equal(t, test.exclude, exclude, - fmt.Sprintf("(%v): Pattern: %v, Filename: %v, expected to be excluded: %v", idx, test.pattern, test.fr.Filename, test.exclude)) - } -} - -func TestLibraryFrame(t *testing.T) { - - truthy := true - falsy := false - path := "/~/a/b" - tests := []struct { - fr StacktraceFrame - libraryPattern *regexp.Regexp - libraryFrame *bool - origLibraryFrame *bool - msg string - }{ - {fr: StacktraceFrame{}, - libraryFrame: nil, - origLibraryFrame: nil, - msg: "Empty StacktraceFrame, empty config"}, - {fr: StacktraceFrame{AbsPath: &path}, - libraryFrame: nil, - origLibraryFrame: nil, - msg: "No pattern"}, - {fr: StacktraceFrame{AbsPath: &path}, - libraryPattern: regexp.MustCompile(""), - libraryFrame: &truthy, - origLibraryFrame: nil, - msg: "Empty pattern"}, - {fr: StacktraceFrame{LibraryFrame: &falsy}, - libraryPattern: regexp.MustCompile("~"), - 
libraryFrame: &falsy, - origLibraryFrame: &falsy, - msg: "Empty StacktraceFrame"}, - {fr: StacktraceFrame{AbsPath: &path, LibraryFrame: &truthy}, - libraryPattern: regexp.MustCompile("^~/"), - libraryFrame: &falsy, - origLibraryFrame: &truthy, - msg: "AbsPath given, no Match"}, - {fr: StacktraceFrame{Filename: tests.StringPtr("myFile.js"), LibraryFrame: &truthy}, - libraryPattern: regexp.MustCompile("^~/"), - libraryFrame: &falsy, - origLibraryFrame: &truthy, - msg: "Filename given, no Match"}, - {fr: StacktraceFrame{AbsPath: &path, Filename: tests.StringPtr("myFile.js")}, - libraryPattern: regexp.MustCompile("^~/"), - libraryFrame: &falsy, - origLibraryFrame: nil, - msg: "AbsPath and Filename given, no Match"}, - {fr: StacktraceFrame{Filename: tests.StringPtr("/tmp")}, - libraryPattern: regexp.MustCompile("/tmp"), - libraryFrame: &truthy, - origLibraryFrame: nil, - msg: "Filename matching"}, - {fr: StacktraceFrame{AbsPath: &path, LibraryFrame: &falsy}, - libraryPattern: regexp.MustCompile("~/"), - libraryFrame: &truthy, - origLibraryFrame: &falsy, - msg: "AbsPath matching"}, - {fr: StacktraceFrame{AbsPath: &path, Filename: tests.StringPtr("/a/b/c")}, - libraryPattern: regexp.MustCompile("~/"), - libraryFrame: &truthy, - origLibraryFrame: nil, - msg: "AbsPath matching, Filename not matching"}, - {fr: StacktraceFrame{AbsPath: &path, Filename: tests.StringPtr("/a/b/c")}, - libraryPattern: regexp.MustCompile("/a/b/c"), - libraryFrame: &truthy, - origLibraryFrame: nil, - msg: "AbsPath not matching, Filename matching"}, - {fr: StacktraceFrame{AbsPath: &path, Filename: tests.StringPtr("~/a/b/c")}, - libraryPattern: regexp.MustCompile("~/"), - libraryFrame: &truthy, - origLibraryFrame: nil, - msg: "AbsPath and Filename matching"}, - } - - for _, test := range tests { - cfg := transform.Config{ - RUM: transform.RUMConfig{ - LibraryPattern: test.libraryPattern, - }, - } - out := test.fr.transform(&cfg, true)["library_frame"] - libFrame := test.fr.LibraryFrame - origLibFrame := test.fr.Original.LibraryFrame - if test.libraryFrame == nil { - assert.Nil(t, out, test.msg) - assert.Nil(t, libFrame, test.msg) - } else { - assert.Equal(t, *test.libraryFrame, out, test.msg) - assert.Equal(t, *test.libraryFrame, *libFrame, test.msg) - } - if test.origLibraryFrame == nil { - assert.Nil(t, origLibFrame, test.msg) - } else { - assert.Equal(t, *test.origLibraryFrame, *origLibFrame, test.msg) - } - } -} - -func testSourcemapStore(t *testing.T, client elasticsearch.Client) *sourcemap.Store { - store, err := sourcemap.NewStore(client, "apm-*sourcemap*", time.Minute) - require.NoError(t, err) - return store -} diff --git a/model/stacktrace_test.go b/model/stacktrace_test.go index d5d65a43491..ae5588ac817 100644 --- a/model/stacktrace_test.go +++ b/model/stacktrace_test.go @@ -18,25 +18,35 @@ package model import ( - "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/sourcemap/test" - "github.com/elastic/apm-server/transform" ) func TestStacktraceTransform(t *testing.T) { - colno := 1 - l4, l5, l6, l8 := 4, 5, 6, 8 - fct := "original function" - origFilename, webpackFilename := "original filename", "/webpack" - absPath, serviceName := "original path", "service1" - service := Service{Name: serviceName} + originalLineno := 111 + originalColno := 222 + originalFunction := "original function" + originalFilename := "original filename" + originalModule := "original module" + originalClassname := 
"original classname" + originalAbsPath := "original path" + + mappedLineno := 333 + mappedColno := 444 + mappedFunction := "mapped function" + mappedFilename := "mapped filename" + mappedClassname := "mapped classname" + mappedAbsPath := "mapped path" + + vars := common.MapStr{"a": "abc", "b": 123} + + contextLine := "context line" + preContext := []string{"before1", "before2"} + postContext := []string{"after1", "after2"} tests := []struct { Stacktrace Stacktrace @@ -54,214 +64,90 @@ func TestStacktraceTransform(t *testing.T) { Msg: "Stacktrace with empty Frame", }, { - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Colno: &colno, - Lineno: &l4, - Filename: &origFilename, - Function: &fct, - AbsPath: &absPath, - }, - &StacktraceFrame{Colno: &colno, Lineno: &l6, Function: &fct, AbsPath: &absPath}, - &StacktraceFrame{Colno: &colno, Lineno: &l8, Function: &fct, AbsPath: &absPath}, - &StacktraceFrame{ - Colno: &colno, - Lineno: &l5, - Filename: &origFilename, - Function: &fct, - AbsPath: &absPath, - }, - &StacktraceFrame{ - Colno: &colno, - Lineno: &l4, - Filename: &webpackFilename, - AbsPath: &absPath, - }, - }, - Output: []common.MapStr{ - { - "abs_path": "original path", "filename": "original filename", "function": "original function", - "line": common.MapStr{"column": 1, "number": 4}, - "exclude_from_grouping": false, - }, - { - "abs_path": "original path", "function": "original function", - "line": common.MapStr{"column": 1, "number": 6}, - "exclude_from_grouping": false, - }, - { - "abs_path": "original path", "function": "original function", - "line": common.MapStr{"column": 1, "number": 8}, - "exclude_from_grouping": false, - }, - { - "abs_path": "original path", "filename": "original filename", "function": "original function", - "line": common.MapStr{"column": 1, "number": 5}, - "exclude_from_grouping": false, - }, - { - "abs_path": "original path", "filename": "/webpack", - "line": common.MapStr{"column": 1, "number": 4}, - "exclude_from_grouping": false, - }, - }, - Msg: "Stacktrace with sourcemapping", + Stacktrace: Stacktrace{{ + Colno: &originalColno, + Lineno: &originalLineno, + Filename: originalFilename, + Function: originalFunction, + Classname: originalClassname, + Module: originalModule, + AbsPath: originalAbsPath, + LibraryFrame: true, + Vars: vars, + }}, + Output: []common.MapStr{{ + "abs_path": "original path", + "filename": "original filename", + "function": "original function", + "classname": "original classname", + "module": "original module", + "line": common.MapStr{ + "number": 111, + "column": 222, + }, + "exclude_from_grouping": false, + "library_frame": true, + "vars": vars, + }}, + Msg: "unmapped stacktrace", + }, + { + Stacktrace: Stacktrace{{ + Colno: &mappedColno, + Lineno: &mappedLineno, + Filename: mappedFilename, + Function: mappedFunction, + Classname: mappedClassname, + AbsPath: mappedAbsPath, + Original: Original{ + Colno: &originalColno, + Lineno: &originalLineno, + Filename: originalFilename, + Function: originalFunction, + Classname: originalClassname, + AbsPath: originalAbsPath, + }, + ExcludeFromGrouping: true, + SourcemapUpdated: true, + SourcemapError: "boom", + ContextLine: contextLine, + PreContext: preContext, + PostContext: postContext, + }}, + Output: []common.MapStr{{ + "abs_path": "mapped path", + "filename": "mapped filename", + "function": "mapped function", + "classname": "mapped classname", + "line": common.MapStr{ + "number": 333, + "column": 444, + "context": "context line", + }, + "context": common.MapStr{ + "pre": preContext, + 
"post": postContext, + }, + "original": common.MapStr{ + "abs_path": "original path", + "filename": "original filename", + "function": "original function", + "classname": "original classname", + "lineno": 111, + "colno": 222, + }, + "exclude_from_grouping": true, + "sourcemap": common.MapStr{ + "updated": true, + "error": "boom", + }, + }}, + Msg: "mapped stacktrace", }, } for idx, test := range tests { - output := test.Stacktrace.transform(context.Background(), &transform.Config{}, false, &service) + output := test.Stacktrace.transform() assert.Equal(t, test.Output, output, fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) } } - -func TestStacktraceTransformWithSourcemapping(t *testing.T) { - int1, int6, int7, int67 := 1, 6, 7, 67 - fct1, fct2 := "function foo", "function bar" - absPath, serviceName, serviceVersion := "/../a/c", "service1", "2.4.1" - origFilename, appliedFilename := "original filename", "myfilename" - service := Service{Name: serviceName, Version: serviceVersion} - - for name, tc := range map[string]struct { - Stacktrace Stacktrace - Output []common.MapStr - Msg string - }{ - "emptyStacktrace": { - Stacktrace: Stacktrace{}, - Output: nil, - }, - "emptyFrame": { - Stacktrace: Stacktrace{&StacktraceFrame{}}, - Output: []common.MapStr{ - {"exclude_from_grouping": false, - "sourcemap": common.MapStr{ - "error": "Colno mandatory for sourcemapping.", - "updated": false, - }, - }, - }, - }, - "noLineno": { - Stacktrace: Stacktrace{&StacktraceFrame{Colno: &int1}}, - Output: []common.MapStr{ - {"line": common.MapStr{"column": 1}, - "exclude_from_grouping": false, - "sourcemap": common.MapStr{ - "error": "Lineno mandatory for sourcemapping.", - "updated": false, - }, - }, - }, - }, - "sourcemapApplied": { - Stacktrace: Stacktrace{ - &StacktraceFrame{ - Colno: &int7, - Lineno: &int1, - Filename: &origFilename, - Function: &fct1, - AbsPath: &absPath, - }, - &StacktraceFrame{ - Colno: &int67, - Lineno: &int1, - Filename: &appliedFilename, - Function: &fct2, - AbsPath: &absPath, - }, - &StacktraceFrame{ - Colno: &int7, - Lineno: &int1, - Filename: &appliedFilename, - Function: &fct2, - AbsPath: &absPath, - }, - &StacktraceFrame{Colno: &int1, Lineno: &int6, Function: &fct2, AbsPath: &absPath}, - }, - Output: []common.MapStr{ - { - "abs_path": "/a/c", - "filename": "webpack:///bundle.js", - "function": "exports", - "context": common.MapStr{ - "post": []string{"/******/ \t// The module cache", "/******/ \tvar installedModules = {};", "/******/", "/******/ \t// The require function", "/******/ \tfunction __webpack_require__(moduleId) {"}}, - "line": common.MapStr{ - "column": 9, - "number": 1, - "context": "/******/ (function(modules) { // webpackBootstrap"}, - "exclude_from_grouping": false, - "sourcemap": common.MapStr{"updated": true}, - "original": common.MapStr{ - "abs_path": "/../a/c", - "colno": 7, - "filename": "original filename", - "function": "function foo", - "lineno": 1, - }, - }, - { - "abs_path": "/a/c", - "filename": "myfilename", - "function": "", //prev function - "context": common.MapStr{ - "post": []string{" \t\t\tid: moduleId,", " \t\t\tloaded: false", " \t\t};", "", " \t\t// Execute the module function"}, - "pre": []string{" \t\tif(installedModules[moduleId])", " \t\t\treturn installedModules[moduleId].exports;", "", " \t\t// Create a new module (and put it into the cache)", " \t\tvar module = installedModules[moduleId] = {"}}, - "line": common.MapStr{ - "column": 0, - "number": 13, - "context": " \t\t\texports: {},"}, - "exclude_from_grouping": false, - 
"sourcemap": common.MapStr{"updated": true}, - "original": common.MapStr{ - "abs_path": "/../a/c", - "colno": 67, - "filename": "myfilename", - "function": "function bar", - "lineno": 1, - }, - }, - { - "abs_path": "/a/c", - "filename": "webpack:///bundle.js", - "function": "", //prev function - "context": common.MapStr{ - "post": []string{"/******/ \t// The module cache", "/******/ \tvar installedModules = {};", "/******/", "/******/ \t// The require function", "/******/ \tfunction __webpack_require__(moduleId) {"}}, - "line": common.MapStr{ - "column": 9, - "number": 1, - "context": "/******/ (function(modules) { // webpackBootstrap"}, - "exclude_from_grouping": false, - "sourcemap": common.MapStr{"updated": true}, - "original": common.MapStr{ - "abs_path": "/../a/c", - "colno": 7, - "filename": "myfilename", - "function": "function bar", - "lineno": 1, - }, - }, - { - "abs_path": "/../a/c", - "function": fct2, - "line": common.MapStr{"column": 1, "number": 6}, - "exclude_from_grouping": false, - "sourcemap": common.MapStr{"updated": false, "error": "No Sourcemap found for Lineno 6, Colno 1"}, - }, - }, - }, - } { - t.Run(name, func(t *testing.T) { - cfg := &transform.Config{ - RUM: transform.RUMConfig{ - SourcemapStore: testSourcemapStore(t, test.ESClientWithValidSourcemap(t)), - }, - } - - // run `Stacktrace.transform` twice to ensure method is idempotent - tc.Stacktrace.transform(context.Background(), cfg, true, &service) - output := tc.Stacktrace.transform(context.Background(), cfg, true, &service) - assert.Equal(t, tc.Output, output) - }) - } -} diff --git a/model/system.go b/model/system.go deleted file mode 100644 index 1961cd9c0c1..00000000000 --- a/model/system.go +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package model - -import ( - "net" - - "github.com/elastic/beats/v7/libbeat/common" -) - -type System struct { - DetectedHostname string - ConfiguredHostname string - Architecture string - Platform string - IP net.IP - - Container Container - Kubernetes Kubernetes -} - -func (s *System) name() string { - if s.ConfiguredHostname != "" { - return s.ConfiguredHostname - } - return s.Hostname() -} - -// Hostname returns the value to store in `host.hostname`. -func (s *System) Hostname() string { - if s == nil { - return "" - } - - // if system.kubernetes.node.name is set in the metadata, set host.hostname in the event to its value - if s.Kubernetes.NodeName != "" { - return s.Kubernetes.NodeName - } - - // If system.kubernetes.* is set, but system.kubernetes.node.name is not, then don't set host.hostname at all. 
- // some day this could be a hook to discover the right node name using these values - if s.Kubernetes.PodName != "" || s.Kubernetes.PodUID != "" || s.Kubernetes.Namespace != "" { - return "" - } - - // Otherwise set host.hostname to system.hostname - return s.DetectedHostname -} - -func (s *System) fields() common.MapStr { - if s == nil { - return nil - } - var system mapStr - system.maybeSetString("hostname", s.Hostname()) - system.maybeSetString("name", s.name()) - system.maybeSetString("architecture", s.Architecture) - if s.Platform != "" { - system.set("os", common.MapStr{"platform": s.Platform}) - } - if s.IP != nil { - system.set("ip", s.IP.String()) - } - return common.MapStr(system) -} - -func (s *System) containerFields() common.MapStr { - if s == nil { - return nil - } - return s.Container.fields() -} - -func (s *System) kubernetesFields() common.MapStr { - if s == nil { - return nil - } - return s.Kubernetes.fields() -} diff --git a/model/test_approved/host/full.approved.json b/model/test_approved/host/full.approved.json new file mode 100644 index 00000000000..9a9774ecdf5 --- /dev/null +++ b/model/test_approved/host/full.approved.json @@ -0,0 +1,12 @@ +{ + "architecture": "amd", + "hostname": "host", + "ip": "127.0.0.1", + "name": "custom hostname", + "os": { + "full": "Mac OS Mojave", + "platform": "osx", + "type": "macos" + }, + "type": "t2.medium" +} diff --git a/model/test_approved/host/full_hostname_info.approved.json b/model/test_approved/host/full_hostname_info.approved.json new file mode 100644 index 00000000000..b8f83e70e46 --- /dev/null +++ b/model/test_approved/host/full_hostname_info.approved.json @@ -0,0 +1,4 @@ +{ + "hostname": "host", + "name": "custom hostname" +} diff --git a/model/test_approved/host/hostname.approved.json b/model/test_approved/host/hostname.approved.json new file mode 100644 index 00000000000..f55fb3e2ff9 --- /dev/null +++ b/model/test_approved/host/hostname.approved.json @@ -0,0 +1,3 @@ +{ + "hostname": "host" +} diff --git a/model/modeldecoder/test_approved_system/transform_ignored_hostname.approved.json b/model/test_approved/host/ignored_hostname.approved.json similarity index 100% rename from model/modeldecoder/test_approved_system/transform_ignored_hostname.approved.json rename to model/test_approved/host/ignored_hostname.approved.json diff --git a/model/trace.go b/model/trace.go new file mode 100644 index 00000000000..3933d24894c --- /dev/null +++ b/model/trace.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package model + +import ( + "github.com/elastic/beats/v7/libbeat/common" +) + +// Trace holds information about a distributed trace. +type Trace struct { + // ID holds a unique identifier of the trace. 
+ ID string +} + +func (t *Trace) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("id", t.ID) + return common.MapStr(fields) +} diff --git a/model/transaction.go b/model/transaction.go index 105470dd1fd..1743471ea9e 100644 --- a/model/transaction.go +++ b/model/transaction.go @@ -18,60 +18,67 @@ package model import ( - "context" - "time" - - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/monitoring" - - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/utility" ) const ( - transactionProcessorName = "transaction" - transactionDocType = "transaction" + TracesDataset = "apm" ) var ( - transactionMetrics = monitoring.Default.NewRegistry("apm-server.processor.transaction") - transactionTransformations = monitoring.NewInt(transactionMetrics, "transformations") - transactionProcessorEntry = common.MapStr{"name": transactionProcessorName, "event": transactionDocType} + // TransactionProcessor is the Processor value that should be assigned to transaction events. + TransactionProcessor = Processor{Name: "transaction", Event: "transaction"} ) +// Transaction holds values for transaction.* fields. This may be used in +// transaction, span, and error events (i.e. transaction.id), as well as +// internal metrics such as breakdowns (i.e. including transaction.name). type Transaction struct { - Metadata Metadata + ID string + + // Type holds the transaction type: "request", "message", etc. + Type string - ID string - ParentID string - TraceID string + // Name holds the transaction name: "GET /foo", etc. + Name string - Timestamp time.Time + // Result holds the transaction result: "HTTP 2xx", "OK", "Error", etc. + Result string + + // Sampled holds the transaction's sampling decision. + // + // If Sampled is false, then it will be omitted from the output event. + Sampled bool + + // DurationHistogram holds a transaction duration histogram, + // with bucket values measured in microseconds, for transaction + // duration metrics. + DurationHistogram Histogram + + // BreakdownCount holds transaction breakdown count, for + // breakdown metrics. + BreakdownCount int + + // AggregatedDuration holds aggregated transaction durations, + // for breakdown metrics. + AggregatedDuration AggregatedDuration - Type string - Name string - Result string - Outcome string - Duration float64 Marks TransactionMarks Message *Message - Sampled *bool SpanCount SpanCount - Page *Page - HTTP *Http - URL *URL - Labels *Labels - Custom *Custom + Custom common.MapStr UserExperience *UserExperience - Experimental interface{} - - // RepresentativeCount, if positive, holds the approximate number of + // RepresentativeCount holds the approximate number of // transactions that this transaction represents for aggregation. // // This may be used for scaling metrics; it is not indexed. RepresentativeCount float64 + + // Root indicates whether or not the transaction is the trace root. + // + // If Root is false, it will be omitted from the output event. + Root bool } type SpanCount struct { @@ -79,19 +86,23 @@ type SpanCount struct { Started *int } -// fields creates the fields to populate in the top-level "transaction" object field. 
-func (e *Transaction) fields() common.MapStr { - var fields mapStr - fields.set("id", e.ID) - fields.set("type", e.Type) - fields.set("duration", utility.MillisAsMicros(e.Duration)) - fields.maybeSetString("name", e.Name) - fields.maybeSetString("result", e.Result) - fields.maybeSetMapStr("marks", e.Marks.fields()) - fields.maybeSetMapStr("page", e.Page.Fields()) - fields.maybeSetMapStr("custom", e.Custom.Fields()) - fields.maybeSetMapStr("message", e.Message.Fields()) - fields.maybeSetMapStr("experience", e.UserExperience.Fields()) +func (e *Transaction) setFields(fields *mapStr, apmEvent *APMEvent) { + var transaction mapStr + if apmEvent.Processor == TransactionProcessor { + // TODO(axw) set `event.duration` in 8.0, and remove this field. + // See https://github.com/elastic/apm-server/issues/5999 + transaction.set("duration", common.MapStr{"us": int(apmEvent.Event.Duration.Microseconds())}) + } + transaction.maybeSetString("id", e.ID) + transaction.maybeSetString("type", e.Type) + transaction.maybeSetMapStr("duration", e.AggregatedDuration.fields()) + transaction.maybeSetMapStr("duration.histogram", e.DurationHistogram.fields()) + transaction.maybeSetString("name", e.Name) + transaction.maybeSetString("result", e.Result) + transaction.maybeSetMapStr("marks", e.Marks.fields()) + transaction.maybeSetMapStr("custom", customFields(e.Custom)) + transaction.maybeSetMapStr("message", e.Message.Fields()) + transaction.maybeSetMapStr("experience", e.UserExperience.Fields()) if e.SpanCount.Dropped != nil || e.SpanCount.Started != nil { spanCount := common.MapStr{} if e.SpanCount.Dropped != nil { @@ -100,47 +111,18 @@ func (e *Transaction) fields() common.MapStr { if e.SpanCount.Started != nil { spanCount["started"] = *e.SpanCount.Started } - fields.set("span_count", spanCount) + transaction.set("span_count", spanCount) } - // TODO(axw) change Sampled to be non-pointer, and set its final value when - // instantiating the model type. 
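`Trace.fields` and `Transaction.setFields` above are built on an internal `mapStr` helper whose definition lies outside this diff. A minimal sketch of the pattern, assuming `mapStr` is a thin wrapper over `common.MapStr` whose maybeSet* methods skip empty values (so optional fields are omitted from the output document entirely, rather than serialized as empty strings or empty objects):

package model

import "github.com/elastic/beats/v7/libbeat/common"

// mapStr accumulates event fields, writing a key only when its value is
// non-empty. Sketch only; the real helper lives elsewhere in the package.
type mapStr common.MapStr

func (m *mapStr) set(key string, value interface{}) {
	if *m == nil {
		*m = make(mapStr)
	}
	(*m)[key] = value
}

func (m *mapStr) maybeSetString(key, value string) bool {
	if value == "" {
		return false
	}
	m.set(key, value)
	return true
}

func (m *mapStr) maybeSetMapStr(key string, value common.MapStr) bool {
	if len(value) == 0 {
		return false
	}
	m.set(key, value)
	return true
}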
- fields.set("sampled", e.Sampled == nil || *e.Sampled) - return common.MapStr(fields) -} - -func (e *Transaction) Transform(_ context.Context, _ *transform.Config) []beat.Event { - transactionTransformations.Inc() - - fields := common.MapStr{ - "processor": transactionProcessorEntry, - transactionDocType: e.fields(), + if e.Sampled { + transaction.set("sampled", e.Sampled) } - - // first set generic metadata (order is relevant) - e.Metadata.Set(fields) - utility.Set(fields, "source", fields["client"]) - - // then merge event specific information - utility.AddID(fields, "parent", e.ParentID) - utility.AddID(fields, "trace", e.TraceID) - utility.Set(fields, "timestamp", utility.TimeAsMicros(e.Timestamp)) - // merges with metadata labels, overrides conflicting keys - utility.DeepUpdate(fields, "labels", e.Labels.Fields()) - utility.Set(fields, "http", e.HTTP.Fields()) - urlFields := e.URL.Fields() - if urlFields != nil { - utility.Set(fields, "url", e.URL.Fields()) + if e.Root { + transaction.set("root", e.Root) } - if e.Page != nil { - utility.DeepUpdate(fields, "http.request.referrer", e.Page.Referer) - if urlFields == nil { - utility.Set(fields, "url", e.Page.URL.Fields()) - } + if e.BreakdownCount > 0 { + transaction.set("breakdown.count", e.BreakdownCount) } - utility.DeepUpdate(fields, "event.outcome", e.Outcome) - utility.Set(fields, "experimental", e.Experimental) - - return []beat.Event{{Fields: fields, Timestamp: e.Timestamp}} + fields.maybeSetMapStr("transaction", common.MapStr(transaction)) } type TransactionMarks map[string]TransactionMark @@ -151,7 +133,7 @@ func (m TransactionMarks) fields() common.MapStr { } out := make(mapStr, len(m)) for k, v := range m { - out.maybeSetMapStr(k, v.fields()) + out.maybeSetMapStr(sanitizeLabelKey(k), v.fields()) } return common.MapStr(out) } @@ -164,7 +146,7 @@ func (m TransactionMark) fields() common.MapStr { } out := make(common.MapStr, len(m)) for k, v := range m { - out[k] = common.Float(v) + out[sanitizeLabelKey(k)] = common.Float(v) } return out } diff --git a/model/transaction/_meta/fields.yml b/model/transaction/_meta/fields.yml index 65c160fa7be..8234b806cb8 100644 --- a/model/transaction/_meta/fields.yml +++ b/model/transaction/_meta/fields.yml @@ -2,6 +2,871 @@ title: APM Transaction description: Transaction-specific data for APM fields: + - name: processor.name + type: keyword + description: Processor name. + overwrite: true + + - name: processor.event + type: keyword + description: Processor event. + overwrite: true + + - name: timestamp + type: group + fields: + - name: us + type: long + count: 1 + description: > + Timestamp of the event in microseconds since Unix epoch. + overwrite: true + + - name: url + type: group + description: > + A complete Url, with scheme, host and path. + dynamic: false + fields: + + - name: scheme + type: keyword + description: > + The protocol of the request, e.g. "https:". + overwrite: true + + - name: full + type: keyword + description: > + The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top. + overwrite: true + + - name: domain + type: keyword + description: > + The hostname of the request, e.g. "example.com". + overwrite: true + + - name: port + type: long + description: > + The port of the request, e.g. 443. + overwrite: true + + - name: path + type: keyword + description: > + The path of the request, e.g. "/search". + overwrite: true + + - name: query + type: keyword + description: > + The query string of the request, e.g. 
"q=elasticsearch". + overwrite: true + + - name: fragment + type: keyword + description: > + A fragment specifying a location in a web page , e.g. "top". + overwrite: true + + - name: http + type: group + dynamic: false + fields: + + - name: version + type: keyword + description: > + The http version of the request leading to this event. + overwrite: true + + - name: request + type: group + fields: + + - name: method + type: keyword + description: > + The http method of the request leading to this event. + overwrite: true + + - name: headers + type: object + enabled: false + description: > + The canonical headers of the monitored HTTP request. + overwrite: true + + - name: referrer + type: keyword + ignore_above: 1024 + description: Referrer for this HTTP request. + overwrite: true + + - name: response + type: group + fields: + + - name: status_code + type: long + description: > + The status code of the HTTP response. + overwrite: true + + - name: finished + type: boolean + description: > + Used by the Node agent to indicate when in the response life cycle an error has occurred. + overwrite: true + + - name: headers + type: object + enabled: false + description: > + The canonical headers of the monitored HTTP response. + overwrite: true + + - name: labels + type: object + object_type_params: + - object_type: keyword + - object_type: boolean + - object_type: scaled_float + scaling_factor: 1000000 + dynamic: true + overwrite: true + description: > + A flat mapping of user-defined labels with string, boolean or number values. + + - name: service + type: group + dynamic: false + description: > + Service fields. + fields: + - name: name + type: keyword + description: > + Immutable name of the service emitting this event. + overwrite: true + + - name: version + type: keyword + description: > + Version of the service emitting this event. + overwrite: true + + - name: environment + type: keyword + description: > + Service environment. + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Unique meaningful name of the service node. + overwrite: true + + - name: language + type: group + fields: + + - name: name + type: keyword + description: > + Name of the programming language used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the programming language used. + overwrite: true + + - name: runtime + type: group + fields: + + - name: name + type: keyword + description: > + Name of the runtime used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the runtime used. + overwrite: true + + - name: framework + type: group + fields: + + - name: name + type: keyword + description: > + Name of the framework used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the framework used. + overwrite: true + + - name: session + type: group + dynamic: false + fields: + - name: id + type: keyword + ignore_above: 1024 + description: > + The ID of the session to which the event belongs. + - name: sequence + type: long + description: > + The sequence number of the event within the session to which the event belongs. + + - name: transaction + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The transaction ID. + overwrite: true + - name: sampled + type: boolean + description: > + Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have spans or context. 
+ overwrite: true + - name: type + type: keyword + description: > + Keyword of specific relevance in the service's domain (eg. 'request', 'backgroundjob', etc) + overwrite: true + - name: name + type: keyword + multi_fields: + - name: text + type: text + description: > + Generic designation of a transaction in the scope of a single service (eg. 'GET /users/:id'). + overwrite: true + + - name: trace + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the trace to which the event belongs. + overwrite: true + + - name: parent + type: group + dynamic: false + fields: + - name: id + type: keyword + description: > + The ID of the parent event. + overwrite: true + + - name: agent + type: group + dynamic: false + fields: + + - name: name + type: keyword + description: > + Name of the agent used. + overwrite: true + + - name: version + type: keyword + description: > + Version of the agent used. + overwrite: true + + - name: ephemeral_id + type: keyword + description: > + The Ephemeral ID identifies a running process. + overwrite: true + + - name: container + type: group + dynamic: false + title: Container + description: > + Container fields are used for meta information about the specific container + that is the source of information. These fields help correlate data based on + containers from any runtime. + fields: + + - name: id + type: keyword + description: > + Unique container id. + overwrite: true + + - name: kubernetes + type: group + dynamic: false + title: Kubernetes + description: > + Kubernetes metadata reported by agents + fields: + + - name: namespace + type: keyword + description: > + Kubernetes namespace + overwrite: true + + - name: node + type: group + fields: + - name: name + type: keyword + description: > + Kubernetes node name + overwrite: true + + - name: pod + type: group + fields: + + - name: name + type: keyword + description: > + Kubernetes pod name + overwrite: true + + - name: uid + type: keyword + description: > + Kubernetes Pod UID + overwrite: true + + - name: network + type: group + dynamic: false + description: > + Optional network fields + fields: + + - name: connection + type: group + description: > + Network connection details + fields: + + - name: type + type: keyword + description: > + Network connection type, eg. "wifi", "cell" + + - name: subtype + type: keyword + description: > + Detailed network connection sub-type, e.g. "LTE", "CDMA" + + - name: carrier + type: group + description: > + Network operator + fields: + + - name: name + type: keyword + overwrite: true + description: > + Carrier name, eg. Vodafone, T-Mobile, etc. + + - name: mcc + type: keyword + overwrite: true + description: > + Mobile country code + + - name: mnc + type: keyword + overwrite: true + description: > + Mobile network code + + - name: icc + type: keyword + overwrite: true + description: > + ISO country code, eg. US + + - name: host + type: group + dynamic: false + description: > + Optional host fields. + fields: + + - name: architecture + type: keyword + description: > + The architecture of the host the event was recorded on. + overwrite: true + + - name: hostname + type: keyword + description: > + The hostname of the host the event was recorded on. + overwrite: true + + - name: name + type: keyword + description: > + Name of the host the event was recorded on. + It can contain the same information as host.hostname or a name specified by the user.
+ overwrite: true + + - name: ip + type: ip + description: > + IP of the host that records the event. + overwrite: true + + - name: os + title: Operating System + group: 2 + description: > + The OS fields contain information about the operating system. + type: group + fields: + - name: platform + type: keyword + description: > + The platform of the host the event was recorded on. + overwrite: true + + - name: process + type: group + dynamic: false + description: > + Information pertaining to the running process where the data was collected + fields: + - name: args + level: extended + type: keyword + description: > + Process arguments. + May be filtered to protect sensitive information. + overwrite: true + + - name: pid + type: long + description: > + Numeric process ID of the service process. + overwrite: true + + - name: ppid + type: long + description: > + Numeric ID of the service's parent process. + overwrite: true + + - name: title + type: keyword + description: > + Service process title. + overwrite: true + + - name: observer + type: group + dynamic: false + fields: + + - name: listening + type: keyword + overwrite: true + description: > + Address the server is listening on. + + - name: hostname + type: keyword + overwrite: true + description: > + Hostname of the APM Server. + + - name: version + type: keyword + overwrite: true + description: > + APM Server version. + + - name: version_major + type: byte + overwrite: true + description: > + Major version number of the observer + + - name: type + type: keyword + overwrite: true + description: > + The type will be set to `apm-server`. + + - name: id + type: keyword + overwrite: true + description: > + Unique identifier of the APM Server. + + - name: ephemeral_id + type: keyword + overwrite: true + description: > + Ephemeral identifier of the APM Server. + + - name: user + type: group + dynamic: false + fields: + + - name: domain + type: keyword + description: > + The domain of the logged in user. + overwrite: true + + - name: name + type: keyword + description: > + The username of the logged in user. + overwrite: true + + - name: id + type: keyword + description: > + Identifier of the logged in user. + overwrite: true + + - name: email + type: keyword + description: > + Email of the logged in user. + overwrite: true + + - name: client + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Client domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the client of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the client. + overwrite: true + + - name: source + dynamic: false + type: group + fields: + + - name: domain + type: keyword + ignore_above: 1024 + description: > + Source domain. + overwrite: true + + - name: ip + type: ip + description: > + IP address of the source of a recorded event. + This is typically obtained from a request's X-Forwarded-For or the X-Real-IP header or falls back to a given configuration for remote address. + overwrite: true + + - name: port + type: long + description: > + Port of the source. + overwrite: true + + - name: destination + title: Destination + group: 2 + description: 'Destination fields describe details about the destination of a packet/event. 
+ + Destination fields are usually populated in conjunction with source fields.' + type: group + fields: + - name: address + level: extended + type: keyword + ignore_above: 1024 + description: 'Some event destination addresses are defined ambiguously. The + event will sometimes list an IP, a domain or a unix socket. You should always + store the raw address in the `.address` field. + Then it should be duplicated to `.ip` or `.domain`, depending on which one + it is.' + overwrite: true + + - name: ip + level: core + type: ip + description: 'IP address of the destination. + Can be one of multiple IPv4 or IPv6 addresses.' + overwrite: true + + - name: port + level: core + type: long + format: string + description: Port of the destination. + overwrite: true + + - name: user_agent + dynamic: false + title: User agent + description: > + The user_agent fields normally come from a browser request. They often + show up in web service logs coming from the parsed user agent string. + type: group + overwrite: true + fields: + + - name: original + type: keyword + description: > + Unparsed version of the user_agent. + example: "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1" + overwrite: true + + multi_fields: + - name: text + type: text + description: > + Software agent acting on behalf of a user, eg. a web browser / OS combination. + overwrite: true + + - name: name + type: keyword + overwrite: true + example: Safari + description: > + Name of the user agent. + + - name: version + type: keyword + overwrite: true + description: > + Version of the user agent. + example: 12.0 + + - name: device + type: group + overwrite: true + title: Device + description: > + Information concerning the device. + fields: + + - name: name + type: keyword + overwrite: true + example: iPhone + description: > + Name of the device. + + - name: os + type: group + overwrite: true + title: Operating System + description: > + The OS fields contain information about the operating system. + fields: + + - name: platform + type: keyword + overwrite: true + description: > + Operating system platform (such as centos, ubuntu, windows). + example: darwin + + - name: name + type: keyword + overwrite: true + example: "Mac OS X" + description: > + Operating system name, without the version. + + - name: full + type: keyword + overwrite: true + example: "Mac OS Mojave" + description: > + Operating system name, including the version or code name. + + - name: family + type: keyword + overwrite: true + example: "debian" + description: > + OS family (such as redhat, debian, freebsd, windows). + + - name: version + type: keyword + overwrite: true + example: "10.14.1" + description: > + Operating system version as a raw string. + + - name: kernel + type: keyword + overwrite: true + example: "4.4.0-112-generic" + description: > + Operating system kernel version as a raw string.
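The `sanitizeLabelKey` calls added to `TransactionMarks.fields` and `TransactionMark.fields` in model/transaction.go are likewise not defined in this diff. Since the `labels` and marks mappings above forbid `.`, `*`, and `"` in keys (pattern `^[^.*\"]*$`), a plausible sketch is a straight character substitution; this is an assumption, not necessarily the repository's implementation:

package model

import "strings"

// sanitizeLabelKey replaces characters that are disallowed in label and
// mark keys ('.', '*', '"') with an underscore, so user-supplied keys
// cannot create nested objects or invalid mapping paths.
func sanitizeLabelKey(k string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '.', '*', '"':
			return '_'
		}
		return r
	}, k)
}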
+ + - name: cloud + title: Cloud + group: 2 + type: group + description: > + Cloud metadata reported by agents + fields: + - name: account + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud account name + overwrite: true + - name: availability_zone + level: extended + type: keyword + ignore_above: 1024 + description: Cloud availability zone name + example: us-east1-a + overwrite: true + - name: instance + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine name + overwrite: true + - name: machine + type: group + dynamic: false + fields: + - name: type + level: extended + type: keyword + ignore_above: 1024 + description: Cloud instance/machine type + example: t2.medium + overwrite: true + - name: project + type: group + dynamic: false + fields: + - name: id + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project ID + overwrite: true + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: Cloud project name + overwrite: true + - name: provider + level: extended + type: keyword + ignore_above: 1024 + description: Cloud provider name + example: gcp + overwrite: true + - name: region + level: extended + type: keyword + ignore_above: 1024 + description: Cloud region name + example: us-east1 + overwrite: true + - name: service + type: group + dynamic: false + fields: + - name: name + level: extended + type: keyword + ignore_above: 1024 + description: > + Cloud service name, intended to distinguish services running on + different platforms within a provider. + overwrite: true + + - name: event + type: group + fields: + + - name: outcome + level: core + type: keyword + ignore_above: 1024 + description: > + `event.outcome` simply denotes whether the event represents a success or a + failure from the perspective of the entity that produced the event. + example: success + overwrite: true + - name: transaction type: group dynamic: false @@ -31,6 +896,8 @@ object_type: scaled_float scaling_factor: 1000000 dynamic: true + description: > + A user-defined mapping of groups of marks in milliseconds. - name: experience type: group @@ -50,6 +917,24 @@ scaling_factor: 1000000 description: The Total Blocking Time metric + - name: longtask + type: group + description: Longtask duration/count metrics + fields: + - name: count + type: long + description: The total number of longtasks + + - name: sum + type: scaled_float + scaling_factor: 1000000 + description: The sum of longtask durations + + - name: max + type: scaled_float + scaling_factor: 1000000 + description: The max longtask duration + - name: span_count type: group fields: diff --git a/model/transaction/generated/schema/rum_v3_transaction.go b/model/transaction/generated/schema/rum_v3_transaction.go deleted file mode 100644 index 5a54f37f4fe..00000000000 --- a/model/transaction/generated/schema/rum_v3_transaction.go +++ /dev/null @@ -1,1030 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V.
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const RUMV3Schema = `{ - "$id": "docs/spec/transactions/rum_v3_transaction.json", - "type": "object", - "description": "An event corresponding to an incoming request or similar task occurring in a monitored service", - "allOf": [ - { - "properties": { - "id": { - "type": "string", - "description": "Hex encoded 64 random bits ID of the transaction.", - "maxLength": 1024 - }, - "tid": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "pid": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Only root transactions of a trace do not have a parent_id, otherwise it needs to be set.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "t": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - }, - "n": { - "type": [ - "string", - "null" - ], - "description": "Generic designation of a transaction in the scope of a single service (eg: 'GET /users/:id')", - "maxLength": 1024 - }, - "y": { - "type": ["array", "null"], - "$id": "docs/spec/spans/rum_v3_span.json", - "description": "An event captured by an agent occurring in a monitored service", - "allOf": [ - { - "properties": { - "id": { - "description": "Hex encoded 64 random bits ID of the span.", - "type": "string", - "maxLength": 1024 - }, - "pi": { - "description": "Index of the parent span in the list. Absent when the parent is a transaction.", - "type": ["integer", "null"], - "maxLength": 1024 - }, - "s": { - "type": [ - "number", - "null" - ], - "description": "Offset relative to the transaction's timestamp identifying the start of the span, in milliseconds" - }, - "sr": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "t": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'db.postgresql.query', 'template.erb', etc)", - "maxLength": 1024 - }, - "su": { - "type": [ - "string", - "null" - ], - "description": "A further sub-division of the type (e.g. postgresql, elasticsearch)", - "maxLength": 1024 - }, - "ac": { - "type": [ - "string", - "null" - ], - "description": "The specific kind of event within the sub-type represented by the span (e.g. query, connect)", - "maxLength": 1024 - }, - "o": { - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates.", - "description": "The outcome of the span: success, failure, or unknown. 
Outcome may be one of a limited set of permitted values describing the success or failure of the span. This field can be used for calculating error rates for outgoing requests." - }, - "c": { - "type": [ - "object", - "null" - ], - "description": "Any other arbitrary data captured by the agent, optionally provided by the user", - "properties": { - "dt": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data about the destination for spans", - "properties": { - "ad": { - "type": [ - "string", - "null" - ], - "description": "Destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') or IPv6 (e.g. '::1')", - "maxLength": 1024 - }, - "po": { - "type": [ - "integer", - "null" - ], - "description": "Destination network port (e.g. 443)" - }, - "se": { - "description": "Destination service context", - "type": [ - "object", - "null" - ], - "properties": { - "t": { - "description": "Type of the destination service (e.g. 'db', 'elasticsearch'). Should typically be the same as span.type.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "n": { - "description": "Identifier for the destination service (e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "rc": { - "description": "Identifier for the destination service resource being operated on (e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name')", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - }, - "required": [ - "t", - "n", - "rc" - ] - } - } - }, - "h": { - "type": [ - "object", - "null" - ], - "description": "An object containing contextual data of the related http request.", - "properties": { - "url": { - "type": [ - "string", - "null" - ], - "description": "The raw url of the correlating http request." - }, - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "mt": { - "type": [ - "string", - "null" - ], - "maxLength": 1024, - "description": "The method of the http request." - } - } - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "se": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. 
\"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - } - } - } - } - }, - "d": { - "type": "number", - "description": "Duration of the span in milliseconds", - "minimum": 0 - }, - "n": { - "type": "string", - "description": "Generic designation of a span in the scope of a transaction", - "maxLength": 1024 - }, - "st": { - "type": [ - "array", - "null" - ], - "description": "List of stack frames with variable attributes (eg: lineno, filename, etc)", - "items": { - "$id": "docs/spec/rum_v3_stacktrace_frame.json", - "title": "Stacktrace", - "type": "object", - "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", - "properties": { - "ap": { - "description": "The absolute path of the file involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "co": { - "description": "Column number", - "type": [ - "integer", - "null" - ] - }, - "cli": { - "description": "The line of code part of the stack frame", - "type": [ - "string", - "null" - ] - }, - "f": { - "description": "The relative filename of the code involved in the stack frame, used e.g. to do error checksumming", - "type": [ - "string", - "null" - ] - }, - "cn": { - "description": "The classname of the code involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "fn": { - "description": "The function involved in the stack frame", - "type": [ - "string", - "null" - ] - }, - "li": { - "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", - "type": [ - "integer", - "null" - ] - }, - "mo": { - "description": "The module to which frame belongs to", - "type": [ - "string", - "null" - ] - }, - "poc": { - "description": "The lines of code after the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - }, - "prc": { - "description": "The lines of code before the stack frame", - "type": [ - "array", - "null" - ], - "minItems": 0, - "items": { - "type": "string" - } - } - }, - "required": [ - "f" - ] - }, - "minItems": 0 - }, - "sy": { - "type": [ - "boolean", - "null" - ], - "description": "Indicates whether the span was executed synchronously or asynchronously." 
- } - }, - "required": [ - "d", - "n", - "t", - "id" - ] - }, - { - "required": [ - "s" - ], - "properties": { - "s": { - "type": "number" - } - } - } - ] - }, - "me": { - "type": ["array", "null"], - "$id": "docs/spec/metricsets/rum_v3_metricset.json", - "description": "Data captured by an agent representing an event occurring in a monitored service", - "properties": { - "y": { - "type": ["object", "null"], - "description": "span", - "properties": { - "t": { - "type": "string", - "description": "type", - "maxLength": 1024 - }, - "su": { - "type": ["string", "null"], - "description": "subtype", - "maxLength": 1024 - } - } - }, - "sa": { - "type": "object", - "description": "Sampled application metrics collected from the agent.", - "properties": { - "xdc": { - "description": "transaction.duration.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "xds": { - "description": "transaction.duration.sum.us", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "xbc": { - "description": "transaction.breakdown.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "ysc": { - "description": "span.self_time.count", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - }, - "yss": { - "description": "span.self_time.sum.us", - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "docs/spec/metricsets/rum_v3_sample.json", - "type": ["object", "null"], - "description": "A single metric sample.", - "properties": { - "v": {"type": "number"} - }, - "required": ["v"] - } - } - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - } - }, - "required": ["sa"] - }, - "sr": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "yc": { - "type": "object", - "properties": { - "sd": { - "type": "integer", - "description": "Number of correlated spans that are recorded." - }, - "dd": { - "type": [ - "integer", - "null" - ], - "description": "Number of spans that have been dd by the a recording the x." 
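The deleted RUM v3 schema abbreviates every key to keep browser payloads small, and some prose was abbreviated mechanically along the way ("dd by the a recording the x" above corresponds to "dropped by the agent recording the transaction" in the v2 schema later in this diff). As a reading aid only, reconstructed from the schema descriptions rather than taken from the source tree, the abbreviations map roughly to:

package model

// Reading aid only, not part of apm-server: RUM v3 abbreviations and
// their spelled-out v2 intake equivalents, per the descriptions above.
var rumV3TransactionKeys = map[string]string{
	"tid": "trace_id",
	"pid": "parent_id",
	"t":   "type",
	"n":   "name",
	"d":   "duration",
	"rt":  "result",
	"y":   "spans",
	"st":  "stacktrace",
	"yc":  "span_count", // "sd" = started, "dd" = dropped
	"c":   "context",
	"g":   "tags",
	"me":  "metricsets",
	"sr":  "sample_rate",
	"o":   "outcome",
}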
- } - }, - "required": [ - "sd" - ] - }, - "c": { - "$id": "docs/spec/rum_v3_context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": [ - "object", - "null" - ], - "properties": { - "cu": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "r": { - "type": [ - "object", - "null" - ], - "allOf": [ - { - "properties": { - "sc": { - "type": [ - "integer", - "null" - ], - "description": "The status code of the http request." - }, - "ts": { - "type": [ - "number", - "null" - ], - "description": "Total size of the payload." - }, - "ebs": { - "type": [ - "number", - "null" - ], - "description": "The encoded size of the payload." - }, - "dbs": { - "type": [ - "number", - "null" - ], - "description": "The decoded size of the payload." - }, - "he": { - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - } - } - } - ] - }, - "q": { - "properties": { - "en": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": [ - "object", - "null" - ], - "properties": {} - }, - "he": { - "description": "Should include any headers sent by the requester. Cookies will be taken by headers if supplied.", - "type": [ - "object", - "null" - ], - "patternProperties": { - "[.*]*$": { - "type": [ - "string", - "array", - "null" - ], - "items": { - "type": [ - "string" - ] - } - } - } - }, - "hve": { - "description": "HTTP version.", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "mt": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - } - }, - "required": [ - "mt" - ] - }, - "g": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "u": { - "$id": "docs/spec/rum_v3_user.json", - "title": "User", - "type": [ - "object", - "null" - ], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. the primary key of the user", - "type": [ - "string", - "integer", - "null" - ], - "maxLength": 1024 - }, - "em": { - "description": "Email of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "un": { - "description": "The username of the logged in user", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "p": { - "description": "", - "type": [ - "object", - "null" - ], - "properties": { - "rf": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": [ - "string", - "null" - ] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": [ - "string", - "null" - ] - } - } - }, - "se": { - "description": "Service related information can be sent per event. 
Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$id": "docs/spec/rum_v3_service.json", - "title": "Service", - "type": [ - "object", - "null" - ], - "properties": { - "a": { - "description": "Name and version of the Elastic APM agent", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "description": "Name of the Elastic APM agent, e.g. \"Python\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "fw": { - "description": "Name and version of the web framework used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "la": { - "description": "Name and version of the programming language used", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "n": { - "description": "Immutable name of the service emitting this event", - "type": [ - "string", - "null" - ], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "en": { - "description": "Environment name of the service, e.g. \"production\" or \"staging\"", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ru": { - "description": "Name and version of the language runtime running this service", - "type": [ - "object", - "null" - ], - "properties": { - "n": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - }, - "ve": { - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - }, - "ve": { - "description": "Version of the service emitting this event", - "type": [ - "string", - "null" - ], - "maxLength": 1024 - } - } - } - } - }, - "d": { - "type": "number", - "description": "How long the transaction took to complete, in ms with 3 decimal points", - "minimum": 0 - }, - "rt": { - "type": [ - "string", - "null" - ], - "description": "The result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", - "maxLength": 1024 - }, - "o": { - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates.", - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates for incoming requests." - }, - "k": { - "type": [ - "object", - "null" - ], - "description": "A mark captures the timing of a significant event during the lifetime of a transaction. 
Marks are organized into groups and can be set by the user or the agent.", - "$id": "docs/spec/transactions/rum_v3_mark.json", - "type": ["object", "null"], - "description": "A mark captures the timing in milliseconds of a significant event during the lifetime of a transaction. Every mark is a simple key value pair, where the value has to be a number, and can be set by the user or the agent.", - "properties": { - "a": { - "type": ["object", "null"], - "description": "agent", - "properties": { - "dc": { - "type": ["number", "null"], - "description": "domComplete" - }, - "di": { - "type": ["number", "null"], - "description": "domInteractive" - }, - "ds": { - "type": ["number", "null"], - "description": "domContentLoadedEventStart" - }, - "de": { - "type": ["number", "null"], - "description": "domContentLoadedEventEnd" - }, - "fb": { - "type": ["number", "null"], - "description": "timeToFirstByte" - }, - "fp": { - "type": ["number", "null"], - "description": "firstContentfulPaint" - }, - "lp": { - "type": ["number", "null"], - "description": "largestContentfulPaint" - } - } - }, - "nt": { - "type": ["object", "null"], - "description": "navigation-timing", - "properties": { - "fs": { - "type": ["number", "null"], - "description": "fetchStart" - }, - "ls": { - "type": ["number", "null"], - "description": "domainLookupStart" - }, - "le": { - "type": ["number", "null"], - "description": "domainLookupEnd" - }, - "cs": { - "type": ["number", "null"], - "description": "connectStart" - }, - "ce": { - "type": ["number", "null"], - "description": "connectEnd" - }, - "qs": { - "type": ["number", "null"], - "description": "requestStart" - }, - "rs": { - "type": ["number", "null"], - "description": "responseStart" - }, - "re": { - "type": ["number", "null"], - "description": "responseEnd" - }, - "dl": { - "type": ["number", "null"], - "description": "domLoading" - }, - "di": { - "type": ["number", "null"], - "description": "domInteractive" - }, - "ds": { - "type": ["number", "null"], - "description": "domContentLoadedEventStart" - }, - "de": { - "type": ["number", "null"], - "description": "domContentLoadedEventEnd" - }, - "dc": { - "type": ["number", "null"], - "description": "domComplete" - }, - "es": { - "type": ["number", "null"], - "description": "loadEventStart" - }, - "ee": { - "type": ["number", "null"], - "description": "loadEventEnd" - } - } - } - } - }, - "sm": { - "type": [ - "boolean", - "null" - ], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." - }, - "exp": { - "$id": "docs/spec/rum_experience.json", - "title": "RUM Experience Metrics", - "description": "Metrics for measuring real user (browser) experience", - "type": ["object", "null"], - "properties": { - "cls": { - "type": ["number", "null"], - "description": "The Cumulative Layout Shift metric", - "minimum": 0 - }, - "tbt": { - "type": ["number", "null"], - "description": "The Total Blocking Time metric", - "minimum": 0 - }, - "fid": { - "type": ["number", "null"], - "description": "The First Input Delay metric", - "minimum": 0 - } - } - } - }, - "required": [ - "id", - "tid", - "yc", - "d", - "t" - ] - } - ] -} -` diff --git a/model/transaction/generated/schema/transaction.go b/model/transaction/generated/schema/transaction.go deleted file mode 100644 index dbb0969a86d..00000000000 --- a/model/transaction/generated/schema/transaction.go +++ /dev/null @@ -1,524 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package schema - -const ModelSchema = `{ - "$id": "docs/spec/transactions/transaction.json", - "type": "object", - "description": "An event corresponding to an incoming request or similar task occurring in a monitored service", - "allOf": [ - { "$id": "docs/spec/timestamp_epoch.json", - "title": "Timestamp Epoch", - "description": "Object with 'timestamp' property.", - "type": ["object"], - "properties": { - "timestamp": { - "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", - "type": ["integer", "null"] - } - } }, - { "$id": "docs/spec/transaction_name.json", - "title": "Transaction Name", - "type": ["object"], - "properties": { - "name": { - "type": ["string","null"], - "description": "Generic designation of a transaction in the scope of a single service (eg: 'GET /users/:id')", - "maxLength": 1024 - } - } }, - { "$id": "docs/spec/transaction_type.json", - "title": "Transaction Type", - "type": ["object"], - "properties": { - "type": { - "type": "string", - "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", - "maxLength": 1024 - } - } }, - { - "properties": { - "id": { - "type": "string", - "description": "Hex encoded 64 random bits ID of the transaction.", - "maxLength": 1024 - }, - "trace_id": { - "description": "Hex encoded 128 random bits ID of the correlated trace.", - "type": "string", - "maxLength": 1024 - }, - "parent_id": { - "description": "Hex encoded 64 random bits ID of the parent transaction or span. Only root transactions of a trace do not have a parent_id, otherwise it needs to be set.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "sample_rate": { - "description": "Sampling rate", - "type": ["number", "null"] - }, - "span_count": { - "type": "object", - "properties": { - "started": { - "type": "integer", - "description": "Number of correlated spans that are recorded." - - }, - "dropped": { - "type": ["integer","null"], - "description": "Number of spans that have been dropped by the agent recording the transaction." 
- - } - }, - "required": ["started"] - }, - "context": { - "$id": "docs/spec/context.json", - "title": "Context", - "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", - "type": ["object", "null"], - "properties": { - "custom": { - "description": "An arbitrary mapping of additional metadata to store with the event.", - "type": ["object", "null"], - "patternProperties": { - "^[^.*\"]*$": {} - }, - "additionalProperties": false - }, - "response": { - "type": ["object", "null"], - "allOf": [ - { "$id": "docs/spec/http_response.json", - "title": "HTTP response object", - "description": "HTTP response object, used by error, span and transction documents", - "type": ["object", "null"], - "properties": { - "status_code": { - "type": ["integer", "null"], - "description": "The status code of the http request." - }, - "transfer_size": { - "type": ["number", "null"], - "description": "Total size of the payload." - }, - "encoded_body_size": { - "type": ["number", "null"], - "description": "The encoded size of the payload." - }, - "decoded_body_size": { - "type": ["number", "null"], - "description": "The decoded size of the payload." - }, - "headers": { - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } }, - { - "properties": { - "finished": { - "description": "A boolean indicating whether the response was finished or not", - "type": [ - "boolean", - "null" - ] - }, - "headers_sent": { - "type": [ - "boolean", - "null" - ] - } - } - } - ] - }, - "request": { - "$id": "docs/spec/request.json", - "title": "Request", - "description": "If a log record was generated as a result of a http request, the http interface can be used to collect this information.", - "type": ["object", "null"], - "properties": { - "body": { - "description": "Data should only contain the request body (not the query string). It can either be a dictionary (for standard HTTP requests) or a raw request body.", - "type": ["object", "string", "null"] - }, - "env": { - "description": "The env variable is a compounded of environment information passed from the webserver.", - "type": ["object", "null"], - "properties": {} - }, - "headers": { - "description": "Should include any headers sent by the requester. Cookies will be taken by headers if supplied.", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - }, - "http_version": { - "description": "HTTP version.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "method": { - "description": "HTTP method.", - "type": "string", - "maxLength": 1024 - }, - "socket": { - "type": ["object", "null"], - "properties": { - "encrypted": { - "description": "Indicates whether request was sent as SSL/HTTPS request.", - "type": ["boolean", "null"] - }, - "remote_address": { - "description": "The network address sending the request. Should be obtained through standard APIs and not parsed from any headers like 'Forwarded'.", - "type": ["string", "null"] - } - } - }, - "url": { - "description": "A complete Url, with scheme, host and path.", - "type": "object", - "properties": { - "raw": { - "type": ["string", "null"], - "description": "The raw, unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. 
For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", - "maxLength": 1024 - }, - "protocol": { - "type": ["string", "null"], - "description": "The protocol of the request, e.g. 'https:'.", - "maxLength": 1024 - }, - "full": { - "type": ["string", "null"], - "description": "The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top.", - "maxLength": 1024 - }, - "hostname": { - "type": ["string", "null"], - "description": "The hostname of the request, e.g. 'example.com'.", - "maxLength": 1024 - }, - "port": { - "type": ["string", "integer","null"], - "description": "The port of the request, e.g. '443'", - "maxLength": 1024 - }, - "pathname": { - "type": ["string", "null"], - "description": "The path of the request, e.g. '/search'", - "maxLength": 1024 - }, - "search": { - "description": "The search describes the query string of the request. It is expected to have values delimited by ampersands.", - "type": ["string", "null"], - "maxLength": 1024 - }, - "hash": { - "type": ["string", "null"], - "description": "The hash of the request URL, e.g. 'top'", - "maxLength": 1024 - } - } - }, - "cookies": { - "description": "A parsed key-value object of cookies", - "type": ["object", "null"] - } - }, - "required": ["url", "method"] - }, - "tags": { - "$id": "docs/spec/tags.json", - "title": "Tags", - "type": ["object", "null"], - "description": "A flat mapping of user-defined tags with string, boolean or number values.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["string", "boolean", "number", "null"], - "maxLength": 1024 - } - }, - "additionalProperties": false - }, - "user": { - "description": "Describes the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", - "$id": "docs/spec/user.json", - "title": "User", - "type": ["object", "null"], - "properties": { - "id": { - "description": "Identifier of the logged in user, e.g. the primary key of the user", - "type": ["string", "integer", "null"], - "maxLength": 1024 - }, - "email": { - "description": "Email of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - }, - "username": { - "description": "The username of the logged in user", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "page": { - "description": "", - "type": ["object", "null"], - "properties": { - "referer": { - "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", - "type": ["string", "null"] - }, - "url": { - "description": "RUM specific field that stores the URL of the current page", - "type": ["string", "null"] - } - } - }, - "service": { - "description": "Service related information can be sent per event. Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", - "$id": "docs/spec/service.json", - "title": "Service", - "type": ["object", "null"], - "properties": { - "agent": { - "description": "Name and version of the Elastic APM agent", - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the Elastic APM agent, e.g. 
\"Python\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "ephemeral_id": { - "description": "Free format ID used for metrics correlation by some agents", - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "framework": { - "description": "Name and version of the web framework used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "language": { - "description": "Name and version of the programming language used", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "name": { - "description": "Immutable name of the service emitting this event", - "type": ["string", "null"], - "pattern": "^[a-zA-Z0-9 _-]+$", - "maxLength": 1024 - }, - "environment": { - "description": "Environment name of the service, e.g. \"production\" or \"staging\"", - "type": ["string", "null"], - "maxLength": 1024 - }, - "runtime": { - "description": "Name and version of the language runtime running this service", - "type": ["object", "null"], - "properties": { - "name": { - "type": ["string", "null"], - "maxLength": 1024 - }, - "version": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - }, - "version": { - "description": "Version of the service emitting this event", - "type": ["string", "null"], - "maxLength": 1024 - }, - "node": { - "description": "Unique meaningful name of the service node.", - "type": ["object", "null"], - "properties": { - "configured_name": { - "type": ["string", "null"], - "maxLength": 1024 - } - } - } - } - }, - "message": { - "$id": "docs/spec/message.json", - "title": "Message", - "description": "Details related to message receiving and publishing if the captured event integrates with a messaging system", - "type": ["object", "null"], - "properties": { - "queue": { - "type": ["object", "null"], - "properties": { - "name": { - "description": "Name of the message queue where the message is received.", - "type": ["string","null"], - "maxLength": 1024 - } - } - }, - "age": { - "type": ["object", "null"], - "properties": { - "ms": { - "description": "The age of the message in milliseconds. If the instrumented messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.", - "type": ["integer", "null"] - } - } - }, - "body": { - "description": "messsage body, similar to an http request body", - "type": ["string", "null"] - }, - "headers": { - "description": "messsage headers, similar to http request headers", - "type": ["object", "null"], - "patternProperties": { - "[.*]*$": { - "type": ["string", "array", "null"], - "items": { - "type": ["string"] - } - } - } - } - } - } - } - }, - "duration": { - "type": "number", - "description": "How long the transaction took to complete, in ms with 3 decimal points", - "minimum": 0 - }, - "result": { - "type": ["string", "null"], - "description": "The result of the transaction. 
For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.", - "maxLength": 1024 - }, - "outcome": { - "$id": "docs/spec/outcome.json", - "title": "Outcome", - "type": ["string", "null"], - "enum": [null, "success", "failure", "unknown"], - "description": "The outcome of the transaction: success, failure, or unknown. This is similar to 'result', but has a limited set of permitted values describing the success or failure of the transaction from the service's perspective. This field can be used for calculating error rates for incoming requests." - }, - "marks": { - "type": ["object", "null"], - "description": "A mark captures the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent.", - "patternProperties": { - "^[^.*\"]*$": { - "$id": "docs/spec/transactions/mark.json", - "type": ["object", "null"], - "description": "A mark captures the timing in milliseconds of a significant event during the lifetime of a transaction. Every mark is a simple key value pair, where the value has to be a number, and can be set by the user or the agent.", - "patternProperties": { - "^[^.*\"]*$": { - "type": ["number", "null"] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - "sampled": { - "type": ["boolean", "null"], - "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true."
- }, - "experience": { - "$id": "docs/spec/rum_experience.json", - "title": "RUM Experience Metrics", - "description": "Metrics for measuring real user (browser) experience", - "type": ["object", "null"], - "properties": { - "cls": { - "type": ["number", "null"], - "description": "The Cumulative Layout Shift metric", - "minimum": 0 - }, - "tbt": { - "type": ["number", "null"], - "description": "The Total Blocking Time metric", - "minimum": 0 - }, - "fid": { - "type": ["number", "null"], - "description": "The First Input Delay metric", - "minimum": 0 - } - } - } - }, - "required": ["id", "trace_id", "span_count", "duration", "type"] - } - ] -} -` diff --git a/model/transaction_test.go b/model/transaction_test.go index 294b4fb236f..07744ec195c 100644 --- a/model/transaction_test.go +++ b/model/transaction_test.go @@ -20,26 +20,26 @@ package model import ( "context" "fmt" - "net" - "net/http" "testing" "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/common" - - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/transform" ) +func TestTransactionTransformEmpty(t *testing.T) { + event := APMEvent{Transaction: &Transaction{}} + beatEvent := event.BeatEvent(context.Background()) + assert.Empty(t, beatEvent.Fields) +} + func TestTransactionTransform(t *testing.T) { id := "123" result := "tx result" - sampled := false dropped, startedSpans := 5, 14 name := "mytransaction" + duration := 65980 * time.Microsecond tests := []struct { Transaction Transaction @@ -49,24 +49,19 @@ func TestTransactionTransform(t *testing.T) { { Transaction: Transaction{}, Output: common.MapStr{ - "id": "", - "type": "", - "duration": common.MapStr{"us": 0}, - "sampled": true, + "duration": common.MapStr{"us": 65980}, }, - Msg: "Empty Event", + Msg: "Empty Transaction", }, { Transaction: Transaction{ - ID: id, - Type: "tx", - Duration: 65.98, + ID: id, + Type: "tx", }, Output: common.MapStr{ "id": id, "type": "tx", "duration": common.MapStr{"us": 65980}, - "sampled": true, }, Msg: "SpanCount empty", }, @@ -74,7 +69,6 @@ func TestTransactionTransform(t *testing.T) { Transaction: Transaction{ ID: id, Type: "tx", - Duration: 65.98, SpanCount: SpanCount{Started: &startedSpans}, }, Output: common.MapStr{ @@ -82,7 +76,6 @@ func TestTransactionTransform(t *testing.T) { "type": "tx", "duration": common.MapStr{"us": 65980}, "span_count": common.MapStr{"started": 14}, - "sampled": true, }, Msg: "SpanCount only contains `started`", }, @@ -90,7 +83,6 @@ func TestTransactionTransform(t *testing.T) { Transaction: Transaction{ ID: id, Type: "tx", - Duration: 65.98, SpanCount: SpanCount{Dropped: &dropped}, }, Output: common.MapStr{ @@ -98,7 +90,6 @@ func TestTransactionTransform(t *testing.T) { "type": "tx", "duration": common.MapStr{"us": 65980}, "span_count": common.MapStr{"dropped": 5}, - "sampled": true, }, Msg: "SpanCount only contains `dropped`", }, @@ -108,10 +99,9 @@ func TestTransactionTransform(t *testing.T) { Name: name, Type: "tx", Result: result, - Timestamp: time.Now(), - Duration: 65.98, - Sampled: &sampled, + Sampled: true, SpanCount: SpanCount{Started: &startedSpans, Dropped: &dropped}, + Root: true, }, Output: common.MapStr{ "id": id, @@ -120,70 +110,59 @@ func TestTransactionTransform(t *testing.T) { "result": "tx result", "duration": common.MapStr{"us": 65980}, "span_count": common.MapStr{"started": 14, "dropped": 5}, - "sampled": false, + "sampled": true, + "root": true, }, Msg: "Full 
Event", }, } for idx, test := range tests { - output := test.Transaction.Transform(context.Background(), &transform.Config{}) - assert.Equal(t, test.Output, output[0].Fields["transaction"], fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) + event := APMEvent{ + Processor: TransactionProcessor, + Transaction: &test.Transaction, + Event: Event{Duration: duration}, + } + beatEvent := event.BeatEvent(context.Background()) + assert.Equal(t, test.Output, beatEvent.Fields["transaction"], fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) } } -func TestTransactionTransformOutcome(t *testing.T) { - tx := Transaction{Outcome: "success"} - events := tx.Transform(context.Background(), &transform.Config{}) - require.Len(t, events, 1) - assert.Equal(t, common.MapStr{"outcome": "success"}, events[0].Fields["event"]) -} - func TestEventsTransformWithMetadata(t *testing.T) { hostname := "a.b.c" architecture := "darwin" platform := "x64" - timestamp := time.Date(2019, 1, 3, 15, 17, 4, 908.596*1e6, time.FixedZone("+0100", 3600)) - timestampUs := timestamp.UnixNano() / 1000 - id, name, ip, userAgent := "123", "jane", "63.23.123.4", "node-js-2.3" - url, referer := "https://localhost", "http://localhost" + id, name, userAgent := "123", "jane", "node-js-2.3" + url := "https://localhost" serviceName, serviceNodeName, serviceVersion := "myservice", "service-123", "2.1.3" - eventMetadata := Metadata{ + + txWithContext := APMEvent{ + Processor: TransactionProcessor, Service: Service{ Name: serviceName, Version: serviceVersion, Node: ServiceNode{Name: serviceNodeName}, }, - System: System{ - ConfiguredHostname: name, - DetectedHostname: hostname, - Architecture: architecture, - Platform: platform, + Host: Host{ + Name: name, + Hostname: hostname, + Architecture: architecture, + OS: OS{Platform: platform}, }, User: User{ID: id, Name: name}, UserAgent: UserAgent{Original: userAgent}, - Client: Client{IP: net.ParseIP(ip)}, - Labels: common.MapStr{"a": true}, + URL: URL{Original: url}, + Transaction: &Transaction{ + Custom: common.MapStr{"foo.bar": "baz"}, + Message: &Message{QueueName: "routeUser"}, + Sampled: true, + }, } - request := Req{Method: "post", Socket: &Socket{}, Headers: http.Header{}} - response := Resp{Finished: new(bool), MinimalResp: MinimalResp{Headers: http.Header{"content-type": []string{"text/html"}}}} - txWithContext := Transaction{ - Metadata: eventMetadata, - Timestamp: timestamp, - Labels: &Labels{"a": "b"}, - Page: &Page{URL: &URL{Original: &url}, Referer: &referer}, - HTTP: &Http{Request: &request, Response: &response}, - URL: &URL{Original: &url}, - Custom: &Custom{"foo": "bar"}, - Message: &Message{QueueName: tests.StringPtr("routeUser")}, - } - events := txWithContext.Transform(context.Background(), &transform.Config{}) - require.Len(t, events, 1) - assert.Equal(t, events[0].Fields, common.MapStr{ + event := txWithContext.BeatEvent(context.Background()) + assert.Equal(t, common.MapStr{ + "processor": common.MapStr{"name": "transaction", "event": "transaction"}, "user": common.MapStr{"id": "123", "name": "jane"}, - "client": common.MapStr{"ip": ip}, - "source": common.MapStr{"ip": ip}, "user_agent": common.MapStr{"original": userAgent}, "host": common.MapStr{ "architecture": "darwin", @@ -193,40 +172,26 @@ func TestEventsTransformWithMetadata(t *testing.T) { "platform": "x64", }, }, - "processor": common.MapStr{ - "event": "transaction", - "name": "transaction", - }, "service": common.MapStr{ "name": serviceName, "version": serviceVersion, "node": common.MapStr{"name": serviceNodeName}, 
}, - "timestamp": common.MapStr{"us": timestampUs}, "transaction": common.MapStr{ "duration": common.MapStr{"us": 0}, - "id": "", - "type": "", "sampled": true, - "page": common.MapStr{"url": url, "referer": referer}, "custom": common.MapStr{ - "foo": "bar", + "foo_bar": "baz", }, "message": common.MapStr{"queue": common.MapStr{"name": "routeUser"}}, }, - "event": common.MapStr{"outcome": ""}, - "labels": common.MapStr{"a": "b"}, - "url": common.MapStr{"original": url}, - "http": common.MapStr{ - "request": common.MapStr{"method": "post", "referrer": referer}, - "response": common.MapStr{"finished": false, "headers": common.MapStr{"content-type": []string{"text/html"}}}}, - }) + "url": common.MapStr{ + "original": url, + }, + }, event.Fields) } -func TestTransactionTransformPage(t *testing.T) { - id := "123" - urlExample := "http://example.com/path" - +func TestTransactionTransformMarks(t *testing.T) { tests := []struct { Transaction Transaction Output common.MapStr @@ -234,49 +199,25 @@ func TestTransactionTransformPage(t *testing.T) { }{ { Transaction: Transaction{ - ID: id, - Type: "tx", - Duration: 65.98, - Page: &Page{ - URL: ParseURL(urlExample, ""), - Referer: nil, + Marks: TransactionMarks{ + "a.b": TransactionMark{ + "c.d": 123, + }, }, }, Output: common.MapStr{ - "domain": "example.com", - "full": "http://example.com/path", - "original": "http://example.com/path", - "path": "/path", - "scheme": "http", - }, - Msg: "With page URL", - }, - { - Transaction: Transaction{ - ID: id, - Type: "tx", - Timestamp: time.Now(), - Duration: 65.98, - URL: ParseURL("https://localhost:8200/", ""), - Page: &Page{ - URL: ParseURL(urlExample, ""), - Referer: nil, + "a_b": common.MapStr{ + "c_d": common.Float(123), }, }, - Output: common.MapStr{ - "domain": "localhost", - "full": "https://localhost:8200/", - "original": "https://localhost:8200/", - "path": "/", - "port": 8200, - "scheme": "https", - }, - Msg: "With Page URL and Request URL", + Msg: "Unsanitized transaction mark names", }, } for idx, test := range tests { - output := test.Transaction.Transform(context.Background(), &transform.Config{}) - assert.Equal(t, test.Output, output[0].Fields["url"], fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) + event := APMEvent{Transaction: &test.Transaction} + beatEvent := event.BeatEvent(context.Background()) + marks, _ := beatEvent.Fields.GetValue("transaction.marks") + assert.Equal(t, test.Output, marks, fmt.Sprintf("Failed at idx %v; %s", idx, test.Msg)) } } diff --git a/model/url.go b/model/url.go new file mode 100644 index 00000000000..4ec4c1f8c08 --- /dev/null +++ b/model/url.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package model + +import ( + "net/url" + "strconv" + + "github.com/elastic/beats/v7/libbeat/common" +) + +// URL describes a URL and its components +type URL struct { + Original string + Scheme string + Full string + Domain string + Port int + Path string + Query string + Fragment string +} + +func ParseURL(original, defaultHostname, defaultScheme string) URL { + original = truncate(original) + url, err := url.Parse(original) + if err != nil { + return URL{Original: original} + } + if url.Scheme == "" { + url.Scheme = defaultScheme + if url.Scheme == "" { + url.Scheme = "http" + } + } + if url.Host == "" { + url.Host = defaultHostname + } + out := URL{ + Original: original, + Scheme: url.Scheme, + Full: truncate(url.String()), + Domain: truncate(url.Hostname()), + Path: truncate(url.Path), + Query: truncate(url.RawQuery), + Fragment: url.Fragment, + } + if port := url.Port(); port != "" { + if intv, err := strconv.Atoi(port); err == nil { + out.Port = intv + } + } + return out +} + +// truncate returns s truncated at 1024 runes. +func truncate(s string) string { + var j int + for i := range s { + if j == 1024 { + return s[:i] + } + j++ + } + return s +} + +// fields returns common.MapStr holding transformed data for attribute url. +func (url *URL) fields() common.MapStr { + var fields mapStr + fields.maybeSetString("full", url.Full) + fields.maybeSetString("fragment", url.Fragment) + fields.maybeSetString("domain", url.Domain) + fields.maybeSetString("path", url.Path) + if url.Port > 0 { + fields.set("port", url.Port) + } + fields.maybeSetString("original", url.Original) + fields.maybeSetString("scheme", url.Scheme) + fields.maybeSetString("query", url.Query) + return common.MapStr(fields) +} diff --git a/model/user.go b/model/user.go index a8ed55c692e..525934df710 100644 --- a/model/user.go +++ b/model/user.go @@ -22,16 +22,15 @@ import ( ) type User struct { - ID string - Email string - Name string + Domain string + ID string + Email string + Name string } -func (u *User) Fields() common.MapStr { - if u == nil { - return nil - } +func (u *User) fields() common.MapStr { var user mapStr + user.maybeSetString("domain", u.Domain) user.maybeSetString("id", u.ID) user.maybeSetString("email", u.Email) user.maybeSetString("name", u.Name) diff --git a/model/user_test.go b/model/user_test.go index 6102900c49a..eb037ffd3b3 100644 --- a/model/user_test.go +++ b/model/user_test.go @@ -26,6 +26,7 @@ import ( ) func TestUserFields(t *testing.T) { + domain := "ldap://abc" id := "1234" email := "test@mail.co" name := "user123" @@ -40,20 +41,22 @@ func TestUserFields(t *testing.T) { }, { User: User{ - ID: id, - Email: email, - Name: name, + Domain: domain, + ID: id, + Email: email, + Name: name, }, Output: common.MapStr{ - "id": "1234", - "email": "test@mail.co", - "name": "user123", + "domain": "ldap://abc", + "id": "1234", + "email": "test@mail.co", + "name": "user123", }, }, } for _, test := range tests { - output := test.User.Fields() + output := test.User.fields() assert.Equal(t, test.Output, output) } } diff --git a/processor/asset/processor.go b/processor/asset/processor.go deleted file mode 100644 index be028cc58a0..00000000000 --- a/processor/asset/processor.go +++ /dev/null @@ -1,28 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V.
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package asset - -import ( - "github.com/elastic/apm-server/transform" -) - -type Processor interface { - Validate(map[string]interface{}) error - Decode(map[string]interface{}) ([]transform.Transformable, error) - Name() string -} diff --git a/processor/asset/sourcemap/package_tests/TestProcessSourcemapFull.approved.json b/processor/asset/sourcemap/package_tests/TestProcessSourcemapFull.approved.json deleted file mode 100644 index b67545d760a..00000000000 --- a/processor/asset/sourcemap/package_tests/TestProcessSourcemapFull.approved.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "events": [ - { - "@timestamp": "dynamic", - "processor": { - "event": "sourcemap", - "name": "sourcemap" - }, - "sourcemap": { - "bundle_filepath": "js/bundle.js", - "service": { - "name": "service", - "version": "1" - }, - "sourcemap": "{\"file\":\"bundle.js\",\"mappings\":\"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\"names\":[\"modules\",\"__webpack_require__\",\"moduleId\",\"installedModules\",\"exports\",\"module\",\"id\",\"loaded\",\"call\",\"m\",\"c\",\"p\",\"foo\",\"console\",\"log\",\"foobar\"],\"sourceRoot\":\"\",\"sources\":[\"webpack:///bundle.js\",\"webpack:///webpack/bootstrap 6002740481c9666b0d38\",\"webpack:///./scripts/index.js\",\"webpack:///./index.html\",\"webpack:///./scripts/app.js\"],\"sourcesContent\":[\"/******/ (function(modules) { // webpackBootstrap\\n/******/ \\t// The module cache\\n/******/ \\tvar installedModules = {};\\n/******/\\n/******/ \\t// The require function\\n/******/ \\tfunction __webpack_require__(moduleId) {\\n/******/\\n/******/ \\t\\t// Check if module is in cache\\n/******/ \\t\\tif(installedModules[moduleId])\\n/******/ \\t\\t\\treturn installedModules[moduleId].exports;\\n/******/\\n/******/ \\t\\t// Create a new module (and put it into the cache)\\n/******/ \\t\\tvar module = installedModules[moduleId] = {\\n/******/ \\t\\t\\texports: {},\\n/******/ \\t\\t\\tid: moduleId,\\n/******/ \\t\\t\\tloaded: false\\n/******/ \\t\\t};\\n/******/\\n/******/ \\t\\t// Execute the module function\\n/******/ \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n/******/\\n/******/ \\t\\t// Flag the module as loaded\\n/******/ \\t\\tmodule.loaded = true;\\n/******/\\n/******/ \\t\\t// Return the exports of the module\\n/******/ \\t\\treturn module.exports;\\n/******/ \\t}\\n/******/\\n/******/\\n/******/ \\t// expose the modules object (__webpack_modules__)\\n/******/ \\t__webpack_require__.m = modules;\\n/******/\\n/******/ \\t// expose the module cache\\n/******/ 
\\t__webpack_require__.c = installedModules;\\n/******/\\n/******/ \\t// __webpack_public_path__\\n/******/ \\t__webpack_require__.p = \\\"\\\";\\n/******/\\n/******/ \\t// Load entry module and return exports\\n/******/ \\treturn __webpack_require__(0);\\n/******/ })\\n/************************************************************************/\\n/******/ ([\\n/* 0 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\t// Webpack\\n\\t__webpack_require__(1)\\n\\t\\n\\t__webpack_require__(2)\\n\\t\\n\\tfoo()\\n\\n\\n/***/ },\\n/* 1 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\tmodule.exports = __webpack_require__.p + \\\"index.html\\\"\\n\\n/***/ },\\n/* 2 */\\n/***/ function(module, exports) {\\n\\n\\tfunction foo() {\\n\\t console.log(foobar)\\n\\t}\\n\\t\\n\\tfoo()\\n\\n\\n/***/ }\\n/******/ ]);\\n\\n\\n/** WEBPACK FOOTER **\\n ** bundle.js\\n **/\",\" \\t// The module cache\\n \\tvar installedModules = {};\\n\\n \\t// The require function\\n \\tfunction __webpack_require__(moduleId) {\\n\\n \\t\\t// Check if module is in cache\\n \\t\\tif(installedModules[moduleId])\\n \\t\\t\\treturn installedModules[moduleId].exports;\\n\\n \\t\\t// Create a new module (and put it into the cache)\\n \\t\\tvar module = installedModules[moduleId] = {\\n \\t\\t\\texports: {},\\n \\t\\t\\tid: moduleId,\\n \\t\\t\\tloaded: false\\n \\t\\t};\\n\\n \\t\\t// Execute the module function\\n \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n\\n \\t\\t// Flag the module as loaded\\n \\t\\tmodule.loaded = true;\\n\\n \\t\\t// Return the exports of the module\\n \\t\\treturn module.exports;\\n \\t}\\n\\n\\n \\t// expose the modules object (__webpack_modules__)\\n \\t__webpack_require__.m = modules;\\n\\n \\t// expose the module cache\\n \\t__webpack_require__.c = installedModules;\\n\\n \\t// __webpack_public_path__\\n \\t__webpack_require__.p = \\\"\\\";\\n\\n \\t// Load entry module and return exports\\n \\treturn __webpack_require__(0);\\n\\n\\n\\n/** WEBPACK FOOTER **\\n ** webpack/bootstrap 6002740481c9666b0d38\\n **/\",\"// Webpack\\nrequire('../index.html')\\n\\nrequire('./app')\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/index.js\\n ** module id = 0\\n ** module chunks = 0\\n **/\",\"module.exports = __webpack_public_path__ + \\\"index.html\\\"\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./index.html\\n ** module id = 1\\n ** module chunks = 0\\n **/\",\"function foo() {\\n console.log(foobar)\\n}\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/app.js\\n ** module id = 2\\n ** module chunks = 0\\n **/\"],\"version\":3}" - } - } - ] -} diff --git a/processor/asset/sourcemap/package_tests/TestProcessSourcemapMinimalPayload.approved.json b/processor/asset/sourcemap/package_tests/TestProcessSourcemapMinimalPayload.approved.json deleted file mode 100644 index 0708ee164cd..00000000000 --- a/processor/asset/sourcemap/package_tests/TestProcessSourcemapMinimalPayload.approved.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "events": [ - { - "@timestamp": "dynamic", - "processor": { - "event": "sourcemap", - "name": "sourcemap" - }, - "sourcemap": { - "bundle_filepath": "js/bundle.js", - "service": { - "name": "service", - "version": "1" - }, - "sourcemap": 
"{\"mappings\":\"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\"names\":[\"modules\",\"__webpack_require__\",\"moduleId\",\"installedModules\",\"exports\",\"module\",\"id\",\"loaded\",\"call\",\"m\",\"c\",\"p\",\"foo\",\"console\",\"log\",\"foobar\"],\"sources\":[\"webpack:///bundle.js\",\"webpack:///webpack/bootstrap 6002740481c9666b0d38\",\"webpack:///./scripts/index.js\",\"webpack:///./index.html\",\"webpack:///./scripts/app.js\"],\"version\":3}" - } - } - ] -} diff --git a/processor/asset/sourcemap/package_tests/doc.go b/processor/asset/sourcemap/package_tests/doc.go deleted file mode 100644 index 8c684cf7680..00000000000 --- a/processor/asset/sourcemap/package_tests/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package package_tests diff --git a/processor/asset/sourcemap/package_tests/processor_test.go b/processor/asset/sourcemap/package_tests/processor_test.go deleted file mode 100644 index f3cdbf2ebb2..00000000000 --- a/processor/asset/sourcemap/package_tests/processor_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package package_tests - -import ( - "context" - "testing" - - "github.com/elastic/apm-server/approvaltest" - "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/model/sourcemap/generated/schema" - - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/beats/v7/libbeat/beat" - - "github.com/elastic/apm-server/processor/asset/sourcemap" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/transform" -) - -var ( - procSetup = tests.ProcessorSetup{ - Proc: &TestProcessor{Processor: sourcemap.Processor}, - FullPayloadPath: "../testdata/sourcemap/payload.json", - TemplatePaths: []string{"../../../../model/sourcemap/_meta/fields.yml"}, - Schema: schema.PayloadSchema, - } -) - -// ensure all valid documents pass through the whole validation and transformation process -func TestSourcemapProcessorOK(t *testing.T) { - data := []struct { - Name string - Path string - }{ - {Name: "TestProcessSourcemapFull", Path: "../testdata/sourcemap/payload.json"}, - {Name: "TestProcessSourcemapMinimalPayload", Path: "../testdata/sourcemap/minimal_payload.json"}, - } - - for _, info := range data { - t.Run(info.Name, func(t *testing.T) { - p := sourcemap.Processor - - data, err := loader.LoadData(info.Path) - require.NoError(t, err) - - err = p.Validate(data) - require.NoError(t, err) - - payload, err := p.Decode(data) - require.NoError(t, err) - - var events []beat.Event - for _, transformable := range payload { - events = append(events, transformable.Transform(context.Background(), &transform.Config{})...) - } - docs := beatertest.EncodeEventDocs(events...) - approvaltest.ApproveEventDocs(t, info.Name, docs, "@timestamp") - }) - } -} - -func TestPayloadAttrsMatchFields(t *testing.T) { - procSetup.PayloadAttrsMatchFields(t, tests.NewSet("sourcemap.sourcemap"), tests.NewSet()) -} - -func TestPayloadAttrsMatchJsonSchema(t *testing.T) { - procSetup.PayloadAttrsMatchJsonSchema(t, - tests.NewSet("sourcemap", "sourcemap.file", "sourcemap.names", - "sourcemap.sources", "sourcemap.sourceRoot"), tests.NewSet()) -} - -func TestAttributesPresenceRequirementInSourcemap(t *testing.T) { - procSetup.AttrsPresence(t, - tests.NewSet("service_name", "service_version", - "bundle_filepath", "sourcemap"), nil) -} - -func TestKeywordLimitationOnSourcemapAttributes(t *testing.T) { - mapping := []tests.FieldTemplateMapping{ - {Template: "sourcemap.service.name", Mapping: "service_name"}, - {Template: "sourcemap.service.version", Mapping: "service_version"}, - {Template: "sourcemap.bundle_filepath", Mapping: "bundle_filepath"}, - } - - procSetup.KeywordLimitation(t, tests.NewSet(), mapping) -} - -func TestPayloadDataForSourcemap(t *testing.T) { - type val []interface{} - payloadData := []tests.SchemaTestData{ - // add test data for testing - // * specific edge cases - // * multiple allowed datatypes - // * regex pattern, time formats - // * length restrictions, other than keyword length restrictions - - {Key: "sourcemap", Invalid: []tests.Invalid{ - {Msg: `error validating sourcemap`, Values: val{""}}, - {Msg: `sourcemap not in expected format`, Values: val{[]byte{}}}}}, - {Key: "service_name", Valid: val{tests.Str1024}, - Invalid: []tests.Invalid{ - {Msg: `service_name/minlength`, Values: val{""}}, - {Msg: `service_name/maxlength`, Values: val{tests.Str1025}}, - {Msg: `service_name/pattern`, Values: val{tests.Str1024Special}}}}, - {Key: "service_version",
Valid: val{tests.Str1024}, - Invalid: []tests.Invalid{{Msg: `service_version/minlength`, Values: val{""}}}}, - {Key: "bundle_filepath", Valid: []interface{}{tests.Str1024}, - Invalid: []tests.Invalid{{Msg: `bundle_filepath/minlength`, Values: val{""}}}}, - } - procSetup.DataValidation(t, payloadData) -} diff --git a/processor/asset/sourcemap/package_tests/test_processor.go b/processor/asset/sourcemap/package_tests/test_processor.go deleted file mode 100644 index 51c4aa40879..00000000000 --- a/processor/asset/sourcemap/package_tests/test_processor.go +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package package_tests - -import ( - "context" - "encoding/json" - - "github.com/elastic/apm-server/processor/asset" - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/apm-server/transform" - "github.com/elastic/beats/v7/libbeat/beat" -) - -type TestProcessor struct { - asset.Processor -} - -func (p *TestProcessor) LoadPayload(path string) (interface{}, error) { - return loader.LoadData(path) -} - -func (p *TestProcessor) Decode(input interface{}) error { - _, err := p.Processor.Decode(input.(map[string]interface{})) - return err -} - -func (p *TestProcessor) Validate(input interface{}) error { - return p.Processor.Validate(input.(map[string]interface{})) -} - -func (p *TestProcessor) Process(buf []byte) ([]beat.Event, error) { - var pl map[string]interface{} - err := json.Unmarshal(buf, &pl) - if err != nil { - return nil, err - } - - err = p.Processor.Validate(pl) - if err != nil { - return nil, err - } - transformables, err := p.Processor.Decode(pl) - if err != nil { - return nil, err - } - - var events []beat.Event - for _, transformable := range transformables { - events = append(events, transformable.Transform(context.Background(), &transform.Config{})...) - } - return events, nil -} diff --git a/processor/asset/sourcemap/sourcemap.go b/processor/asset/sourcemap/sourcemap.go deleted file mode 100644 index 70ee08d5109..00000000000 --- a/processor/asset/sourcemap/sourcemap.go +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package sourcemap - -import ( - parser "github.com/go-sourcemap/sourcemap" - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" - - "github.com/elastic/beats/v7/libbeat/monitoring" - - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/modeldecoder" - "github.com/elastic/apm-server/transform" - "github.com/elastic/apm-server/validation" -) - -const eventName = "sourcemap" - -var ( - Processor = &sourcemapProcessor{ - PayloadSchema: modeldecoder.SourcemapSchema, - DecodingCount: monitoring.NewInt(model.SourcemapMetrics, "decoding.count"), - DecodingError: monitoring.NewInt(model.SourcemapMetrics, "decoding.errors"), - ValidateCount: monitoring.NewInt(model.SourcemapMetrics, "validation.count"), - ValidateError: monitoring.NewInt(model.SourcemapMetrics, "validation.errors"), - } -) - -type sourcemapProcessor struct { - PayloadKey string - PayloadSchema *jsonschema.Schema - DecodingCount *monitoring.Int - DecodingError *monitoring.Int - ValidateCount *monitoring.Int - ValidateError *monitoring.Int -} - -func (p *sourcemapProcessor) Name() string { - return eventName -} - -func (p *sourcemapProcessor) Decode(raw map[string]interface{}) ([]transform.Transformable, error) { - p.DecodingCount.Inc() - transformable, err := modeldecoder.DecodeSourcemap(raw) - if err != nil { - p.DecodingError.Inc() - return nil, err - } - - return []transform.Transformable{transformable}, err -} - -func (p *sourcemapProcessor) Validate(raw map[string]interface{}) error { - p.ValidateCount.Inc() - - smap, ok := raw["sourcemap"].(string) - if !ok { - if s := raw["sourcemap"]; s == nil { - return errors.New(`missing properties: "sourcemap", expected sourcemap to be sent as string, but got null`) - } else { - return errors.New("sourcemap not in expected format") - } - } - - _, err := parser.Parse("", []byte(smap)) - if err != nil { - return errors.Wrap(err, "error validating sourcemap") - } - - err = validation.Validate(raw, p.PayloadSchema) - if err != nil { - p.ValidateError.Inc() - } - return err -} diff --git a/processor/otel/consumer.go b/processor/otel/consumer.go deleted file mode 100644 index b57e71f280f..00000000000 --- a/processor/otel/consumer.go +++ /dev/null @@ -1,759 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
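The deleted consumer that follows maps HTTP status codes onto transaction results and span/transaction outcomes. A standalone sketch of that mapping, restating the statusCodeResult, serverStatusCodeOutcome and clientStatusCodeOutcome helpers from the file below (the Sprintf-based result formatting is an equivalent rewrite, not the original array lookup):

package main

import "fmt"

// statusCodeResult collapses a status code into the transaction result
// bucket, e.g. 404 -> "HTTP 4xx", as the deleted consumer does.
func statusCodeResult(statusCode int) string {
	if i := statusCode / 100; i >= 1 && i <= 5 {
		return fmt.Sprintf("HTTP %dxx", i)
	}
	return fmt.Sprintf("HTTP %d", statusCode)
}

func main() {
	for _, code := range []int{200, 302, 404, 500} {
		serverOutcome := "success" // server side: only 5xx counts as failure
		if code >= 500 {
			serverOutcome = "failure"
		}
		clientOutcome := "success" // client side: 4xx and 5xx count as failures
		if code >= 400 {
			clientOutcome = "failure"
		}
		fmt.Printf("%d => %s (server: %s, client: %s)\n",
			code, statusCodeResult(code), serverOutcome, clientOutcome)
	}
}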
- -package otel - -import ( - "context" - "fmt" - "net" - "net/url" - "strconv" - "strings" - "time" - - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata" - tracetranslator "github.com/open-telemetry/opentelemetry-collector/translator/trace" - - "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/logp" - - logs "github.com/elastic/apm-server/log" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/utility" -) - -const ( - AgentNameJaeger = "Jaeger" - - sourceFormatJaeger = "jaeger" - keywordLength = 1024 - dot = "." - underscore = "_" - - outcomeSuccess = "success" - outcomeFailure = "failure" - outcomeUnknown = "unknown" -) - -// Consumer transforms open-telemetry data to be compatible with elastic APM data -type Consumer struct { - Reporter publish.Reporter -} - -// ConsumeTraceData consumes OpenTelemetry trace data, -// converting into Elastic APM events and reporting to the Elastic APM schema. -func (c *Consumer) ConsumeTraceData(ctx context.Context, td consumerdata.TraceData) error { - batch := c.convert(td) - return c.Reporter(ctx, publish.PendingReq{ - Transformables: batch.Transformables(), - Trace: true, - }) -} - -func (c *Consumer) convert(td consumerdata.TraceData) *model.Batch { - md := model.Metadata{} - parseMetadata(td, &md) - hostname := md.System.DetectedHostname - - logger := logp.NewLogger(logs.Otel) - batch := model.Batch{} - for _, otelSpan := range td.Spans { - if otelSpan == nil { - continue - } - - root := len(otelSpan.ParentSpanId) == 0 - - var parentID, spanID, traceID string - if td.SourceFormat == sourceFormatJaeger { - if !root { - parentID = formatJaegerSpanID(otelSpan.ParentSpanId) - } - - traceID = formatJaegerTraceID(otelSpan.TraceId) - spanID = formatJaegerSpanID(otelSpan.SpanId) - } else { - if !root { - parentID = fmt.Sprintf("%x", otelSpan.ParentSpanId) - } - - traceID = fmt.Sprintf("%x", otelSpan.TraceId) - spanID = fmt.Sprintf("%x", otelSpan.SpanId) - } - - startTime := parseTimestamp(otelSpan.StartTime) - var duration float64 - if otelSpan.EndTime != nil && !startTime.IsZero() { - duration = parseTimestamp(otelSpan.EndTime).Sub(startTime).Seconds() * 1000 - } - name := otelSpan.GetName().GetValue() - if root || otelSpan.Kind == tracepb.Span_SERVER { - transaction := model.Transaction{ - Metadata: md, - ID: spanID, - ParentID: parentID, - TraceID: traceID, - Timestamp: startTime, - Duration: duration, - Name: name, - } - parseTransaction(otelSpan, td.SourceFormat, hostname, &transaction) - batch.Transactions = append(batch.Transactions, &transaction) - for _, err := range parseErrors(logger, td.SourceFormat, otelSpan) { - addTransactionCtxToErr(transaction, err) - batch.Errors = append(batch.Errors, err) - } - - } else { - span := model.Span{ - Metadata: md, - ID: spanID, - ParentID: parentID, - TraceID: traceID, - Timestamp: startTime, - Duration: duration, - Name: name, - Outcome: outcomeUnknown, - } - parseSpan(otelSpan, td.SourceFormat, &span) - batch.Spans = append(batch.Spans, &span) - for _, err := range parseErrors(logger, td.SourceFormat, otelSpan) { - addSpanCtxToErr(span, hostname, err) - batch.Errors = append(batch.Errors, 
err) - } - } - } - return &batch -} - -func parseMetadata(td consumerdata.TraceData, md *model.Metadata) { - md.Service.Name = truncate(td.Node.GetServiceInfo().GetName()) - if md.Service.Name == "" { - md.Service.Name = "unknown" - } - - if ident := td.Node.GetIdentifier(); ident != nil { - md.Process.Pid = int(ident.Pid) - if hostname := truncate(ident.HostName); hostname != "" { - md.System.DetectedHostname = hostname - } - } - if languageName, ok := languageName[td.Node.GetLibraryInfo().GetLanguage()]; ok { - md.Service.Language.Name = languageName - } - - switch td.SourceFormat { - case sourceFormatJaeger: - // version is of format `Jaeger-<language>-<version>`, e.g. `Jaeger-Go-2.20.0` - nVersionParts := 3 - versionParts := strings.SplitN(td.Node.GetLibraryInfo().GetExporterVersion(), "-", nVersionParts) - if md.Service.Language.Name == "" && len(versionParts) == nVersionParts { - md.Service.Language.Name = versionParts[1] - } - if v := versionParts[len(versionParts)-1]; v != "" { - md.Service.Agent.Version = v - } else { - md.Service.Agent.Version = "unknown" - } - agentName := AgentNameJaeger - if md.Service.Language.Name != "" { - agentName = truncate(agentName + "/" + md.Service.Language.Name) - } - md.Service.Agent.Name = agentName - - if attributes := td.Node.GetAttributes(); attributes != nil { - if clientUUID, ok := attributes["client-uuid"]; ok { - md.Service.Agent.EphemeralID = truncate(clientUUID) - delete(td.Node.Attributes, "client-uuid") - } - if ip, ok := attributes["ip"]; ok { - md.System.IP = utility.ParseIP(ip) - delete(td.Node.Attributes, "ip") - } - } - default: - md.Service.Agent.Name = strings.Title(td.SourceFormat) - md.Service.Agent.Version = "unknown" - } - - if md.Service.Language.Name == "" { - md.Service.Language.Name = "unknown" - } - - md.Labels = make(common.MapStr) - for key, val := range td.Node.GetAttributes() { - md.Labels[replaceDots(key)] = truncate(val) - } - if t := td.Resource.GetType(); t != "" { - md.Labels["resource"] = truncate(t) - } - for key, val := range td.Resource.GetLabels() { - md.Labels[replaceDots(key)] = truncate(val) - } -} - -func parseTransaction(span *tracepb.Span, sourceFormat string, hostname string, event *model.Transaction) { - labels := make(common.MapStr) - var http model.Http - var httpStatusCode int - var message model.Message - var component string - var outcome, result string - var hasFailed bool - var isHTTP, isMessaging bool - var samplerType, samplerParam *tracepb.AttributeValue - for kDots, v := range span.Attributes.GetAttributeMap() { - if sourceFormat == sourceFormatJaeger { - switch kDots { - case "sampler.type": - samplerType = v - continue - case "sampler.param": - samplerParam = v - continue - } - } - - k := replaceDots(kDots) - switch v := v.Value.(type) { - case *tracepb.AttributeValue_BoolValue: - utility.DeepUpdate(labels, k, v.BoolValue) - if k == "error" { - hasFailed = v.BoolValue - } - case *tracepb.AttributeValue_DoubleValue: - utility.DeepUpdate(labels, k, v.DoubleValue) - case *tracepb.AttributeValue_IntValue: - switch kDots { - case "http.status_code": - httpStatusCode = int(v.IntValue) - isHTTP = true - default: - utility.DeepUpdate(labels, k, v.IntValue) - } - case *tracepb.AttributeValue_StringValue: - switch kDots { - case "span.kind": // filter out - case "http.method": - http.Request = &model.Req{Method: truncate(v.StringValue.Value)} - isHTTP = true - case "http.url", "http.path": - event.URL = model.ParseURL(v.StringValue.Value, hostname) - isHTTP = true - case "http.status_code": - if intv, err :=
strconv.Atoi(v.StringValue.Value); err == nil { - httpStatusCode = intv - } - isHTTP = true - case "http.protocol": - if strings.HasPrefix(v.StringValue.Value, "HTTP/") { - version := truncate(strings.TrimPrefix(v.StringValue.Value, "HTTP/")) - http.Version = &version - } else { - utility.DeepUpdate(labels, k, v.StringValue.Value) - } - isHTTP = true - case "message_bus.destination": - message.QueueName = &v.StringValue.Value - isMessaging = true - case "type": - event.Type = truncate(v.StringValue.Value) - case "service.version": - event.Metadata.Service.Version = truncate(v.StringValue.Value) - case "component": - component = truncate(v.StringValue.Value) - fallthrough - default: - utility.DeepUpdate(labels, k, truncate(v.StringValue.Value)) - } - } - } - - if event.Type == "" { - if isHTTP { - event.Type = "request" - } else if isMessaging { - event.Type = "messaging" - } else if component != "" { - event.Type = component - } else { - event.Type = "custom" - } - } - - if isHTTP { - if httpStatusCode == 0 { - httpStatusCode = int(span.GetStatus().GetCode()) - } - if httpStatusCode > 0 { - http.Response = &model.Resp{MinimalResp: model.MinimalResp{StatusCode: &httpStatusCode}} - result = statusCodeResult(httpStatusCode) - outcome = serverStatusCodeOutcome(httpStatusCode) - } - event.HTTP = &http - } else if isMessaging { - event.Message = &message - } - - if result == "" { - if hasFailed { - result = "Error" - outcome = outcomeFailure - } else { - result = "Success" - outcome = outcomeSuccess - } - } - event.Result = result - event.Outcome = outcome - - if samplerType != nil && samplerParam != nil { - // The client has reported its sampling rate, so - // we can use it to extrapolate span metrics. - parseSamplerAttributes(samplerType, samplerParam, &event.RepresentativeCount, labels) - } - - if len(labels) == 0 { - return - } - l := model.Labels(labels) - event.Labels = &l -} - -func parseSpan(span *tracepb.Span, sourceFormat string, event *model.Span) { - labels := make(common.MapStr) - - var http model.HTTP - var message model.Message - var db model.DB - var destination model.Destination - var destinationService model.DestinationService - var isDBSpan, isHTTPSpan, isMessagingSpan bool - var component string - var samplerType, samplerParam *tracepb.AttributeValue - for kDots, v := range span.Attributes.GetAttributeMap() { - if sourceFormat == sourceFormatJaeger { - switch kDots { - case "sampler.type": - samplerType = v - continue - case "sampler.param": - samplerParam = v - continue - } - } - - k := replaceDots(kDots) - switch v := v.Value.(type) { - case *tracepb.AttributeValue_BoolValue: - utility.DeepUpdate(labels, k, v.BoolValue) - case *tracepb.AttributeValue_DoubleValue: - utility.DeepUpdate(labels, k, v.DoubleValue) - case *tracepb.AttributeValue_IntValue: - switch kDots { - case "http.status_code": - code := int(v.IntValue) - http.StatusCode = &code - isHTTPSpan = true - case "peer.port": - port := int(v.IntValue) - destination.Port = &port - default: - utility.DeepUpdate(labels, k, v.IntValue) - } - case *tracepb.AttributeValue_StringValue: - switch kDots { - case "span.kind": // filter out - case "http.url": - url := truncate(v.StringValue.Value) - http.URL = &url - isHTTPSpan = true - case "http.method": - method := truncate(v.StringValue.Value) - http.Method = &method - isHTTPSpan = true - case "sql.query": - db.Statement = &v.StringValue.Value - if db.Type == nil { - dbType := "sql" - db.Type = &dbType - } - isDBSpan = true - case "db.statement": - db.Statement = 
&v.StringValue.Value - isDBSpan = true - case "db.instance": - val := truncate(v.StringValue.Value) - db.Instance = &val - isDBSpan = true - case "db.type": - val := truncate(v.StringValue.Value) - db.Type = &val - isDBSpan = true - case "db.user": - val := truncate(v.StringValue.Value) - db.UserName = &val - isDBSpan = true - case "peer.address": - val := truncate(v.StringValue.Value) - destinationService.Resource = &val - if !strings.ContainsRune(val, ':') || net.ParseIP(val) != nil { - // peer.address is not necessarily a hostname - // or IP address; it could be something like - // a JDBC connection string or ip:port. Ignore - // values containing colons, except for IPv6. - destination.Address = &val - } - case "peer.hostname", "peer.ipv4", "peer.ipv6": - val := truncate(v.StringValue.Value) - destination.Address = &val - case "peer.service": - val := truncate(v.StringValue.Value) - destinationService.Name = &val - if destinationService.Resource == nil { - // Prefer using peer.address for resource. - destinationService.Resource = &val - } - case "message_bus.destination": - val := truncate(v.StringValue.Value) - message.QueueName = &val - isMessagingSpan = true - case "component": - component = truncate(v.StringValue.Value) - fallthrough - default: - utility.DeepUpdate(labels, k, truncate(v.StringValue.Value)) - } - } - } - - if http.URL != nil { - if fullURL, err := url.Parse(*http.URL); err == nil { - url := url.URL{Scheme: fullURL.Scheme, Host: fullURL.Host} - hostname := truncate(url.Hostname()) - var port int - portString := url.Port() - if portString != "" { - port, _ = strconv.Atoi(portString) - } else { - port = schemeDefaultPort(url.Scheme) - } - - // Set destination.{address,port} from the HTTP URL, - // replacing peer.* based values to ensure consistency. - destination = model.Destination{Address: &hostname} - if port > 0 { - destination.Port = &port - } - - // Set destination.service.* from the HTTP URL, - // unless peer.service was specified. - if destinationService.Name == nil { - resource := url.Host - if port > 0 && port == schemeDefaultPort(url.Scheme) { - hasDefaultPort := portString != "" - if hasDefaultPort { - // Remove the default port from destination.service.name. - url.Host = hostname - } else { - // Add the default port to destination.service.resource. - resource = fmt.Sprintf("%s:%d", resource, port) - } - } - name := url.String() - destinationService.Name = &name - destinationService.Resource = &resource - } - } - } - - if destination != (model.Destination{}) { - event.Destination = &destination - } - - switch { - case isHTTPSpan: - if http.StatusCode == nil { - if code := int(span.GetStatus().GetCode()); code != 0 { - http.StatusCode = &code - } - } - if http.StatusCode != nil { - event.Outcome = clientStatusCodeOutcome(*http.StatusCode) - } - event.Type = "external" - subtype := "http" - event.Subtype = &subtype - event.HTTP = &http - case isDBSpan: - event.Type = "db" - if db.Type != nil && *db.Type != "" { - event.Subtype = db.Type - } - event.DB = &db - case isMessagingSpan: - event.Type = "messaging" - event.Message = &message - default: - event.Type = "custom" - if component != "" { - event.Subtype = &component - } - } - - if destinationService != (model.DestinationService{}) { - if destinationService.Type == nil { - // Copy span type to destination.service.type. 
- destinationService.Type = &event.Type - } - event.DestinationService = &destinationService - } - - if samplerType != nil && samplerParam != nil { - // The client has reported its sampling rate, so - // we can use it to extrapolate transaction metrics. - parseSamplerAttributes(samplerType, samplerParam, &event.RepresentativeCount, labels) - } - - if len(labels) == 0 { - return - } - event.Labels = labels -} - -func parseSamplerAttributes(samplerType, samplerParam *tracepb.AttributeValue, representativeCount *float64, labels common.MapStr) { - switch samplerType.GetStringValue().GetValue() { - case "probabilistic": - probability := samplerParam.GetDoubleValue() - if probability > 0 && probability <= 1 { - *representativeCount = 1 / probability - } - default: - utility.DeepUpdate(labels, "sampler_type", samplerType.GetStringValue().GetValue()) - switch v := samplerParam.Value.(type) { - case *tracepb.AttributeValue_BoolValue: - utility.DeepUpdate(labels, "sampler_param", v.BoolValue) - case *tracepb.AttributeValue_DoubleValue: - utility.DeepUpdate(labels, "sampler_param", v.DoubleValue) - } - } -} - -func parseErrors(logger *logp.Logger, source string, otelSpan *tracepb.Span) []*model.Error { - var errors []*model.Error - for _, log := range otelSpan.GetTimeEvents().GetTimeEvent() { - var isError, hasMinimalInfo bool - var err model.Error - var logMessage, exMessage, exType string - for k, v := range log.GetAnnotation().GetAttributes().GetAttributeMap() { - if source == sourceFormatJaeger { - switch v := v.Value.(type) { - case *tracepb.AttributeValue_StringValue: - vStr := v.StringValue.Value - switch k { - case "error", "error.object": - exMessage = vStr - hasMinimalInfo = true - isError = true - case "event": - if vStr == "error" { // according to opentracing spec - isError = true - } else if logMessage == "" { - // jaeger seems to send the message in the 'event' field - // in case 'event' and 'message' are sent, 'message' is used - logMessage = vStr - hasMinimalInfo = true - } - case "message": - logMessage = vStr - hasMinimalInfo = true - case "error.kind": - exType = vStr - hasMinimalInfo = true - isError = true - case "level": - isError = vStr == "error" - } - } - } - } - if !isError { - continue - } - if !hasMinimalInfo { - if logger.IsDebug() { - logger.Debugf("Cannot convert %s event into elastic apm error: %v", source, log) - } - continue - } - - if logMessage != "" { - err.Log = &model.Log{Message: logMessage} - } - if exMessage != "" || exType != "" { - err.Exception = &model.Exception{} - if exMessage != "" { - err.Exception.Message = &exMessage - } - if exType != "" { - err.Exception.Type = &exType - } - } - err.Timestamp = parseTimestamp(log.GetTime()) - errors = append(errors, &err) - } - return errors -} - -func addTransactionCtxToErr(transaction model.Transaction, err *model.Error) { - err.Metadata = transaction.Metadata - err.TransactionID = transaction.ID - err.TraceID = transaction.TraceID - err.ParentID = transaction.ID - err.HTTP = transaction.HTTP - err.URL = transaction.URL - err.TransactionType = &transaction.Type -} - -func addSpanCtxToErr(span model.Span, hostname string, err *model.Error) { - err.Metadata = span.Metadata - err.TransactionID = span.TransactionID - err.TraceID = span.TraceID - err.ParentID = span.ID - if span.HTTP != nil { - err.HTTP = &model.Http{} - if span.HTTP.StatusCode != nil { - err.HTTP.Response = &model.Resp{MinimalResp: model.MinimalResp{StatusCode: span.HTTP.StatusCode}} - } - if span.HTTP.Method != nil { - err.HTTP.Request = 
&model.Req{Method: *span.HTTP.Method} - } - if span.HTTP.URL != nil { - err.URL = model.ParseURL(*span.HTTP.URL, hostname) - } - } -} - -func replaceDots(s string) string { - return strings.ReplaceAll(s, dot, underscore) -} - -func parseTimestamp(timestampT *timestamp.Timestamp) time.Time { - if timestampT == nil { - return time.Time{} - } - return time.Unix(timestampT.Seconds, int64(timestampT.Nanos)).UTC() -} - -var languageName = map[commonpb.LibraryInfo_Language]string{ - 1: "C++", - 2: "CSharp", - 3: "Erlang", - 4: "Go", - 5: "Java", - 6: "Node", - 7: "PHP", - 8: "Python", - 9: "Ruby", - 10: "JavaScript", -} - -// copied from elastic go-apm agent - -var standardStatusCodeResults = [...]string{ - "HTTP 1xx", - "HTTP 2xx", - "HTTP 3xx", - "HTTP 4xx", - "HTTP 5xx", -} - -// statusCodeResult returns the transaction result value to use for the given status code. -func statusCodeResult(statusCode int) string { - switch i := statusCode / 100; i { - case 1, 2, 3, 4, 5: - return standardStatusCodeResults[i-1] - } - return fmt.Sprintf("HTTP %d", statusCode) -} - -// serverStatusCodeOutcome returns the transaction outcome value to use for the given status code. -func serverStatusCodeOutcome(statusCode int) string { - if statusCode >= 500 { - return outcomeFailure - } - return outcomeSuccess -} - -// clientStatusCodeOutcome returns the span outcome value to use for the given status code. -func clientStatusCodeOutcome(statusCode int) string { - if statusCode >= 400 { - return outcomeFailure - } - return outcomeSuccess -} - -// truncate returns s truncated at n runes, and the number of runes in the resulting string (<= n). -func truncate(s string) string { - var j int - for i := range s { - if j == keywordLength { - return s[:i] - } - j++ - } - return s -} - -// formatJaegerTraceID returns the traceID as string in Jaeger format (hexadecimal without leading zeros) -func formatJaegerTraceID(traceID []byte) string { - jaegerTraceIDHigh, jaegerTraceIDLow, err := tracetranslator.BytesToUInt64TraceID(traceID) - if err != nil { - return fmt.Sprintf("%x", traceID) - } - - if jaegerTraceIDHigh == 0 { - return fmt.Sprintf("%x", jaegerTraceIDLow) - } - - return fmt.Sprintf("%x%016x", jaegerTraceIDHigh, jaegerTraceIDLow) -} - -// formatJaegerSpanID returns the spanID as string in Jaeger format (hexadecimal without leading zeros) -func formatJaegerSpanID(spanID []byte) string { - jaegerSpanID, err := tracetranslator.BytesToUInt64SpanID(spanID) - if err != nil { - return fmt.Sprintf("%x", spanID) - } - - return fmt.Sprintf("%x", jaegerSpanID) -} - -func schemeDefaultPort(scheme string) int { - switch scheme { - case "http": - return 80 - case "https": - return 443 - } - return 0 -} diff --git a/processor/otel/consumer_test.go b/processor/otel/consumer_test.go deleted file mode 100644 index d0abf94a4a4..00000000000 --- a/processor/otel/consumer_test.go +++ /dev/null @@ -1,519 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package otel - -import ( - "context" - "path/filepath" - "testing" - - commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" - "github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/beat" - - "github.com/elastic/apm-server/approvaltest" - "github.com/elastic/apm-server/beater/beatertest" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" -) - -func TestConsumer_ConsumeTraceData(t *testing.T) { - for _, tc := range []struct { - name string - td consumerdata.TraceData - }{ - {name: "empty", td: consumerdata.TraceData{}}, - {name: "emptytrace", td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Node: &commonpb.Node{}, - Resource: &resourcepb.Resource{}, - Spans: []*tracepb.Span{}}}, - {name: "span", td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: []*tracepb.Span{ - {Kind: tracepb.Span_SERVER, StartTime: ×tamp.Timestamp{Seconds: 1576500418, Nanos: 768068}}, - {ParentSpanId: []byte{0, 0, 0, 0, 70, 70, 48, 88}, StartTime: ×tamp.Timestamp{Seconds: 1576500418, Nanos: 768068}}, - }}}, - } { - t.Run(tc.name, func(t *testing.T) { - reporter := func(ctx context.Context, p publish.PendingReq) error { - events := transformAll(ctx, p) - approveEvents(t, "consume_"+tc.name, events) - return nil - } - consumer := Consumer{Reporter: reporter} - assert.NoError(t, consumer.ConsumeTraceData(context.Background(), tc.td)) - }) - } -} - -func TestConsumer_Metadata(t *testing.T) { - spans := []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, - SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, - Kind: tracepb.Span_CLIENT, - StartTime: testStartTime(), - }} - for _, tc := range []struct { - name string - td consumerdata.TraceData - }{{ - name: "jaeger", - td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: spans, - Node: &commonpb.Node{ - Identifier: &commonpb.ProcessIdentifier{ - HostName: "host-foo", - Pid: 107892, - StartTimestamp: testStartTime(), - }, - LibraryInfo: &commonpb.LibraryInfo{ExporterVersion: "Jaeger-C++-3.2.1"}, - ServiceInfo: &commonpb.ServiceInfo{Name: "foo"}, - Attributes: map[string]string{ - "client-uuid": "xxf0", - "ip": "17.0.10.123", - "foo": "bar", - "peer.port": "80", - }, - }, - Resource: &resourcepb.Resource{ - Type: "request", - Labels: map[string]string{"a": "b", "c": "d", "e.f": "g"}, - }, - }, - }, { - name: "jaeger-version", - td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: spans, - Node: &commonpb.Node{LibraryInfo: &commonpb.LibraryInfo{Language: 7, ExporterVersion: 
"Jaeger-3.4.12"}}, - }, - }, { - name: "jaeger-no-language", - td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: spans, - Node: &commonpb.Node{LibraryInfo: &commonpb.LibraryInfo{ExporterVersion: "Jaeger-3.4.12"}}, - }, - }, { - name: "jaeger_minimal", - td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: spans, - Node: &commonpb.Node{ - Identifier: &commonpb.ProcessIdentifier{}, - LibraryInfo: &commonpb.LibraryInfo{}, - ServiceInfo: &commonpb.ServiceInfo{}, - }, - }, - }, { - name: "jaeger_full-traceid", - td: consumerdata.TraceData{ - SourceFormat: "jaeger", - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 70, 70, 120, 48, 0, 0, 0, 0, 70, 70, 120, 48}, - SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, - Kind: tracepb.Span_CLIENT, - StartTime: testStartTime(), - }}, - Node: &commonpb.Node{ - Identifier: &commonpb.ProcessIdentifier{}, - LibraryInfo: &commonpb.LibraryInfo{}, - ServiceInfo: &commonpb.ServiceInfo{}, - }, - }, - }, { - name: "minimal", - td: consumerdata.TraceData{SourceFormat: "foo", Spans: spans}, - }} { - t.Run(tc.name, func(t *testing.T) { - reporter := func(ctx context.Context, req publish.PendingReq) error { - require.Len(t, req.Transformables, 1) - events := transformAll(ctx, req) - approveEvents(t, "metadata_"+tc.name, events) - return nil - } - require.NoError(t, (&Consumer{Reporter: reporter}).ConsumeTraceData(context.Background(), tc.td)) - }) - } -} - -func TestConsumer_Transaction(t *testing.T) { - for _, tc := range []struct { - name string - td consumerdata.TraceData - }{ - {name: "jaeger_full", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, - SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, - StartTime: testStartTime(), - EndTime: testEndTime(), - Name: testTruncatableString("HTTP GET"), - ChildSpanCount: testIntToWrappersUint32(10), - SameProcessAsParentSpan: testBoolToWrappersBool(true), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error": testAttributeBoolValue(true), - "bool.a": testAttributeBoolValue(true), - "double.a": testAttributeDoubleValue(14.65), - "int.a": testAttributeIntValue(148), - "span.kind": testAttributeStringValue("http request"), - "http.method": testAttributeStringValue("get"), - "http.url": testAttributeStringValue("http://foo.bar.com?a=12"), - "http.status_code": testAttributeStringValue("400"), - "http.protocol": testAttributeStringValue("HTTP/1.1"), - "type": testAttributeStringValue("http_request"), - "component": testAttributeStringValue("foo"), - "string.a.b": testAttributeStringValue("some note"), - "service.version": testAttributeStringValue("1.0"), - }}, - TimeEvents: testTimeEvents(), - }}}}, - {name: "jaeger_type_request", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - ParentSpanId: []byte{0, 0, 0, 0, 97, 98, 99, 100}, Kind: tracepb.Span_SERVER, - StartTime: testStartTime(), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "http.status_code": testAttributeIntValue(500), - "http.protocol": testAttributeStringValue("HTTP"), - "http.path": testAttributeStringValue("http://foo.bar.com?a=12"), - }}}}}}, - {name: "jaeger_type_request_result", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: 
&commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - ParentSpanId: []byte{0, 0, 0, 0, 97, 98, 99, 100}, Kind: tracepb.Span_SERVER, - StartTime: testStartTime(), - Status: &tracepb.Status{Code: 200}, - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "http.url": testAttributeStringValue("localhost:8080"), - }}}}}}, - {name: "jaeger_type_messaging", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - ParentSpanId: []byte{0, 0, 0, 0, 97, 98, 99, 100}, Kind: tracepb.Span_SERVER, - StartTime: testStartTime(), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "message_bus.destination": testAttributeStringValue("queue-abc"), - }}}}}}, - {name: "jaeger_type_component", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "component": testAttributeStringValue("amqp"), - }}}}}}, - {name: "jaeger_custom", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Spans: []*tracepb.Span{{Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "a.b": testAttributeStringValue("foo")}}}}, - Node: &commonpb.Node{ - Identifier: &commonpb.ProcessIdentifier{}, - LibraryInfo: &commonpb.LibraryInfo{}, - ServiceInfo: &commonpb.ServiceInfo{}, - }}}, - {name: "jaeger_no_attrs", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - Kind: tracepb.Span_SERVER, - StartTime: testStartTime(), EndTime: testEndTime(), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error": testAttributeBoolValue(true), - }}, - Status: &tracepb.Status{Code: 500}}}}}, - } { - t.Run(tc.name, func(t *testing.T) { - reporter := func(ctx context.Context, req publish.PendingReq) error { - require.True(t, len(req.Transformables) >= 1) - events := transformAll(ctx, req) - approveEvents(t, "transaction_"+tc.name, events) - return nil - } - require.NoError(t, (&Consumer{Reporter: reporter}).ConsumeTraceData(context.Background(), tc.td)) - }) - } -} - -func TestConsumer_SampleRate(t *testing.T) { - var transformables []transform.Transformable - reporter := func(ctx context.Context, req publish.PendingReq) error { - transformables = append(transformables, req.Transformables...) 
- events := transformAll(ctx, req) - approveEvents(t, "jaeger_sampling_rate", events) - return nil - } - require.NoError(t, (&Consumer{Reporter: reporter}).ConsumeTraceData(context.Background(), consumerdata.TraceData{ - SourceFormat: "jaeger", - Node: &commonpb.Node{ - Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}, - }, - Spans: []*tracepb.Span{{ - Kind: tracepb.Span_SERVER, - StartTime: testStartTime(), EndTime: testEndTime(), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "sampler.type": testAttributeStringValue("probabilistic"), - "sampler.param": testAttributeDoubleValue(0.8), - }}, - }, { - Kind: tracepb.Span_CLIENT, - StartTime: testStartTime(), EndTime: testEndTime(), - ParentSpanId: []byte{1}, - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "sampler.type": testAttributeStringValue("probabilistic"), - "sampler.param": testAttributeDoubleValue(0.4), - }}, - }}, - })) - - require.Len(t, transformables, 2) - tx := transformables[0].(*model.Transaction) - span := transformables[1].(*model.Span) - assert.Equal(t, 1.25 /* 1/0.8 */, tx.RepresentativeCount) - assert.Equal(t, 2.5 /* 1/0.4 */, span.RepresentativeCount) -} - -func TestConsumer_Span(t *testing.T) { - for _, tc := range []struct { - name string - td consumerdata.TraceData - }{ - {name: "jaeger_http", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, ParentSpanId: []byte{0, 0, 0, 0, 88, 88, 88, 88}, - StartTime: testStartTime(), EndTime: testEndTime(), - Name: testTruncatableString("HTTP GET"), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error": testAttributeBoolValue(true), - "hasErrors": testAttributeBoolValue(true), - "double.a": testAttributeDoubleValue(14.65), - "http.status_code": testAttributeIntValue(400), - "int.a": testAttributeIntValue(148), - "span.kind": testAttributeStringValue("filtered"), - "http.url": testAttributeStringValue("http://foo.bar.com?a=12"), - "http.method": testAttributeStringValue("get"), - "component": testAttributeStringValue("foo"), - "string.a.b": testAttributeStringValue("some note"), - }}, - TimeEvents: testTimeEvents(), - }}}}, - {name: "jaeger_https_default_port", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, ParentSpanId: []byte{0, 0, 0, 0, 88, 88, 88, 88}, - StartTime: testStartTime(), EndTime: testEndTime(), - Name: testTruncatableString("HTTPS GET"), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "http.url": testAttributeStringValue("https://foo.bar.com:443?a=12"), - }}, - }}}}, - {name: "jaeger_http_status_code", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, ParentSpanId: []byte{0, 0, 0, 0, 88, 88, 88, 88}, - StartTime: testStartTime(), EndTime: testEndTime(), - Name: testTruncatableString("HTTP 
GET"), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "http.url": testAttributeStringValue("http://foo.bar.com?a=12"), - "http.method": testAttributeStringValue("get"), - }}, - Status: &tracepb.Status{Code: 202}, - }}}}, - {name: "jaeger_db", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - ParentSpanId: []byte{0, 0, 0, 0, 97, 98, 99, 100}, Kind: tracepb.Span_CLIENT, - StartTime: testStartTime(), Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "db.statement": testAttributeStringValue("GET * from users"), - "db.instance": testAttributeStringValue("db01"), - "db.type": testAttributeStringValue("mysql"), - "db.user": testAttributeStringValue("admin"), - "component": testAttributeStringValue("foo"), - "peer.address": testAttributeStringValue("mysql://db:3306"), - "peer.hostname": testAttributeStringValue("db"), - "peer.port": testAttributeIntValue(3306), - "peer.service": testAttributeStringValue("sql"), - }}, - }}}}, - {name: "jaeger_messaging", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - TraceId: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 70, 120, 48}, SpanId: []byte{0, 0, 0, 0, 65, 65, 70, 70}, ParentSpanId: []byte{0, 0, 0, 0, 88, 88, 88, 88}, - StartTime: testStartTime(), EndTime: testEndTime(), - Name: testTruncatableString("Message receive"), - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "peer.hostname": testAttributeStringValue("mq"), - "peer.port": testAttributeIntValue(1234), - "message_bus.destination": testAttributeStringValue("queue-abc"), - }}, - Status: &tracepb.Status{Code: 202}, - }}}}, - {name: "jaeger_custom", - td: consumerdata.TraceData{SourceFormat: "jaeger", - Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, - Spans: []*tracepb.Span{{ - ParentSpanId: []byte{0, 0, 0, 0, 97, 98, 99, 100}, Kind: tracepb.Span_CLIENT, - StartTime: testStartTime()}}}}, - } { - t.Run(tc.name, func(t *testing.T) { - reporter := func(ctx context.Context, req publish.PendingReq) error { - require.True(t, len(req.Transformables) >= 1) - events := transformAll(ctx, req) - approveEvents(t, "span_"+tc.name, events) - return nil - } - require.NoError(t, (&Consumer{Reporter: reporter}).ConsumeTraceData(context.Background(), tc.td)) - }) - } -} - -func testTimeEvents() *tracepb.Span_TimeEvents { - return &tracepb.Span_TimeEvents{TimeEvent: []*tracepb.Span_TimeEvent{ - // errors that can be converted to elastic errors - {Time: testTimeStamp(testStartTime(), 23), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "event": testAttributeStringValue("retrying connection"), - "level": testAttributeStringValue("error"), - "error": testAttributeStringValue("no connection established"), - }}}}}, - {Time: testTimeStamp(testStartTime(), 43), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "event": testAttributeStringValue("no user.ID given"), - "message": testAttributeStringValue("nullPointer exception"), - "level": 
testAttributeStringValue("error"), - "isbool": testAttributeBoolValue(true), - }}}}}, - {Time: testTimeStamp(testStartTime(), 66), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error": testAttributeStringValue("no connection established"), - }}}}}, - {Time: testTimeStamp(testStartTime(), 66), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error.object": testAttributeStringValue("no connection established"), - }}}}}, - {Time: testTimeStamp(testStartTime(), 66), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "error.kind": testAttributeStringValue("DBClosedException"), - }}}}}, - {Time: testTimeStamp(testStartTime(), 66), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "event": testAttributeStringValue("error"), - "message": testAttributeStringValue("no connection established"), - }}}}}, - // no errors - {Time: testTimeStamp(testStartTime(), 15), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "event": testAttributeStringValue("baggage"), - "isValid": testAttributeBoolValue(false), - }}}}}, - {Time: testTimeStamp(testStartTime(), 65), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "event": testAttributeStringValue("retrying connection"), - "level": testAttributeStringValue("info"), - }}}}}, - // errors not convertable to elastic errors - {Time: testTimeStamp(testStartTime(), 67), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: &tracepb.Span_TimeEvent_Annotation{ - Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ - "level": testAttributeStringValue("error"), - }}}}}}} -} - -func testStartTime() *timestamp.Timestamp { - return ×tamp.Timestamp{Seconds: 1576500418, Nanos: 768068} -} - -func testEndTime() *timestamp.Timestamp { - return ×tamp.Timestamp{Seconds: 1576500497, Nanos: 768068} -} - -func testTimeStamp(t *timestamp.Timestamp, addNanos int32) *timestamp.Timestamp { - return ×tamp.Timestamp{Seconds: t.GetSeconds(), Nanos: t.GetNanos() + addNanos} -} - -func testIntToWrappersUint32(n int) *wrappers.UInt32Value { - return &wrappers.UInt32Value{Value: uint32(n)} -} - -func testBoolToWrappersBool(b bool) *wrappers.BoolValue { - return &wrappers.BoolValue{Value: b} -} - -func testTruncatableString(s string) *tracepb.TruncatableString { - return &tracepb.TruncatableString{Value: s} -} - -func testAttributeIntValue(n int) *tracepb.AttributeValue { - return &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(n)}} -} -func testAttributeBoolValue(b bool) *tracepb.AttributeValue { - return &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: b}} -} -func testAttributeDoubleValue(f float64) *tracepb.AttributeValue { - return &tracepb.AttributeValue{Value: 
&tracepb.AttributeValue_DoubleValue{DoubleValue: f}} -} - -func testAttributeStringValue(s string) *tracepb.AttributeValue { - return &tracepb.AttributeValue{Value: &tracepb.AttributeValue_StringValue{StringValue: testTruncatableString(s)}} -} - -func transformAll(ctx context.Context, p publish.PendingReq) []beat.Event { - var events []beat.Event - for _, transformable := range p.Transformables { - events = append(events, transformable.Transform(ctx, &transform.Config{})...) - } - return events -} - -func approveEvents(t testing.TB, name string, events []beat.Event) { - docs := beatertest.EncodeEventDocs(events...) - approvaltest.ApproveEventDocs(t, file(name), docs) -} - -func file(f string) string { - return filepath.Join("test_approved", f) -} diff --git a/processor/otel/exceptions.go b/processor/otel/exceptions.go new file mode 100644 index 00000000000..81b4fe17761 --- /dev/null +++ b/processor/otel/exceptions.go @@ -0,0 +1,214 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Portions copied from OpenTelemetry Collector (contrib), from the +// elastic exporter. +// +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel + +import ( + "bufio" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/elastic/apm-server/model" +) + +var ( + javaStacktraceAtRegexp = regexp.MustCompile(`at (.*)\(([^:]*)(?::([0-9]+))?\)`) + javaStacktraceMoreRegexp = regexp.MustCompile(`\.\.\. ([0-9]+) more`) +) + +func convertOpenTelemetryExceptionSpanEvent( + exceptionType, exceptionMessage, exceptionStacktrace string, + exceptionEscaped bool, + language string, +) *model.Error { + if exceptionMessage == "" { + exceptionMessage = "[EMPTY]" + } + exceptionHandled := !exceptionEscaped + exceptionError := model.Error{ + Exception: &model.Exception{ + Message: exceptionMessage, + Type: exceptionType, + Handled: &exceptionHandled, + }, + } + if exceptionStacktrace != "" { + if err := setExceptionStacktrace(exceptionStacktrace, language, exceptionError.Exception); err != nil { + // Couldn't parse stacktrace, just add it as an attribute to the + // exception so the user can still access it. 
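+			// (A descriptive note, inferred from the parser below:
+			// setJavaExceptionStacktrace appends frames as it scans, so a
+			// mid-parse failure can leave Stacktrace and Cause partially
+			// populated; both are reset before falling back.)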
+ exceptionError.Exception.Stacktrace = nil + exceptionError.Exception.Cause = nil + exceptionError.Exception.Attributes = map[string]interface{}{ + "stacktrace": exceptionStacktrace, + } + } + } + return &exceptionError +} + +func setExceptionStacktrace(s, language string, out *model.Exception) error { + switch language { + case "java": + return setJavaExceptionStacktrace(s, out) + } + return fmt.Errorf("parsing %q stacktraces not implemented", language) +} + +// setJavaExceptionStacktrace parses a Java exception stack trace according to +// https://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html#printStackTrace() +func setJavaExceptionStacktrace(s string, out *model.Exception) error { + const ( + causedByPrefix = "Caused by: " + suppressedPrefix = "Suppressed: " + ) + + type Exception struct { + *model.Exception + enclosing *model.Exception + indent int + } + first := true + current := Exception{out, nil, 0} + stack := []Exception{} + scanner := bufio.NewScanner(strings.NewReader(s)) + for scanner.Scan() { + if first { + // Ignore the first line, we only care about the locations. + first = false + continue + } + var indent int + line := scanner.Text() + if i := strings.IndexFunc(line, isNotTab); i > 0 { + line = line[i:] + indent = i + } + for indent < current.indent { + n := len(stack) + current, stack = stack[n-1], stack[:n-1] + } + switch { + case strings.HasPrefix(line, "at "): + if err := parseJavaStacktraceFrame(line, current.Exception); err != nil { + return err + } + case strings.HasPrefix(line, "..."): + // "... N more" lines indicate that the last N frames from the enclosing + // exception's stacktrace are common to this exception. + if current.enclosing == nil { + return fmt.Errorf("no enclosing exception preceding line %q", line) + } + submatch := javaStacktraceMoreRegexp.FindStringSubmatch(line) + if submatch == nil { + return fmt.Errorf("failed to parse stacktrace line %q", line) + } + if n, err := strconv.Atoi(submatch[1]); err == nil { + enclosing := current.enclosing + if len(enclosing.Stacktrace) < n { + return fmt.Errorf( + "enclosing exception stacktrace has %d frames, cannot satisfy %q", + len(enclosing.Stacktrace), line, + ) + } + m := len(enclosing.Stacktrace) + current.Stacktrace = append(current.Stacktrace, enclosing.Stacktrace[m-n:]...) + } + case strings.HasPrefix(line, causedByPrefix): + // "Caused by:" lines are at the same level of indentation + // as the enclosing exception. + current.Cause = make([]model.Exception, 1) + current.enclosing = current.Exception + current.Exception = ¤t.Cause[0] + current.Exception.Handled = current.enclosing.Handled + current.Message = line[len(causedByPrefix):] + case strings.HasPrefix(line, suppressedPrefix): + // Suppressed exceptions have no place in the Elastic APM + // model, so they are ignored. + // + // Unlike "Caused by:", "Suppressed:" lines are indented within their + // enclosing exception; we just account for the indentation here. 
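+			// The enclosing exception is pushed onto the stack, and the
+			// suppressed exception's frames are parsed into a throwaway
+			// model.Exception that is discarded once the indentation drops
+			// back to the enclosing level.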
+ stack = append(stack, current) + current.enclosing = current.Exception + current.Exception = &model.Exception{} + current.indent = indent + default: + return fmt.Errorf("unexpected line %q", line) + } + } + return scanner.Err() +} + +func parseJavaStacktraceFrame(s string, out *model.Exception) error { + submatch := javaStacktraceAtRegexp.FindStringSubmatch(s) + if submatch == nil { + return fmt.Errorf("failed to parse stacktrace line %q", s) + } + var module string + function := submatch[1] + if slash := strings.IndexRune(function, '/'); slash >= 0 { + // We could have either: + // - "class_loader/module/class.method" + // - "module/class.method" + module, function = function[:slash], function[slash+1:] + if slash := strings.IndexRune(function, '/'); slash >= 0 { + module, function = function[:slash], function[slash+1:] + } + } + var classname string + if dot := strings.LastIndexByte(function, '.'); dot > 0 { + // Split into classname and method. + classname, function = function[:dot], function[dot+1:] + } + file := submatch[2] + var lineno *int + if submatch[3] != "" { + if n, err := strconv.Atoi(submatch[3]); err == nil { + lineno = &n + } + } + out.Stacktrace = append(out.Stacktrace, &model.StacktraceFrame{ + Module: module, + Classname: classname, + Function: function, + Filename: file, + Lineno: lineno, + }) + return nil +} + +func isNotTab(r rune) bool { + return r != '\t' +} diff --git a/processor/otel/exceptions_test.go b/processor/otel/exceptions_test.go new file mode 100644 index 00000000000..6482ad0ffb3 --- /dev/null +++ b/processor/otel/exceptions_test.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Portions copied from OpenTelemetry Collector (contrib), from the +// elastic exporter. +// +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otel_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/pdata" + semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" + + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestEncodeSpanEventsNonExceptions(t *testing.T) { + nonExceptionEvent := pdata.NewSpanEvent() + nonExceptionEvent.SetName("not_exception") + + incompleteExceptionEvent := pdata.NewSpanEvent() + incompleteExceptionEvent.SetName("exception") + incompleteExceptionEvent.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + // At least one of exception.message and exception.type is required. + semconv.AttributeExceptionStacktrace: pdata.NewAttributeValueString("stacktrace"), + }) + + _, events := transformTransactionSpanEvents(t, "java", nonExceptionEvent, incompleteExceptionEvent) + require.Len(t, events, 2) + assert.Equal(t, model.LogProcessor, events[0].Processor) + assert.Equal(t, model.LogProcessor, events[1].Processor) +} + +func TestEncodeSpanEventsJavaExceptions(t *testing.T) { + timestamp := time.Unix(123, 0).UTC() + + exceptionEvent1 := pdata.NewSpanEvent() + exceptionEvent1.SetTimestamp(pdata.TimestampFromTime(timestamp)) + exceptionEvent1.SetName("exception") + exceptionEvent1.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "exception.type": pdata.NewAttributeValueString("java.net.ConnectException.OSError"), + "exception.message": pdata.NewAttributeValueString("Division by zero"), + "exception.escaped": pdata.NewAttributeValueBool(true), + "exception.stacktrace": pdata.NewAttributeValueString(` +Exception in thread "main" java.lang.RuntimeException: Test exception + at com.example.GenerateTrace.methodB(GenerateTrace.java:13) + at com.example.GenerateTrace.methodA(GenerateTrace.java:9) + at com.example.GenerateTrace.main(GenerateTrace.java:5) + at com.foo.loader/foo@9.0/com.foo.Main.run(Main.java) + at com.foo.loader//com.foo.bar.App.run(App.java:12) + at java.base/java.lang.Thread.run(Unknown Source) +`[1:], + ), + }) + exceptionEvent2 := pdata.NewSpanEvent() + exceptionEvent2.SetTimestamp(pdata.TimestampFromTime(timestamp)) + exceptionEvent2.SetName("exception") + exceptionEvent2.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "exception.type": pdata.NewAttributeValueString("HighLevelException"), + "exception.message": pdata.NewAttributeValueString("MidLevelException: LowLevelException"), + "exception.stacktrace": pdata.NewAttributeValueString(` +HighLevelException: MidLevelException: LowLevelException + at Junk.a(Junk.java:13) + at Junk.main(Junk.java:4) +Caused by: MidLevelException: LowLevelException + at Junk.c(Junk.java:23) + at Junk.b(Junk.java:17) + at Junk.a(Junk.java:11) + ... 1 more + Suppressed: java.lang.ArithmeticException: / by zero + at Junk.c(Junk.java:25) + ... 3 more +Caused by: LowLevelException + at Junk.e(Junk.java:37) + at Junk.d(Junk.java:34) + at Junk.c(Junk.java:21) + ... 
3 more`[1:], + ), + }) + + service, agent := languageOnlyMetadata("java") + transactionEvent, errorEvents := transformTransactionSpanEvents(t, "java", exceptionEvent1, exceptionEvent2) + assert.Equal(t, []model.APMEvent{{ + Service: service, + Agent: agent, + Timestamp: timestamp, + Labels: common.MapStr{}, + Processor: model.ErrorProcessor, + Trace: transactionEvent.Trace, + Parent: model.Parent{ID: transactionEvent.Transaction.ID}, + Transaction: &model.Transaction{ + ID: transactionEvent.Transaction.ID, + Type: transactionEvent.Transaction.Type, + Sampled: true, + }, + Error: &model.Error{ + Exception: &model.Exception{ + Type: "java.net.ConnectException.OSError", + Message: "Division by zero", + Handled: newBool(false), + Stacktrace: []*model.StacktraceFrame{{ + Classname: "com.example.GenerateTrace", + Function: "methodB", + Filename: "GenerateTrace.java", + Lineno: newInt(13), + }, { + Classname: "com.example.GenerateTrace", + Function: "methodA", + Filename: "GenerateTrace.java", + Lineno: newInt(9), + }, { + Classname: "com.example.GenerateTrace", + Function: "main", + Filename: "GenerateTrace.java", + Lineno: newInt(5), + }, { + Module: "foo@9.0", + Classname: "com.foo.Main", + Function: "run", + Filename: "Main.java", + }, { + Classname: "com.foo.bar.App", + Function: "run", + Filename: "App.java", + Lineno: newInt(12), + }, { + Module: "java.base", + Classname: "java.lang.Thread", + Function: "run", + Filename: "Unknown Source", + }}, + }, + }, + }, { + Service: service, + Agent: agent, + Timestamp: timestamp, + Labels: common.MapStr{}, + Processor: model.ErrorProcessor, + Trace: transactionEvent.Trace, + Parent: model.Parent{ID: transactionEvent.Transaction.ID}, + Transaction: &model.Transaction{ + ID: transactionEvent.Transaction.ID, + Type: transactionEvent.Transaction.Type, + Sampled: true, + }, + Error: &model.Error{ + Exception: &model.Exception{ + Type: "HighLevelException", + Message: "MidLevelException: LowLevelException", + Handled: newBool(true), + Stacktrace: []*model.StacktraceFrame{{ + Classname: "Junk", + Function: "a", + Filename: "Junk.java", + Lineno: newInt(13), + }, { + Classname: "Junk", + Function: "main", + Filename: "Junk.java", + Lineno: newInt(4), + }}, + Cause: []model.Exception{{ + Message: "MidLevelException: LowLevelException", + Handled: newBool(true), + Stacktrace: []*model.StacktraceFrame{{ + Classname: "Junk", + Function: "c", + Filename: "Junk.java", + Lineno: newInt(23), + }, { + Classname: "Junk", + Function: "b", + Filename: "Junk.java", + Lineno: newInt(17), + }, { + Classname: "Junk", + Function: "a", + Filename: "Junk.java", + Lineno: newInt(11), + }, { + Classname: "Junk", + Function: "main", + Filename: "Junk.java", + Lineno: newInt(4), + }}, + Cause: []model.Exception{{ + Message: "LowLevelException", + Handled: newBool(true), + Stacktrace: []*model.StacktraceFrame{{ + Classname: "Junk", + Function: "e", + Filename: "Junk.java", + Lineno: newInt(37), + }, { + Classname: "Junk", + Function: "d", + Filename: "Junk.java", + Lineno: newInt(34), + }, { + Classname: "Junk", + Function: "c", + Filename: "Junk.java", + Lineno: newInt(21), + }, { + Classname: "Junk", + Function: "b", + Filename: "Junk.java", + Lineno: newInt(17), + }, { + Classname: "Junk", + Function: "a", + Filename: "Junk.java", + Lineno: newInt(11), + }, { + Classname: "Junk", + Function: "main", + Filename: "Junk.java", + Lineno: newInt(4), + }}, + }}, + }}, + }, + }, + }}, errorEvents) +} + +func TestEncodeSpanEventsJavaExceptionsUnparsedStacktrace(t *testing.T) { + 
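+	// Each stacktrace below is malformed in a different way; the parser
+	// must reject all of them, falling back to storing the raw string as
+	// the exception's "stacktrace" attribute (asserted at the end).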
stacktraces := []string{ + // Unexpected prefix. + "abc\ndef", + + // "... N more" with no preceding exception. + "abc\n... 1 more", + + // "... N more" where N is greater than the number of stack + // frames in the enclosing exception. + `ignored message + at Class.method(Class.java:1) +Caused by: something else + at Class.method(Class.java:2) + ... 2 more`, + + // "... N more" where N is not a sequence of digits. + `abc + at Class.method(Class.java:1) +Caused by: whatever + at Class.method(Class.java:2) + ... lots more`, + + // "at <location>" where <location> is invalid. + `abc + at the movies`, + } + + var events []pdata.SpanEvent + for _, stacktrace := range stacktraces { + event := pdata.NewSpanEvent() + event.SetName("exception") + event.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "exception.type": pdata.NewAttributeValueString("ExceptionType"), + "exception.stacktrace": pdata.NewAttributeValueString(stacktrace), + }) + events = append(events, event) + } + + _, errorEvents := transformTransactionSpanEvents(t, "java", events...) + require.Len(t, errorEvents, len(stacktraces)) + + for i, event := range errorEvents { + assert.Empty(t, event.Error.Exception.Stacktrace) + assert.Equal(t, map[string]interface{}{"stacktrace": stacktraces[i]}, event.Error.Exception.Attributes) + } +} + +func TestEncodeSpanEventsNonJavaExceptions(t *testing.T) { + timestamp := time.Unix(123, 0).UTC() + + exceptionEvent := pdata.NewSpanEvent() + exceptionEvent.SetTimestamp(pdata.TimestampFromTime(timestamp)) + exceptionEvent.SetName("exception") + exceptionEvent.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "exception.type": pdata.NewAttributeValueString("the_type"), + "exception.message": pdata.NewAttributeValueString("the_message"), + "exception.stacktrace": pdata.NewAttributeValueString("the_stacktrace"), + }) + + // For languages where we do not explicitly parse the stacktrace, + // the raw stacktrace is stored as an attribute on the exception. + transactionEvent, errorEvents := transformTransactionSpanEvents(t, "COBOL", exceptionEvent) + require.Len(t, errorEvents, 1) + + service, agent := languageOnlyMetadata("COBOL") + assert.Equal(t, model.APMEvent{ + Service: service, + Agent: agent, + Timestamp: timestamp, + Labels: common.MapStr{}, + Processor: model.ErrorProcessor, + Trace: transactionEvent.Trace, + Parent: model.Parent{ID: transactionEvent.Transaction.ID}, + Transaction: &model.Transaction{ + ID: transactionEvent.Transaction.ID, + Type: transactionEvent.Transaction.Type, + Sampled: true, + }, + Error: &model.Error{ + Exception: &model.Exception{ + Type: "the_type", + Message: "the_message", + Handled: newBool(true), + Attributes: map[string]interface{}{ + "stacktrace": "the_stacktrace", + }, + }, + }, + }, errorEvents[0]) +} + +func languageOnlyMetadata(language string) (model.Service, model.Agent) { + service := model.Service{ + Name: "unknown", + Language: model.Language{Name: language}, + } + agent := model.Agent{ + Name: "otlp/" + language, + Version: "unknown", + } + return service, agent +} diff --git a/processor/otel/metadata.go b/processor/otel/metadata.go new file mode 100644 index 00000000000..a324a88d282 --- /dev/null +++ b/processor/otel/metadata.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otel + +import ( + "fmt" + "net" + "regexp" + "strings" + + "go.opentelemetry.io/collector/model/pdata" + semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" + + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/common" +) + +const ( + AgentNameJaeger = "Jaeger" +) + +var ( + serviceNameInvalidRegexp = regexp.MustCompile("[^a-zA-Z0-9 _-]") +) + +func translateResourceMetadata(resource pdata.Resource, out *model.APMEvent) { + var exporterVersion string + resource.Attributes().Range(func(k string, v pdata.AttributeValue) bool { + switch k { + // service.* + case semconv.AttributeServiceName: + out.Service.Name = cleanServiceName(v.StringVal()) + case semconv.AttributeServiceVersion: + out.Service.Version = truncate(v.StringVal()) + case semconv.AttributeServiceInstanceID: + out.Service.Node.Name = truncate(v.StringVal()) + + // deployment.* + case semconv.AttributeDeploymentEnvironment: + out.Service.Environment = truncate(v.StringVal()) + + // telemetry.sdk.* + case semconv.AttributeTelemetrySDKName: + out.Agent.Name = truncate(v.StringVal()) + case semconv.AttributeTelemetrySDKVersion: + out.Agent.Version = truncate(v.StringVal()) + case semconv.AttributeTelemetrySDKLanguage: + out.Service.Language.Name = truncate(v.StringVal()) + + // cloud.* + case semconv.AttributeCloudProvider: + out.Cloud.Provider = truncate(v.StringVal()) + case semconv.AttributeCloudAccountID: + out.Cloud.AccountID = truncate(v.StringVal()) + case semconv.AttributeCloudRegion: + out.Cloud.Region = truncate(v.StringVal()) + case semconv.AttributeCloudAvailabilityZone: + out.Cloud.AvailabilityZone = truncate(v.StringVal()) + case semconv.AttributeCloudPlatform: + out.Cloud.ServiceName = truncate(v.StringVal()) + + // container.* + case semconv.AttributeContainerName: + out.Container.Name = truncate(v.StringVal()) + case semconv.AttributeContainerID: + out.Container.ID = truncate(v.StringVal()) + case semconv.AttributeContainerImageName: + out.Container.ImageName = truncate(v.StringVal()) + case semconv.AttributeContainerImageTag: + out.Container.ImageTag = truncate(v.StringVal()) + case "container.runtime": + out.Container.Runtime = truncate(v.StringVal()) + + // k8s.* + case semconv.AttributeK8SNamespaceName: + out.Kubernetes.Namespace = truncate(v.StringVal()) + case semconv.AttributeK8SNodeName: + out.Kubernetes.NodeName = truncate(v.StringVal()) + case semconv.AttributeK8SPodName: + out.Kubernetes.PodName = truncate(v.StringVal()) + case semconv.AttributeK8SPodUID: + out.Kubernetes.PodUID = truncate(v.StringVal()) + + // host.* + case semconv.AttributeHostName: + out.Host.Hostname = truncate(v.StringVal()) + case semconv.AttributeHostID: + out.Host.ID = truncate(v.StringVal()) + case semconv.AttributeHostType: + out.Host.Type = truncate(v.StringVal()) + case "host.arch": + out.Host.Architecture = truncate(v.StringVal()) + + // process.* + case semconv.AttributeProcessPID: + 
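+			// (pdata integer attributes are int64; model.Process.Pid is an int.)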
out.Process.Pid = int(v.IntVal()) + case semconv.AttributeProcessCommandLine: + out.Process.CommandLine = truncate(v.StringVal()) + case semconv.AttributeProcessExecutablePath: + out.Process.Executable = truncate(v.StringVal()) + case "process.runtime.name": + out.Service.Runtime.Name = truncate(v.StringVal()) + case "process.runtime.version": + out.Service.Runtime.Version = truncate(v.StringVal()) + + // os.* + case semconv.AttributeOSType: + out.Host.OS.Platform = strings.ToLower(truncate(v.StringVal())) + case semconv.AttributeOSDescription: + out.Host.OS.Full = truncate(v.StringVal()) + + // Legacy OpenCensus attributes. + case "opencensus.exporterversion": + exporterVersion = v.StringVal() + + default: + if out.Labels == nil { + out.Labels = make(common.MapStr) + } + out.Labels[replaceDots(k)] = ifaceAttributeValue(v) + } + return true + }) + + // https://www.elastic.co/guide/en/ecs/current/ecs-os.html#field-os-type: + // + // "One of these following values should be used (lowercase): linux, macos, unix, windows. + // If the OS you’re dealing with is not in the list, the field should not be populated." + switch out.Host.OS.Platform { + case "windows", "linux": + out.Host.OS.Type = out.Host.OS.Platform + case "darwin": + out.Host.OS.Type = "macos" + case "aix", "hpux", "solaris": + out.Host.OS.Type = "unix" + } + + if strings.HasPrefix(exporterVersion, "Jaeger") { + // version is of format `Jaeger-<Language>-<Version>`, e.g. `Jaeger-Go-2.20.0` + const nVersionParts = 3 + versionParts := strings.SplitN(exporterVersion, "-", nVersionParts) + if out.Service.Language.Name == "" && len(versionParts) == nVersionParts { + out.Service.Language.Name = versionParts[1] + } + if v := versionParts[len(versionParts)-1]; v != "" { + out.Agent.Version = v + } + out.Agent.Name = AgentNameJaeger + + // Translate known Jaeger labels. + if clientUUID, ok := out.Labels["client-uuid"].(string); ok { + out.Agent.EphemeralID = clientUUID + delete(out.Labels, "client-uuid") + } + if systemIP, ok := out.Labels["ip"].(string); ok { + out.Host.IP = net.ParseIP(systemIP) + delete(out.Labels, "ip") + } + } + + if out.Service.Name == "" { + // service.name is a required field. + out.Service.Name = "unknown" + } + if out.Agent.Name == "" { + // agent.name is a required field. + out.Agent.Name = "otlp" + } + if out.Agent.Version == "" { + // agent.version is a required field. + out.Agent.Version = "unknown" + } + if out.Service.Language.Name != "" { + out.Agent.Name = fmt.Sprintf("%s/%s", out.Agent.Name, out.Service.Language.Name) + } else { + out.Service.Language.Name = "unknown" + } +} + +func cleanServiceName(name string) string { + return serviceNameInvalidRegexp.ReplaceAllString(truncate(name), "_") +} + +func ifaceAttributeValue(v pdata.AttributeValue) interface{} { + switch v.Type() { + case pdata.AttributeValueTypeString: + return truncate(v.StringVal()) + case pdata.AttributeValueTypeInt: + return v.IntVal() + case pdata.AttributeValueTypeDouble: + return v.DoubleVal() + case pdata.AttributeValueTypeBool: + return v.BoolVal() + case pdata.AttributeValueTypeArray: + return ifaceAnyValueArray(v.ArrayVal()) + } + return nil +} + +func ifaceAnyValueArray(array pdata.AnyValueArray) []interface{} { + values := make([]interface{}, array.Len()) + for i := range values { + values[i] = ifaceAttributeValue(array.At(i)) + } + return values +} + +// initEventLabels initializes an event-specific label map, either making a copy +// of commonLabels if it is non-nil, or otherwise creating a new map. 
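+// (common.MapStr.Clone returns a fresh, non-nil map even for a nil
+// receiver, which is what makes the single Clone call below sufficient.)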
+func initEventLabels(commonLabels common.MapStr) common.MapStr { + return commonLabels.Clone() +} diff --git a/processor/otel/metadata_test.go b/processor/otel/metadata_test.go new file mode 100644 index 00000000000..2a43c9344d7 --- /dev/null +++ b/processor/otel/metadata_test.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otel_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/model/pdata" + + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestResourceConventions(t *testing.T) { + defaultAgent := model.Agent{Name: "otlp", Version: "unknown"} + defaultService := model.Service{ + Name: "unknown", + Language: model.Language{Name: "unknown"}, + } + + for name, test := range map[string]struct { + attrs map[string]pdata.AttributeValue + expected model.APMEvent + }{ + "empty": { + attrs: nil, + expected: model.APMEvent{Agent: defaultAgent, Service: defaultService}, + }, + "service": { + attrs: map[string]pdata.AttributeValue{ + "service.name": pdata.NewAttributeValueString("service_name"), + "service.version": pdata.NewAttributeValueString("service_version"), + "deployment.environment": pdata.NewAttributeValueString("service_environment"), + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "otlp", Version: "unknown"}, + Service: model.Service{ + Name: "service_name", + Version: "service_version", + Environment: "service_environment", + Language: model.Language{Name: "unknown"}, + }, + }, + }, + "agent": { + attrs: map[string]pdata.AttributeValue{ + "telemetry.sdk.name": pdata.NewAttributeValueString("sdk_name"), + "telemetry.sdk.version": pdata.NewAttributeValueString("sdk_version"), + "telemetry.sdk.language": pdata.NewAttributeValueString("language_name"), + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "sdk_name/language_name", Version: "sdk_version"}, + Service: model.Service{ + Name: "unknown", + Language: model.Language{Name: "language_name"}, + }, + }, + }, + "runtime": { + attrs: map[string]pdata.AttributeValue{ + "process.runtime.name": pdata.NewAttributeValueString("runtime_name"), + "process.runtime.version": pdata.NewAttributeValueString("runtime_version"), + }, + expected: model.APMEvent{ + Agent: model.Agent{Name: "otlp", Version: "unknown"}, + Service: model.Service{ + Name: "unknown", + Language: model.Language{Name: "unknown"}, + Runtime: model.Runtime{ + Name: "runtime_name", + Version: "runtime_version", + }, + }, + }, + }, + "cloud": { + attrs: map[string]pdata.AttributeValue{ + "cloud.provider": pdata.NewAttributeValueString("provider_name"), + "cloud.region": pdata.NewAttributeValueString("region_name"), + "cloud.account.id": 
pdata.NewAttributeValueString("account_id"), + "cloud.availability_zone": pdata.NewAttributeValueString("availability_zone"), + "cloud.platform": pdata.NewAttributeValueString("platform_name"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Cloud: model.Cloud{ + Provider: "provider_name", + Region: "region_name", + AccountID: "account_id", + AvailabilityZone: "availability_zone", + ServiceName: "platform_name", + }, + }, + }, + "container": { + attrs: map[string]pdata.AttributeValue{ + "container.name": pdata.NewAttributeValueString("container_name"), + "container.id": pdata.NewAttributeValueString("container_id"), + "container.image.name": pdata.NewAttributeValueString("container_image_name"), + "container.image.tag": pdata.NewAttributeValueString("container_image_tag"), + "container.runtime": pdata.NewAttributeValueString("container_runtime"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Container: model.Container{ + Name: "container_name", + ID: "container_id", + Runtime: "container_runtime", + ImageName: "container_image_name", + ImageTag: "container_image_tag", + }, + }, + }, + "kubernetes": { + attrs: map[string]pdata.AttributeValue{ + "k8s.namespace.name": pdata.NewAttributeValueString("kubernetes_namespace"), + "k8s.node.name": pdata.NewAttributeValueString("kubernetes_node_name"), + "k8s.pod.name": pdata.NewAttributeValueString("kubernetes_pod_name"), + "k8s.pod.uid": pdata.NewAttributeValueString("kubernetes_pod_uid"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Kubernetes: model.Kubernetes{ + Namespace: "kubernetes_namespace", + NodeName: "kubernetes_node_name", + PodName: "kubernetes_pod_name", + PodUID: "kubernetes_pod_uid", + }, + }, + }, + "host": { + attrs: map[string]pdata.AttributeValue{ + "host.name": pdata.NewAttributeValueString("host_name"), + "host.id": pdata.NewAttributeValueString("host_id"), + "host.type": pdata.NewAttributeValueString("host_type"), + "host.arch": pdata.NewAttributeValueString("host_arch"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Host: model.Host{ + Hostname: "host_name", + ID: "host_id", + Type: "host_type", + Architecture: "host_arch", + }, + }, + }, + "process": { + attrs: map[string]pdata.AttributeValue{ + "process.pid": pdata.NewAttributeValueInt(123), + "process.command_line": pdata.NewAttributeValueString("command_line"), + "process.executable.path": pdata.NewAttributeValueString("executable_path"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Process: model.Process{ + Pid: 123, + CommandLine: "command_line", + Executable: "executable_path", + }, + }, + }, + "os": { + attrs: map[string]pdata.AttributeValue{ + "os.type": pdata.NewAttributeValueString("DARWIN"), + "os.description": pdata.NewAttributeValueString("Mac OS Mojave"), + }, + expected: model.APMEvent{ + Agent: defaultAgent, + Service: defaultService, + Host: model.Host{ + OS: model.OS{ + Platform: "darwin", + Type: "macos", + Full: "Mac OS Mojave", + }, + }, + }, + }, + } { + t.Run(name, func(t *testing.T) { + meta := transformResourceMetadata(t, test.attrs) + assert.Equal(t, test.expected, meta) + }) + } +} + +func TestResourceLabels(t *testing.T) { + stringArray := pdata.NewAttributeValueArray() + stringArray.ArrayVal().AppendEmpty().SetStringVal("abc") + stringArray.ArrayVal().AppendEmpty().SetStringVal("def") + + intArray := pdata.NewAttributeValueArray() + 
intArray.ArrayVal().AppendEmpty().SetIntVal(123) + intArray.ArrayVal().AppendEmpty().SetIntVal(456) + + metadata := transformResourceMetadata(t, map[string]pdata.AttributeValue{ + "string_array": stringArray, + "int_array": intArray, + }) + assert.Equal(t, common.MapStr{ + "string_array": []interface{}{"abc", "def"}, + "int_array": []interface{}{int64(123), int64(456)}, + }, metadata.Labels) +} + +func transformResourceMetadata(t *testing.T, resourceAttrs map[string]pdata.AttributeValue) model.APMEvent { + traces, spans := newTracesSpans() + traces.ResourceSpans().At(0).Resource().Attributes().InitFromMap(resourceAttrs) + otelSpan := spans.Spans().AppendEmpty() + otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) + events := transformTraces(t, traces) + events[0].Transaction = nil + events[0].Trace = model.Trace{} + events[0].Event.Outcome = "" + events[0].Timestamp = time.Time{} + events[0].Processor = model.Processor{} + return events[0] +} diff --git a/processor/otel/metrics.go b/processor/otel/metrics.go new file mode 100644 index 00000000000..b0be29dcbb5 --- /dev/null +++ b/processor/otel/metrics.go @@ -0,0 +1,330 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Portions copied from OpenTelemetry Collector (contrib), from the +// elastic exporter. +// +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "time" + + "go.opentelemetry.io/collector/model/pdata" + + logs "github.com/elastic/apm-server/log" + "github.com/elastic/apm-server/model" + "github.com/elastic/beats/v7/libbeat/logp" +) + +// ConsumeMetrics consumes OpenTelemetry metrics data, converting into +// the Elastic APM metrics model and sending to the reporter. 
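+//
+// A minimal sketch of how a caller might wire this up (the processor value
+// here is hypothetical; any model.BatchProcessor implementation will do):
+//
+//	consumer := &Consumer{Processor: processor}
+//	if err := consumer.ConsumeMetrics(ctx, metrics); err != nil {
+//		// handle or propagate the batch processor's error
+//	}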
+func (c *Consumer) ConsumeMetrics(ctx context.Context, metrics pdata.Metrics) error { + receiveTimestamp := time.Now() + logger := logp.NewLogger(logs.Otel) + if logger.IsDebug() { + data, err := jsonMetricsMarshaler.MarshalMetrics(metrics) + if err != nil { + logger.Debug(err) + } else { + logger.Debug(string(data)) + } + } + batch := c.convertMetrics(metrics, receiveTimestamp) + return c.Processor.ProcessBatch(ctx, batch) +} + +func (c *Consumer) convertMetrics(metrics pdata.Metrics, receiveTimestamp time.Time) *model.Batch { + batch := model.Batch{} + resourceMetrics := metrics.ResourceMetrics() + for i := 0; i < resourceMetrics.Len(); i++ { + c.convertResourceMetrics(resourceMetrics.At(i), receiveTimestamp, &batch) + } + return &batch +} + +func (c *Consumer) convertResourceMetrics(resourceMetrics pdata.ResourceMetrics, receiveTimestamp time.Time, out *model.Batch) { + var baseEvent model.APMEvent + var timeDelta time.Duration + resource := resourceMetrics.Resource() + translateResourceMetadata(resource, &baseEvent) + if exportTimestamp, ok := exportTimestamp(resource); ok { + timeDelta = receiveTimestamp.Sub(exportTimestamp) + } + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics() + for i := 0; i < instrumentationLibraryMetrics.Len(); i++ { + c.convertInstrumentationLibraryMetrics(instrumentationLibraryMetrics.At(i), baseEvent, timeDelta, out) + } +} + +func (c *Consumer) convertInstrumentationLibraryMetrics( + in pdata.InstrumentationLibraryMetrics, + baseEvent model.APMEvent, + timeDelta time.Duration, + out *model.Batch, +) { + ms := make(metricsets) + otelMetrics := in.Metrics() + var unsupported int64 + for i := 0; i < otelMetrics.Len(); i++ { + if !c.addMetric(otelMetrics.At(i), ms) { + unsupported++ + } + } + for key, ms := range ms { + event := baseEvent + event.Processor = model.MetricsetProcessor + event.Timestamp = key.timestamp.Add(timeDelta) + event.Metricset = &model.Metricset{Samples: ms.samples} + if ms.attributes.Len() > 0 { + event.Labels = initEventLabels(event.Labels) + ms.attributes.Range(func(k string, v pdata.AttributeValue) bool { + event.Labels[k] = ifaceAttributeValue(v) + return true + }) + } + *out = append(*out, event) + } + if unsupported > 0 { + atomic.AddInt64(&c.stats.unsupportedMetricsDropped, unsupported) + } +} + +func (c *Consumer) addMetric(metric pdata.Metric, ms metricsets) bool { + // TODO(axw) support units + anyDropped := false + switch metric.DataType() { + case pdata.MetricDataTypeGauge: + dps := metric.Gauge().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + if sample, ok := numberSample(dp, model.MetricTypeGauge); ok { + ms.upsert(dp.Timestamp().AsTime(), metric.Name(), dp.Attributes(), sample) + } else { + anyDropped = true + } + } + return !anyDropped + case pdata.MetricDataTypeSum: + dps := metric.Sum().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + if sample, ok := numberSample(dp, model.MetricTypeCounter); ok { + ms.upsert(dp.Timestamp().AsTime(), metric.Name(), dp.Attributes(), sample) + } else { + anyDropped = true + } + } + return !anyDropped + case pdata.MetricDataTypeHistogram: + dps := metric.Histogram().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + if sample, ok := histogramSample(dp.BucketCounts(), dp.ExplicitBounds()); ok { + ms.upsert(dp.Timestamp().AsTime(), metric.Name(), dp.Attributes(), sample) + } else { + anyDropped = true + } + } + case pdata.MetricDataTypeSummary: + // TODO(axw) 
https://github.com/elastic/apm-server/issues/3195
+		// (Not quite the same issue, but the solution would also enable
+		// aggregate metrics, which would be appropriate for summaries.)
+		fallthrough
+	default:
+		// Unsupported metric: report that it has been dropped.
+		anyDropped = true
+	}
+	return !anyDropped
+}
+
+func numberSample(dp pdata.NumberDataPoint, metricType model.MetricType) (model.MetricsetSample, bool) {
+	var value float64
+	switch dp.Type() {
+	case pdata.MetricValueTypeInt:
+		value = float64(dp.IntVal())
+	case pdata.MetricValueTypeDouble:
+		value = dp.DoubleVal()
+	default:
+		return model.MetricsetSample{}, false
+	}
+	return model.MetricsetSample{
+		Type:  metricType,
+		Value: value,
+	}, true
+}
+
+func histogramSample(bucketCounts []uint64, explicitBounds []float64) (model.MetricsetSample, bool) {
+	// (From opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto)
+	//
+	// This defines size(explicit_bounds) + 1 (= N) buckets. The boundaries for
+	// bucket at index i are:
+	//
+	// (-infinity, explicit_bounds[i]] for i == 0
+	// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < N-1
+	// (explicit_bounds[i-1], +infinity) for i == N-1
+	//
+	// The values in the explicit_bounds array must be strictly increasing.
+	//
+	if len(bucketCounts) != len(explicitBounds)+1 {
+		return model.MetricsetSample{}, false
+	}
+
+	// For the bucket values, we follow the approach described by Prometheus's
+	// histogram_quantile function (https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile)
+	// to achieve consistent percentile aggregation results:
+	//
+	// "The histogram_quantile() function interpolates quantile values by assuming a linear
+	// distribution within a bucket. (...) If a quantile is located in the highest bucket,
+	// the upper bound of the second highest bucket is returned. A lower limit of the lowest
+	// bucket is assumed to be 0 if the upper bound of that bucket is greater than 0. In that
+	// case, the usual linear interpolation is applied within that bucket. Otherwise, the upper
+	// bound of the lowest bucket is returned for quantiles located in the lowest bucket."
+	values := make([]float64, 0, len(bucketCounts))
+	counts := make([]int64, 0, len(bucketCounts))
+	for i, count := range bucketCounts {
+		if count == 0 {
+			continue
+		}
+
+		var value float64
+		switch i {
+		// (-infinity, explicit_bounds[i]]
+		case 0:
+			value = explicitBounds[i]
+			if value > 0 {
+				value /= 2
+			}
+
+		// (explicit_bounds[i-1], +infinity)
+		case len(bucketCounts) - 1:
+			value = explicitBounds[i-1]
+
+		// (explicit_bounds[i-1], explicit_bounds[i]]
+		default:
+			// Use the midpoint between the boundaries.
+			value = explicitBounds[i-1] + (explicitBounds[i]-explicitBounds[i-1])/2.0
+		}
+
+		counts = append(counts, int64(count))
+		values = append(values, value)
+	}
+	return model.MetricsetSample{
+		Type: model.MetricTypeHistogram,
+		Histogram: model.Histogram{
+			Counts: counts,
+			Values: values,
+		},
+	}, true
+}
+
+type metricsets map[metricsetKey]metricset
+
+type metricsetKey struct {
+	timestamp time.Time
+	signature string // combination of all attributes
+}
+
+type metricset struct {
+	attributes pdata.AttributeMap
+	samples    map[string]model.MetricsetSample
+}
+
+// upsert searches for an existing metricset with the given timestamp and labels,
+// and appends the sample to it. If there is no such existing metricset, a new one
+// is created.
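+//
+// For example, given the translations below, a "runtime.jvm.gc.count" sample
+// with attribute gc="G1 Young Generation" is recorded twice: once verbatim
+// under its original name and attributes, and once as "jvm.gc.count" with the
+// attribute renamed to name="G1 Young Generation".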
+func (ms metricsets) upsert(timestamp time.Time, name string, attributes pdata.AttributeMap, sample model.MetricsetSample) {
+	// We always record metrics as they are given. We also copy some
+	// well-known OpenTelemetry metrics to their Elastic APM equivalents.
+	ms.upsertOne(timestamp, name, attributes, sample)
+
+	switch name {
+	case "runtime.jvm.memory.area":
+		// runtime.jvm.memory.area -> jvm.memory.{area}.{type}
+		var areaValue, typeValue string
+		attributes.Range(func(k string, v pdata.AttributeValue) bool {
+			switch k {
+			case "area":
+				areaValue = v.AsString()
+			case "type":
+				typeValue = v.AsString()
+			}
+			return true
+		})
+		if areaValue != "" && typeValue != "" {
+			elasticapmName := fmt.Sprintf("jvm.memory.%s.%s", areaValue, typeValue)
+			ms.upsertOne(timestamp, elasticapmName, pdata.NewAttributeMap(), sample)
+		}
+	case "runtime.jvm.gc.collection":
+		// This is the old name for runtime.jvm.gc.time.
+		name = "runtime.jvm.gc.time"
+		fallthrough
+	case "runtime.jvm.gc.time", "runtime.jvm.gc.count":
+		// Chop off the "runtime." prefix, i.e. runtime.jvm.gc.time -> jvm.gc.time.
+		// OpenTelemetry and Elastic APM metrics are both defined in milliseconds.
+		elasticapmName := name[len("runtime."):]
+
+		// Copy label "gc" to "name".
+		elasticapmAttributes := pdata.NewAttributeMap()
+		attributes.Range(func(k string, v pdata.AttributeValue) bool {
+			if k == "gc" {
+				elasticapmAttributes.Insert("name", v)
+				return false
+			}
+			return true
+		})
+		ms.upsertOne(timestamp, elasticapmName, elasticapmAttributes, sample)
+	}
+}
+
+func (ms metricsets) upsertOne(timestamp time.Time, name string, attributes pdata.AttributeMap, sample model.MetricsetSample) {
+	var signatureBuilder strings.Builder
+	attributes.Range(func(k string, v pdata.AttributeValue) bool {
+		signatureBuilder.WriteString(k)
+		signatureBuilder.WriteString(v.AsString())
+		return true
+	})
+	key := metricsetKey{timestamp: timestamp, signature: signatureBuilder.String()}
+
+	m, ok := ms[key]
+	if !ok {
+		m = metricset{
+			attributes: attributes,
+			samples:    make(map[string]model.MetricsetSample),
+		}
+		ms[key] = m
+	}
+	m.samples[name] = sample
+}
diff --git a/processor/otel/metrics_test.go b/processor/otel/metrics_test.go
new file mode 100644
index 00000000000..81ff8109ba2
--- /dev/null
+++ b/processor/otel/metrics_test.go
@@ -0,0 +1,374 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Portions copied from OpenTelemetry Collector (contrib), from the
+// elastic exporter.
+//
+// Copyright 2020, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/pdata" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/processor/otel" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" +) + +func TestConsumeMetrics(t *testing.T) { + metrics := pdata.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() + metricSlice := instrumentationLibraryMetrics.Metrics() + appendMetric := func(name string, dataType pdata.MetricDataType) pdata.Metric { + metric := metricSlice.AppendEmpty() + metric.SetName(name) + metric.SetDataType(dataType) + return metric + } + + timestamp0 := time.Unix(123, 0).UTC() + timestamp1 := time.Unix(456, 0).UTC() + + var expectDropped int64 + + metric := appendMetric("gauge_metric", pdata.MetricDataTypeGauge) + gauge := metric.Gauge() + gaugeDP0 := gauge.DataPoints().AppendEmpty() + gaugeDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + gaugeDP0.SetIntVal(1) + gaugeDP1 := gauge.DataPoints().AppendEmpty() + gaugeDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP1.SetDoubleVal(2.3) + gaugeDP1.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "k": pdata.NewAttributeValueString("v"), + }) + gaugeDP2 := gauge.DataPoints().AppendEmpty() + gaugeDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP2.SetIntVal(4) + gaugeDP3 := gauge.DataPoints().AppendEmpty() + gaugeDP3.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP3.SetDoubleVal(5.6) + gaugeDP3.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "k": pdata.NewAttributeValueString("v2"), + }) + + metric = appendMetric("sum_metric", pdata.MetricDataTypeSum) + sum := metric.Sum() + sumDP0 := sum.DataPoints().AppendEmpty() + sumDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + sumDP0.SetIntVal(7) + sumDP1 := sum.DataPoints().AppendEmpty() + sumDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + sumDP1.SetDoubleVal(8.9) + sumDP1.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "k": pdata.NewAttributeValueString("v"), + }) + sumDP2 := sum.DataPoints().AppendEmpty() + sumDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + sumDP2.SetIntVal(10) + sumDP2.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "k2": pdata.NewAttributeValueString("v"), + }) + sumDP3 := sum.DataPoints().AppendEmpty() + sumDP3.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + sumDP3.SetDoubleVal(11.12) + sumDP3.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "k": pdata.NewAttributeValueString("v2"), + }) + + metric = appendMetric("histogram_metric", pdata.MetricDataTypeHistogram) + histogram := metric.Histogram() + histogramDP := histogram.DataPoints().AppendEmpty() + histogramDP.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + histogramDP.SetBucketCounts([]uint64{1, 1, 2, 3}) + 
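+	// With the bounds set below ({-1, 2, 3.5}), the expected bucket values are:
+	// -1 for (-inf, -1] (the upper bound is not positive, so it is not halved),
+	// 0.5 and 2.75 as the midpoints of (-1, 2] and (2, 3.5], and 3.5 for the
+	// final (3.5, +inf) bucket.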
histogramDP.SetExplicitBounds([]float64{-1.0, 2.0, 3.5}) + + metric = appendMetric("invalid_histogram_metric", pdata.MetricDataTypeHistogram) + invalidHistogram := metric.Histogram() + invalidHistogramDP := invalidHistogram.DataPoints().AppendEmpty() + invalidHistogramDP.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + invalidHistogramDP.SetBucketCounts([]uint64{1, 2, 3}) // should be one more bucket count than bounds + invalidHistogramDP.SetExplicitBounds([]float64{1, 2, 3}) + expectDropped++ + + // Summary metrics are not yet supported, and will be dropped. + metric = appendMetric("summary_metric", pdata.MetricDataTypeSummary) + metric.Summary().DataPoints().AppendEmpty() + expectDropped++ + + events, stats := transformMetrics(t, metrics) + assert.Equal(t, expectDropped, stats.UnsupportedMetricsDropped) + + service := model.Service{Name: "unknown", Language: model.Language{Name: "unknown"}} + agent := model.Agent{Name: "otlp", Version: "unknown"} + assert.ElementsMatch(t, []model.APMEvent{{ + Agent: agent, + Service: service, + Timestamp: timestamp0, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "gauge_metric": {Value: 1, Type: "gauge"}, + "sum_metric": {Value: 7, Type: "counter"}, + "histogram_metric": { + Type: "histogram", + Histogram: model.Histogram{ + Counts: []int64{1, 1, 2, 3}, + Values: []float64{-1, 0.5, 2.75, 3.5}, + }, + }, + }, + }, + }, { + Agent: agent, + Service: service, + Timestamp: timestamp1, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "gauge_metric": {Value: 4, Type: "gauge"}, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"k": "v"}, + Timestamp: timestamp1, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "gauge_metric": {Value: 2.3, Type: "gauge"}, + "sum_metric": {Value: 8.9, Type: "counter"}, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"k": "v2"}, + Timestamp: timestamp1, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "gauge_metric": {Value: 5.6, Type: "gauge"}, + "sum_metric": {Value: 11.12, Type: "counter"}, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"k2": "v"}, + Timestamp: timestamp1, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "sum_metric": {Value: 10, Type: "counter"}, + }, + }, + }}, events) +} + +func TestConsumeMetrics_JVM(t *testing.T) { + metrics := pdata.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() + metricSlice := instrumentationLibraryMetrics.Metrics() + appendMetric := func(name string, dataType pdata.MetricDataType) pdata.Metric { + metric := metricSlice.AppendEmpty() + metric.SetName(name) + metric.SetDataType(dataType) + return metric + } + + timestamp := time.Unix(123, 0).UTC() + addInt64Sum := func(name string, value int64, attributes map[string]pdata.AttributeValue) { + metric := appendMetric(name, pdata.MetricDataTypeSum) + sum := metric.Sum() + dp := sum.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(timestamp)) + dp.SetIntVal(value) + dp.Attributes().InitFromMap(attributes) + } + addInt64Gauge := func(name string, value int64, attributes 
map[string]pdata.AttributeValue) { + metric := appendMetric(name, pdata.MetricDataTypeGauge) + sum := metric.Gauge() + dp := sum.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(timestamp)) + dp.SetIntVal(value) + dp.Attributes().InitFromMap(attributes) + } + addInt64Sum("runtime.jvm.gc.time", 9, map[string]pdata.AttributeValue{ + "gc": pdata.NewAttributeValueString("G1 Young Generation"), + }) + addInt64Sum("runtime.jvm.gc.count", 2, map[string]pdata.AttributeValue{ + "gc": pdata.NewAttributeValueString("G1 Young Generation"), + }) + addInt64Gauge("runtime.jvm.memory.area", 42, map[string]pdata.AttributeValue{ + "area": pdata.NewAttributeValueString("heap"), + "type": pdata.NewAttributeValueString("used"), + }) + + events, _ := transformMetrics(t, metrics) + service := model.Service{Name: "unknown", Language: model.Language{Name: "unknown"}} + agent := model.Agent{Name: "otlp", Version: "unknown"} + assert.ElementsMatch(t, []model.APMEvent{{ + Agent: agent, + Service: service, + Timestamp: timestamp, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "jvm.memory.heap.used": { + Type: "gauge", + Value: 42, + }, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"gc": "G1 Young Generation"}, + Timestamp: timestamp, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "runtime.jvm.gc.time": { + Type: "counter", + Value: 9, + }, + "runtime.jvm.gc.count": { + Type: "counter", + Value: 2, + }, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"name": "G1 Young Generation"}, + Timestamp: timestamp, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "jvm.gc.time": { + Type: "counter", + Value: 9, + }, + "jvm.gc.count": { + Type: "counter", + Value: 2, + }, + }, + }, + }, { + Agent: agent, + Service: service, + Labels: common.MapStr{"area": "heap", "type": "used"}, + Timestamp: timestamp, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Samples: map[string]model.MetricsetSample{ + "runtime.jvm.memory.area": { + Type: "gauge", + Value: 42, + }, + }, + }, + }}, events) +} + +func TestConsumeMetricsExportTimestamp(t *testing.T) { + metrics := pdata.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + + // The actual timestamps will be non-deterministic, as they are adjusted + // based on the server's clock. + // + // Use a large delta so that we can allow for a significant amount of + // delay in the test environment affecting the timestamp adjustment. + const timeDelta = time.Hour + const allowedError = 5 // seconds + + now := time.Now() + exportTimestamp := now.Add(-timeDelta) + resourceMetrics.Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "telemetry.sdk.elastic_export_timestamp": pdata.NewAttributeValueInt(exportTimestamp.UnixNano()), + }) + + // Timestamp relative to the export timestamp. 
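+	// The consumer computes delta = receive time - export timestamp from the
+	// telemetry.sdk.elastic_export_timestamp resource attribute and shifts each
+	// data point by that delta, so a point recorded one second before export
+	// should surface roughly one second before "now".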
+ dataPointOffset := -time.Second + exportedDataPointTimestamp := exportTimestamp.Add(dataPointOffset) + + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() + metric := instrumentationLibraryMetrics.Metrics().AppendEmpty() + metric.SetName("int_gauge") + metric.SetDataType(pdata.MetricDataTypeGauge) + intGauge := metric.Gauge() + dp := intGauge.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(exportedDataPointTimestamp)) + dp.SetIntVal(1) + + events, _ := transformMetrics(t, metrics) + require.Len(t, events, 1) + assert.InDelta(t, now.Add(dataPointOffset).Unix(), events[0].Timestamp.Unix(), allowedError) +} + +func TestMetricsLogging(t *testing.T) { + for _, level := range []logp.Level{logp.InfoLevel, logp.DebugLevel} { + t.Run(level.String(), func(t *testing.T) { + logp.DevelopmentSetup(logp.ToObserverOutput(), logp.WithLevel(level)) + transformMetrics(t, pdata.NewMetrics()) + logs := logp.ObserverLogs().TakeAll() + if level == logp.InfoLevel { + assert.Empty(t, logs) + } else { + assert.NotEmpty(t, logs) + } + }) + } +} + +func transformMetrics(t *testing.T, metrics pdata.Metrics) ([]model.APMEvent, otel.ConsumerStats) { + var batches []*model.Batch + recorder := batchRecorderBatchProcessor(&batches) + + consumer := &otel.Consumer{Processor: recorder} + err := consumer.ConsumeMetrics(context.Background(), metrics) + require.NoError(t, err) + require.Len(t, batches, 1) + return *batches[0], consumer.Stats() +} diff --git a/processor/otel/test_approved/consume_empty.approved.json b/processor/otel/test_approved/consume_empty.approved.json deleted file mode 100644 index 81a8aeaa2c3..00000000000 --- a/processor/otel/test_approved/consume_empty.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "events": [] -} diff --git a/processor/otel/test_approved/consume_emptytrace.approved.json b/processor/otel/test_approved/consume_emptytrace.approved.json deleted file mode 100644 index 81a8aeaa2c3..00000000000 --- a/processor/otel/test_approved/consume_emptytrace.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "events": [] -} diff --git a/processor/otel/test_approved/consume_span.approved.json b/processor/otel/test_approved/consume_span.approved.json deleted file mode 100644 index 27532f1aa4c..00000000000 --- a/processor/otel/test_approved/consume_span.approved.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "events": [ - { - "@timestamp": "2019-12-16T12:46:58.000Z", - "agent": { - "name": "Jaeger", - "version": "unknown" - }, - "event": { - "outcome": "success" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "unknown" - }, - "name": "unknown" - }, - "timestamp": { - "us": 1576500418000768 - }, - "transaction": { - "duration": { - "us": 0 - }, - "id": "", - "result": "Success", - "sampled": true, - "type": "custom" - } - }, - { - "@timestamp": "2019-12-16T12:46:58.000Z", - "agent": { - "name": "Jaeger", - "version": "unknown" - }, - "event": { - "outcome": "unknown" - }, - "parent": { - "id": "46463058" - }, - "processor": { - "event": "span", - "name": "transaction" - }, - "service": { - "language": { - "name": "unknown" - }, - "name": "unknown" - }, - "span": { - "duration": { - "us": 0 - }, - "name": "", - "type": "custom" - }, - "timestamp": { - "us": 1576500418000768 - } - } - ] -} diff --git a/processor/otel/test_approved/jaeger_sampling_rate.approved.json b/processor/otel/test_approved/jaeger_sampling_rate.approved.json index 62dd8bc9954..9c2ecfd13e5 100644 --- 
a/processor/otel/test_approved/jaeger_sampling_rate.approved.json +++ b/processor/otel/test_approved/jaeger_sampling_rate.approved.json @@ -7,11 +7,10 @@ "version": "unknown" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "processor": { "event": "transaction", @@ -21,10 +20,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 @@ -33,8 +29,6 @@ "duration": { "us": 79000000 }, - "id": "", - "result": "Success", "sampled": true, "type": "custom" } @@ -49,11 +43,10 @@ "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "parent": { - "id": "01" + "id": "0000000000000001" }, "processor": { "event": "span", @@ -63,20 +56,56 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "duration": { "us": 79000000 }, - "name": "", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576500418000768 + }, + "trace": { + "id": "00000000000000010000000000000001" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "event": { + "outcome": "unknown" + }, + "host": { + "hostname": "host-abc" + }, + "labels": { + "sampler_param": 2, + "sampler_type": "ratelimiting" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown" + }, + "timestamp": { + "us": 1576500418000768 + }, + "transaction": { + "duration": { + "us": 79000000 + }, + "sampled": true, + "type": "custom" } } ] diff --git a/processor/otel/test_approved/metadata_jaeger-no-language.approved.json b/processor/otel/test_approved/metadata_jaeger-no-language.approved.json index c2b0e86f0ee..b0db5a40175 100644 --- a/processor/otel/test_approved/metadata_jaeger-no-language.approved.json +++ b/processor/otel/test_approved/metadata_jaeger-no-language.approved.json @@ -7,7 +7,7 @@ "version": "3.4.12" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "processor": { "event": "transaction", @@ -23,14 +23,13 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { "duration": { "us": 0 }, - "id": "41414646", - "result": "Success", + "id": "0000000041414646", "sampled": true, "type": "custom" } diff --git a/processor/otel/test_approved/metadata_jaeger-version.approved.json b/processor/otel/test_approved/metadata_jaeger-version.approved.json index d3254dc9a11..4827fa32217 100644 --- a/processor/otel/test_approved/metadata_jaeger-version.approved.json +++ b/processor/otel/test_approved/metadata_jaeger-version.approved.json @@ -7,7 +7,7 @@ "version": "3.4.12" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "processor": { "event": "transaction", @@ -23,14 +23,13 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { "duration": { "us": 0 }, - "id": "41414646", - "result": "Success", + "id": "0000000041414646", "sampled": true, "type": "custom" } diff --git a/processor/otel/test_approved/metadata_jaeger.approved.json b/processor/otel/test_approved/metadata_jaeger.approved.json index 878f9244434..6d4476d08f7 100644 --- a/processor/otel/test_approved/metadata_jaeger.approved.json +++ 
b/processor/otel/test_approved/metadata_jaeger.approved.json @@ -8,23 +8,15 @@ "version": "3.2.1" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "host": { "hostname": "host-foo", - "ip": "17.0.10.123", - "name": "host-foo" + "ip": "17.0.10.123" }, "labels": { - "a": "b", - "c": "d", - "e_f": "g", "foo": "bar", - "peer_port": "80", - "resource": "request" - }, - "process": { - "pid": 107892 + "peer_port": "80" }, "processor": { "event": "transaction", @@ -34,23 +26,19 @@ "language": { "name": "C++" }, - "name": "foo", - "node": { - "name": "host-foo" - } + "name": "foo" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { "duration": { "us": 0 }, - "id": "41414646", - "result": "Success", + "id": "0000000041414646", "sampled": true, "type": "custom" } diff --git a/processor/otel/test_approved/metadata_jaeger_full-traceid.approved.json b/processor/otel/test_approved/metadata_jaeger_full-traceid.approved.json deleted file mode 100644 index c5c53999fe4..00000000000 --- a/processor/otel/test_approved/metadata_jaeger_full-traceid.approved.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "events": [ - { - "@timestamp": "2019-12-16T12:46:58.000Z", - "agent": { - "name": "Jaeger", - "version": "unknown" - }, - "event": { - "outcome": "success" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "unknown" - }, - "name": "unknown" - }, - "timestamp": { - "us": 1576500418000768 - }, - "trace": { - "id": "464678300000000046467830" - }, - "transaction": { - "duration": { - "us": 0 - }, - "id": "41414646", - "result": "Success", - "sampled": true, - "type": "custom" - } - } - ] -} diff --git a/processor/otel/test_approved/metadata_jaeger_minimal.approved.json b/processor/otel/test_approved/metadata_jaeger_minimal.approved.json index 23443e1c37e..423a3674c6a 100644 --- a/processor/otel/test_approved/metadata_jaeger_minimal.approved.json +++ b/processor/otel/test_approved/metadata_jaeger_minimal.approved.json @@ -6,6 +6,8 @@ "name": "Jaeger", "version": "unknown" }, + "data_stream.dataset": "apm", + "data_stream.type": "traces", "event": { "outcome": "success" }, diff --git a/processor/otel/test_approved/metadata_minimal.approved.json b/processor/otel/test_approved/metadata_minimal.approved.json deleted file mode 100644 index 34faf081ee8..00000000000 --- a/processor/otel/test_approved/metadata_minimal.approved.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "events": [ - { - "@timestamp": "2019-12-16T12:46:58.000Z", - "agent": { - "name": "Foo", - "version": "unknown" - }, - "event": { - "outcome": "success" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "unknown" - }, - "name": "unknown" - }, - "timestamp": { - "us": 1576500418000768 - }, - "trace": { - "id": "00000000000000000000000046467830" - }, - "transaction": { - "duration": { - "us": 0 - }, - "id": "0000000041414646", - "result": "Success", - "sampled": true, - "type": "custom" - } - } - ] -} diff --git a/processor/otel/test_approved/span_jaeger_custom.approved.json b/processor/otel/test_approved/span_jaeger_custom.approved.json index 52caa6e6884..333b4f4a282 100644 --- a/processor/otel/test_approved/span_jaeger_custom.approved.json +++ b/processor/otel/test_approved/span_jaeger_custom.approved.json @@ -10,11 +10,10 @@ "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, 
"parent": { - "id": "61626364" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -24,20 +23,20 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "duration": { - "us": 0 + "us": 79000000 }, - "name": "", - "type": "custom" + "id": "0000000041414646", + "type": "app" }, "timestamp": { "us": 1576500418000768 + }, + "trace": { + "id": "00000000000000000000000046467830" } } ] diff --git a/processor/otel/test_approved/span_jaeger_db.approved.json b/processor/otel/test_approved/span_jaeger_db.approved.json index d09666b5ab8..e578f14ca47 100644 --- a/processor/otel/test_approved/span_jaeger_db.approved.json +++ b/processor/otel/test_approved/span_jaeger_db.approved.json @@ -14,14 +14,13 @@ "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "labels": { "component": "foo" }, "parent": { - "id": "61626364" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -31,10 +30,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "db": { @@ -53,14 +49,17 @@ } }, "duration": { - "us": 0 + "us": 79000000 }, - "name": "", + "id": "0000000041414646", "subtype": "mysql", "type": "db" }, "timestamp": { "us": 1576500418000768 + }, + "trace": { + "id": "00000000000000000000000046467830" } } ] diff --git a/processor/otel/test_approved/span_jaeger_http.approved.json b/processor/otel/test_approved/span_jaeger_http.approved.json index f0730302c8d..84b845800ff 100644 --- a/processor/otel/test_approved/span_jaeger_http.approved.json +++ b/processor/otel/test_approved/span_jaeger_http.approved.json @@ -14,19 +14,25 @@ "outcome": "failure" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + } }, "labels": { "component": "foo", "double_a": 14.65, - "error": true, "hasErrors": true, "int_a": 148, "string_a_b": "some note" }, "parent": { - "id": "58585858" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -36,10 +42,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "destination": { @@ -56,12 +59,10 @@ "method": "get", "response": { "status_code": 400 - }, - "url": { - "original": "http://foo.bar.com?a=12" } }, - "id": "41414646", + "http.url.original": "http://foo.bar.com?a=12", + "id": "0000000041414646", "name": "HTTP GET", "subtype": "http", "type": "external" @@ -70,7 +71,10 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "http://foo.bar.com?a=12" } }, { @@ -85,14 +89,12 @@ "message": "no connection established" } ], - "grouping_key": "c9221918248f05433f6b81c46a666aee", "log": { "message": "retrying connection" } }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -103,7 +105,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -113,23 +115,16 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": 
"http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } }, { @@ -139,14 +134,12 @@ "version": "unknown" }, "error": { - "grouping_key": "23b7ac1bdf1ca957f9f581cfadee467c", "log": { "message": "nullPointer exception" } }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -157,7 +150,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -167,23 +160,16 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": "http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } }, { @@ -197,12 +183,10 @@ { "message": "no connection established" } - ], - "grouping_key": "c9221918248f05433f6b81c46a666aee" + ] }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -213,7 +197,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -223,23 +207,16 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": "http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } }, { @@ -253,12 +230,10 @@ { "message": "no connection established" } - ], - "grouping_key": "c9221918248f05433f6b81c46a666aee" + ] }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -269,7 +244,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -279,23 +254,16 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": "http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } }, { @@ -309,12 +277,10 @@ { "type": "DBClosedException" } - ], - "grouping_key": "b0cf243ae3f66aa9e6bbed022417d274" + ] }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -325,7 +291,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -335,23 +301,16 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": "http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } }, { @@ -361,14 +320,12 @@ "version": "unknown" }, "error": { - "grouping_key": "c9221918248f05433f6b81c46a666aee", "log": { "message": "no connection established" } }, "host": { - 
"hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -379,7 +336,7 @@ } }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -389,23 +346,141 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "http://foo.bar.com?a=12" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + } + }, + "labels": { + "event": "baggage", + "isValid": false + }, + "parent": { + "id": "0000000058585858" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown" + }, + "trace": { + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "http://foo.bar.com?a=12" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + } + }, + "labels": { + "level": "info" + }, + "message": "retrying connection", + "parent": { + "id": "0000000058585858" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown" + }, + "trace": { + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "http://foo.bar.com?a=12" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + } + }, + "labels": { + "level": "error" + }, + "parent": { + "id": "0000000058585858" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown" + }, + "trace": { + "id": "00000000000000000000000046467830" }, "url": { - "domain": "foo.bar.com", - "full": "http://foo.bar.com?a=12", - "original": "http://foo.bar.com?a=12", - "query": "a=12", - "scheme": "http" + "original": "http://foo.bar.com?a=12" } } ] diff --git a/processor/otel/test_approved/span_jaeger_http_status_code.approved.json b/processor/otel/test_approved/span_jaeger_http_status_code.approved.json index b0eaa635f99..0c4efe5ab9d 100644 --- a/processor/otel/test_approved/span_jaeger_http_status_code.approved.json +++ b/processor/otel/test_approved/span_jaeger_http_status_code.approved.json @@ -14,11 +14,18 @@ "outcome": "success" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 202 + } }, "parent": { - "id": "58585858" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -28,10 +35,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "destination": { @@ -48,12 +52,10 @@ "method": "get", "response": { "status_code": 202 - }, - "url": { - "original": "http://foo.bar.com?a=12" } }, - "id": 
"41414646", + "http.url.original": "http://foo.bar.com?a=12", + "id": "0000000041414646", "name": "HTTP GET", "subtype": "http", "type": "external" @@ -62,7 +64,10 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "http://foo.bar.com?a=12" } } ] diff --git a/processor/otel/test_approved/span_jaeger_https_default_port.approved.json b/processor/otel/test_approved/span_jaeger_https_default_port.approved.json index 118982f1120..cba479e9272 100644 --- a/processor/otel/test_approved/span_jaeger_https_default_port.approved.json +++ b/processor/otel/test_approved/span_jaeger_https_default_port.approved.json @@ -14,11 +14,10 @@ "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "parent": { - "id": "58585858" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -28,10 +27,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "destination": { @@ -44,12 +40,8 @@ "duration": { "us": 79000000 }, - "http": { - "url": { - "original": "https://foo.bar.com:443?a=12" - } - }, - "id": "41414646", + "http.url.original": "https://foo.bar.com:443?a=12", + "id": "0000000041414646", "name": "HTTPS GET", "subtype": "http", "type": "external" @@ -58,7 +50,10 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" + }, + "url": { + "original": "https://foo.bar.com:443?a=12" } } ] diff --git a/processor/otel/test_approved/span_jaeger_messaging.approved.json b/processor/otel/test_approved/span_jaeger_messaging.approved.json index 09358952df0..e7944ec07e5 100644 --- a/processor/otel/test_approved/span_jaeger_messaging.approved.json +++ b/processor/otel/test_approved/span_jaeger_messaging.approved.json @@ -14,11 +14,10 @@ "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "parent": { - "id": "58585858" + "id": "0000000058585858" }, "processor": { "event": "span", @@ -28,16 +27,13 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "span": { "duration": { "us": 79000000 }, - "id": "41414646", + "id": "0000000041414646", "message": { "queue": { "name": "queue-abc" @@ -50,7 +46,7 @@ "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" } } ] diff --git a/processor/otel/test_approved/span_jaeger_subtype_component.approved.json b/processor/otel/test_approved/span_jaeger_subtype_component.approved.json new file mode 100644 index 00000000000..c850e2e27b6 --- /dev/null +++ b/processor/otel/test_approved/span_jaeger_subtype_component.approved.json @@ -0,0 +1,47 @@ +{ + "events": [ + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "event": { + "outcome": "unknown" + }, + "host": { + "hostname": "host-abc" + }, + "labels": { + "component": "whatever" + }, + "parent": { + "id": "0000000058585858" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown" + }, + "span": { + "duration": { + "us": 79000000 + }, + "id": "0000000041414646", + "subtype": "whatever", + "type": "app" + }, + "timestamp": { + "us": 1576500418000768 + }, + "trace": { + "id": "00000000000000000000000046467830" + } + } + ] +} diff --git 
a/processor/otel/test_approved/transaction_jaeger_custom.approved.json b/processor/otel/test_approved/transaction_jaeger_custom.approved.json index 7146dcf5257..bd3a378e4e1 100644 --- a/processor/otel/test_approved/transaction_jaeger_custom.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_custom.approved.json @@ -1,13 +1,16 @@ { "events": [ { - "@timestamp": "0001-01-01T00:00:00.000Z", + "@timestamp": "2019-12-16T12:46:58.000Z", "agent": { "name": "Jaeger", "version": "unknown" }, "event": { - "outcome": "success" + "outcome": "unknown" + }, + "host": { + "hostname": "host-abc" }, "labels": { "a_b": "foo" @@ -22,12 +25,13 @@ }, "name": "unknown" }, + "timestamp": { + "us": 1576500418000768 + }, "transaction": { "duration": { "us": 0 }, - "id": "", - "result": "Success", "sampled": true, "type": "custom" } diff --git a/processor/otel/test_approved/transaction_jaeger_full.approved.json b/processor/otel/test_approved/transaction_jaeger_full.approved.json index d99eeeb0ba5..225ad2316df 100644 --- a/processor/otel/test_approved/transaction_jaeger_full.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_full.approved.json @@ -7,11 +7,10 @@ "version": "unknown" }, "event": { - "outcome": "success" + "outcome": "failure" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -26,7 +25,6 @@ "bool_a": true, "component": "foo", "double_a": 14.65, - "error": true, "int_a": 148, "string_a_b": "some note" }, @@ -39,22 +37,19 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { "duration": { "us": 79000000 }, - "id": "41414646", + "id": "0000000041414646", "name": "HTTP GET", "result": "HTTP 4xx", "sampled": true, @@ -80,14 +75,12 @@ "message": "no connection established" } ], - "grouping_key": "c9221918248f05433f6b81c46a666aee", "log": { "message": "retrying connection" } }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -99,7 +92,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -110,19 +103,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -140,14 +131,12 @@ "version": "unknown" }, "error": { - "grouping_key": "23b7ac1bdf1ca957f9f581cfadee467c", "log": { "message": "nullPointer exception" } }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -159,7 +148,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -170,19 +159,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -204,12 +191,10 @@ { "message": "no connection established" } - ], - "grouping_key": "c9221918248f05433f6b81c46a666aee" + ] }, 
"host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -221,7 +206,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -232,19 +217,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -266,12 +249,10 @@ { "message": "no connection established" } - ], - "grouping_key": "c9221918248f05433f6b81c46a666aee" + ] }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -283,7 +264,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -294,19 +275,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -328,12 +307,10 @@ { "type": "DBClosedException" } - ], - "grouping_key": "b0cf243ae3f66aa9e6bbed022417d274" + ] }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -345,7 +322,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -356,19 +333,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -386,14 +361,12 @@ "version": "unknown" }, "error": { - "grouping_key": "c9221918248f05433f6b81c46a666aee", "log": { "message": "no connection established" } }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "request": { @@ -405,7 +378,7 @@ "version": "1.1" }, "parent": { - "id": "41414646" + "id": "0000000041414646" }, "processor": { "event": "error", @@ -416,19 +389,17 @@ "name": "unknown" }, "name": "unknown", - "node": { - "name": "host-abc" - }, "version": "1.0" }, "timestamp": { "us": 1576500418000768 }, "trace": { - "id": "46467830" + "id": "00000000000000000000000046467830" }, "transaction": { - "id": "41414646", + "id": "0000000041414646", + "sampled": true, "type": "http_request" }, "url": { @@ -438,6 +409,140 @@ "query": "a=12", "scheme": "http" } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + }, + "version": "1.1" + }, + "labels": { + "event": "baggage", + "isValid": false + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown", + "version": "1.0" + }, + "trace": { + "id": "00000000000000000000000046467830" + }, + "url": { + "domain": "foo.bar.com", + "full": "http://foo.bar.com?a=12", + "original": 
"http://foo.bar.com?a=12", + "query": "a=12", + "scheme": "http" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + }, + "version": "1.1" + }, + "labels": { + "level": "info" + }, + "message": "retrying connection", + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown", + "version": "1.0" + }, + "trace": { + "id": "00000000000000000000000046467830" + }, + "url": { + "domain": "foo.bar.com", + "full": "http://foo.bar.com?a=12", + "original": "http://foo.bar.com?a=12", + "query": "a=12", + "scheme": "http" + } + }, + { + "@timestamp": "2019-12-16T12:46:58.000Z", + "agent": { + "name": "Jaeger", + "version": "unknown" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host-abc" + }, + "http": { + "request": { + "method": "get" + }, + "response": { + "status_code": 400 + }, + "version": "1.1" + }, + "labels": { + "level": "error" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "unknown" + }, + "name": "unknown", + "version": "1.0" + }, + "trace": { + "id": "00000000000000000000000046467830" + }, + "url": { + "domain": "foo.bar.com", + "full": "http://foo.bar.com?a=12", + "original": "http://foo.bar.com?a=12", + "query": "a=12", + "scheme": "http" + } } ] } diff --git a/processor/otel/test_approved/transaction_jaeger_no_attrs.approved.json b/processor/otel/test_approved/transaction_jaeger_no_attrs.approved.json index b25646f7724..5ef68402367 100644 --- a/processor/otel/test_approved/transaction_jaeger_no_attrs.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_no_attrs.approved.json @@ -10,11 +10,7 @@ "outcome": "failure" }, "host": { - "hostname": "host-abc", - "name": "host-abc" - }, - "labels": { - "error": true + "hostname": "host-abc" }, "processor": { "event": "transaction", @@ -24,10 +20,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 @@ -36,7 +29,6 @@ "duration": { "us": 79000000 }, - "id": "", "result": "Error", "sampled": true, "type": "custom" diff --git a/processor/otel/test_approved/transaction_jaeger_type_component.approved.json b/processor/otel/test_approved/transaction_jaeger_type_component.approved.json index 1a54e23acd3..8b034c4ea24 100644 --- a/processor/otel/test_approved/transaction_jaeger_type_component.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_type_component.approved.json @@ -1,17 +1,16 @@ { "events": [ { - "@timestamp": "0001-01-01T00:00:00.000Z", + "@timestamp": "2019-12-16T12:46:58.000Z", "agent": { "name": "Jaeger", "version": "unknown" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "labels": { "component": "amqp" @@ -24,17 +23,15 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" + }, + "timestamp": { + "us": 1576500418000768 }, "transaction": { "duration": { "us": 0 }, - "id": "", - "result": "Success", "sampled": true, "type": "amqp" } diff --git a/processor/otel/test_approved/transaction_jaeger_type_messaging.approved.json 
b/processor/otel/test_approved/transaction_jaeger_type_messaging.approved.json index bcb21fb6051..a60b0db966f 100644 --- a/processor/otel/test_approved/transaction_jaeger_type_messaging.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_type_messaging.approved.json @@ -7,14 +7,13 @@ "version": "unknown" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "parent": { - "id": "61626364" + "id": "0000000061626364" }, "processor": { "event": "transaction", @@ -24,10 +23,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 @@ -36,13 +32,11 @@ "duration": { "us": 0 }, - "id": "", "message": { "queue": { "name": "queue-abc" } }, - "result": "Success", "sampled": true, "type": "messaging" } diff --git a/processor/otel/test_approved/transaction_jaeger_type_request.approved.json b/processor/otel/test_approved/transaction_jaeger_type_request.approved.json index 649a4bbc419..678dfef8d2a 100644 --- a/processor/otel/test_approved/transaction_jaeger_type_request.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_type_request.approved.json @@ -10,8 +10,7 @@ "outcome": "failure" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "response": { @@ -22,7 +21,7 @@ "http_protocol": "HTTP" }, "parent": { - "id": "61626364" + "id": "0000000061626364" }, "processor": { "event": "transaction", @@ -32,10 +31,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 @@ -44,7 +40,6 @@ "duration": { "us": 0 }, - "id": "", "result": "HTTP 5xx", "sampled": true, "type": "request" diff --git a/processor/otel/test_approved/transaction_jaeger_type_request_result.approved.json b/processor/otel/test_approved/transaction_jaeger_type_request_result.approved.json index 965c35a5587..371c4258575 100644 --- a/processor/otel/test_approved/transaction_jaeger_type_request_result.approved.json +++ b/processor/otel/test_approved/transaction_jaeger_type_request_result.approved.json @@ -10,8 +10,7 @@ "outcome": "success" }, "host": { - "hostname": "host-abc", - "name": "host-abc" + "hostname": "host-abc" }, "http": { "response": { @@ -19,7 +18,7 @@ } }, "parent": { - "id": "61626364" + "id": "0000000061626364" }, "processor": { "event": "transaction", @@ -29,10 +28,7 @@ "language": { "name": "unknown" }, - "name": "unknown", - "node": { - "name": "host-abc" - } + "name": "unknown" }, "timestamp": { "us": 1576500418000768 @@ -41,7 +37,6 @@ "duration": { "us": 0 }, - "id": "", "result": "HTTP 2xx", "sampled": true, "type": "request" diff --git a/processor/otel/timestamps.go b/processor/otel/timestamps.go new file mode 100644 index 00000000000..17f0403f329 --- /dev/null +++ b/processor/otel/timestamps.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package otel + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +// exportTimestamp extracts the `telemetry.sdk.elastic_export_timestamp` +// resource attribute as a timestamp, and returns a boolean indicating +// whether a positive timestamp attribute was found. +func exportTimestamp(resource pdata.Resource) (time.Time, bool) { + attr, ok := resource.Attributes().Get("telemetry.sdk.elastic_export_timestamp") + if !ok { + return time.Time{}, false + } + nsec := attr.IntVal() + return time.Unix(0, nsec), nsec > 0 +} diff --git a/processor/otel/traces.go b/processor/otel/traces.go new file mode 100644 index 00000000000..48894670bd0 --- /dev/null +++ b/processor/otel/traces.go @@ -0,0 +1,1020 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Portions copied from OpenTelemetry Collector (contrib), from the +// elastic exporter. +// +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel + +import ( + "context" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "sync/atomic" + "time" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" + semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" + "google.golang.org/grpc/codes" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/datastreams" + logs "github.com/elastic/apm-server/log" + "github.com/elastic/apm-server/model" +) + +const ( + keywordLength = 1024 + dot = "." + underscore = "_" + + outcomeSuccess = "success" + outcomeFailure = "failure" + outcomeUnknown = "unknown"
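+ + // Network connection and carrier attribute names, declared locally + // because (presumably) they are not exported by the pinned semconv + // package imported above.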
+ + attributeNetworkConnectionType = "net.host.connection.type" + attributeNetworkConnectionSubtype = "net.host.connection.subtype" + attributeNetworkMCC = "net.host.carrier.mcc" + attributeNetworkMNC = "net.host.carrier.mnc" + attributeNetworkCarrierName = "net.host.carrier.name" + attributeNetworkICC = "net.host.carrier.icc" +) + +var ( + jsonTracesMarshaler = otlp.NewJSONTracesMarshaler() + jsonMetricsMarshaler = otlp.NewJSONMetricsMarshaler() +) + +// Consumer transforms open-telemetry data to be compatible with elastic APM data +type Consumer struct { + stats consumerStats + + Processor model.BatchProcessor +} + +// ConsumerStats holds a snapshot of statistics about data consumption. +type ConsumerStats struct { + // UnsupportedMetricsDropped records the number of unsupported metrics + // that have been dropped by the consumer. + UnsupportedMetricsDropped int64 +} + +// consumerStats holds the current statistics, which must be accessed and +// modified using atomic operations. +type consumerStats struct { + unsupportedMetricsDropped int64 +} + +// Stats returns a snapshot of the current statistics about data consumption. +func (c *Consumer) Stats() ConsumerStats { + return ConsumerStats{ + UnsupportedMetricsDropped: atomic.LoadInt64(&c.stats.unsupportedMetricsDropped), + } +} + +// Capabilities is part of the consumer interfaces. +func (c *Consumer) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: false, + } +} + +// ConsumeTraces consumes OpenTelemetry trace data, +// converting into Elastic APM events and reporting to the Elastic APM schema. +func (c *Consumer) ConsumeTraces(ctx context.Context, traces pdata.Traces) error { + receiveTimestamp := time.Now() + logger := logp.NewLogger(logs.Otel) + if logger.IsDebug() { + data, err := jsonTracesMarshaler.MarshalTraces(traces) + if err != nil { + logger.Debug(err) + } else { + logger.Debug(string(data)) + } + } + batch := c.convert(traces, receiveTimestamp, logger) + return c.Processor.ProcessBatch(ctx, batch) +} + +func (c *Consumer) convert(td pdata.Traces, receiveTimestamp time.Time, logger *logp.Logger) *model.Batch { + batch := model.Batch{} + resourceSpans := td.ResourceSpans() + for i := 0; i < resourceSpans.Len(); i++ { + c.convertResourceSpans(resourceSpans.At(i), receiveTimestamp, logger, &batch) + } + return &batch +} + +func (c *Consumer) convertResourceSpans( + resourceSpans pdata.ResourceSpans, + receiveTimestamp time.Time, + logger *logp.Logger, + out *model.Batch, +) { + var baseEvent model.APMEvent + var timeDelta time.Duration + resource := resourceSpans.Resource() + translateResourceMetadata(resource, &baseEvent) + if exportTimestamp, ok := exportTimestamp(resource); ok { + timeDelta = receiveTimestamp.Sub(exportTimestamp) + } + instrumentationLibrarySpans := resourceSpans.InstrumentationLibrarySpans() + for i := 0; i < instrumentationLibrarySpans.Len(); i++ { + c.convertInstrumentationLibrarySpans( + instrumentationLibrarySpans.At(i), baseEvent, timeDelta, logger, out, + ) + } +} + +func (c *Consumer) convertInstrumentationLibrarySpans( + in pdata.InstrumentationLibrarySpans, + baseEvent model.APMEvent, + timeDelta time.Duration, + logger *logp.Logger, + out *model.Batch, +) { + otelSpans := in.Spans() + for i := 0; i < otelSpans.Len(); i++ { + c.convertSpan(otelSpans.At(i), in.InstrumentationLibrary(), baseEvent, timeDelta, logger, out) + } +} + +func (c *Consumer) convertSpan( + otelSpan pdata.Span, + otelLibrary pdata.InstrumentationLibrary, + baseEvent model.APMEvent, + 
timeDelta time.Duration, + logger *logp.Logger, + out *model.Batch, +) { + root := otelSpan.ParentSpanID().IsEmpty() + var parentID string + if !root { + parentID = otelSpan.ParentSpanID().HexString() + } + + startTime := otelSpan.StartTimestamp().AsTime() + endTime := otelSpan.EndTimestamp().AsTime() + duration := endTime.Sub(startTime) + + // Message consumption results in either a transaction or a span based + // on whether the consumption is active or passive. Otel spans + // currently do not have the metadata to make this distinction. For + // now, we assume that the majority of consumption is passive, and + // therefore start a transaction whenever span kind == consumer. + name := otelSpan.Name() + spanID := otelSpan.SpanID().HexString() + event := baseEvent + event.Labels = initEventLabels(event.Labels) + event.Timestamp = startTime.Add(timeDelta) + event.Trace.ID = otelSpan.TraceID().HexString() + event.Event.Duration = duration + event.Event.Outcome = spanStatusOutcome(otelSpan.Status()) + event.Parent.ID = parentID + if root || otelSpan.Kind() == pdata.SpanKindServer || otelSpan.Kind() == pdata.SpanKindConsumer { + event.Processor = model.TransactionProcessor + event.Transaction = &model.Transaction{ + ID: spanID, + Name: name, + Sampled: true, + } + translateTransaction(otelSpan, otelLibrary, &event) + } else { + event.Processor = model.SpanProcessor + event.Span = &model.Span{ + ID: spanID, + Name: name, + } + translateSpan(otelSpan, &event) + } + if len(event.Labels) == 0 { + event.Labels = nil + } + *out = append(*out, event) + + events := otelSpan.Events() + event.Labels = baseEvent.Labels // only copy common labels to span events + event.Event.Outcome = "" // don't set event.outcome for span events + event.Destination = model.Destination{} // don't set destination for span events + for i := 0; i < events.Len(); i++ { + *out = append(*out, convertSpanEvent(logger, events.At(i), event, timeDelta)) + } +} + +func translateTransaction( + span pdata.Span, + library pdata.InstrumentationLibrary, + event *model.APMEvent, +) { + isJaeger := strings.HasPrefix(event.Agent.Name, "Jaeger") + + var ( + netHostName string + netHostPort int + ) + + var ( + isHTTP bool + httpScheme string + httpURL string + httpServerName string + httpHost string + http model.HTTP + httpRequest model.HTTPRequest + httpResponse model.HTTPResponse + ) + + var ( + isMessaging bool + message model.Message + ) + + var component string + var samplerType, samplerParam pdata.AttributeValue + span.Attributes().Range(func(kDots string, v pdata.AttributeValue) bool { + if isJaeger { + switch kDots { + case "sampler.type": + samplerType = v + return true + case "sampler.param": + samplerParam = v + return true + } + } + + k := replaceDots(kDots) + switch v.Type() { + case pdata.AttributeValueTypeArray: + event.Labels[k] = ifaceAnyValueArray(v.ArrayVal()) + case pdata.AttributeValueTypeBool: + event.Labels[k] = v.BoolVal() + case pdata.AttributeValueTypeDouble: + event.Labels[k] = v.DoubleVal() + case pdata.AttributeValueTypeInt: + switch kDots { + case semconv.AttributeHTTPStatusCode: + isHTTP = true + httpResponse.StatusCode = int(v.IntVal()) + http.Response = &httpResponse + case semconv.AttributeNetPeerPort: + event.Source.Port = int(v.IntVal()) + case semconv.AttributeNetHostPort: + netHostPort = int(v.IntVal()) + case "rpc.grpc.status_code": + event.Transaction.Result = codes.Code(v.IntVal()).String() + default: + event.Labels[k] = v.IntVal() + } + case pdata.AttributeValueTypeString: + stringval := 
truncate(v.StringVal()) + switch kDots { + // http.* + case semconv.AttributeHTTPMethod: + isHTTP = true + httpRequest.Method = stringval + http.Request = &httpRequest + case semconv.AttributeHTTPURL, semconv.AttributeHTTPTarget, "http.path": + isHTTP = true + httpURL = stringval + case semconv.AttributeHTTPHost: + isHTTP = true + httpHost = stringval + case semconv.AttributeHTTPScheme: + isHTTP = true + httpScheme = stringval + case semconv.AttributeHTTPStatusCode: + if intv, err := strconv.Atoi(stringval); err == nil { + isHTTP = true + httpResponse.StatusCode = intv + http.Response = &httpResponse + } + case "http.protocol": + if !strings.HasPrefix(stringval, "HTTP/") { + // Unexpected, store in labels for debugging. + event.Labels[k] = stringval + break + } + stringval = strings.TrimPrefix(stringval, "HTTP/") + fallthrough + case semconv.AttributeHTTPFlavor: + isHTTP = true + http.Version = stringval + case semconv.AttributeHTTPServerName: + isHTTP = true + httpServerName = stringval + case semconv.AttributeHTTPClientIP: + event.Client.IP = net.ParseIP(stringval) + case semconv.AttributeHTTPUserAgent: + event.UserAgent.Original = stringval + + // net.* + case semconv.AttributeNetPeerIP: + event.Source.IP = net.ParseIP(stringval) + case semconv.AttributeNetPeerName: + event.Source.Domain = stringval + case semconv.AttributeNetHostName: + netHostName = stringval + case attributeNetworkConnectionType: + event.Network.Connection.Type = stringval + case attributeNetworkConnectionSubtype: + event.Network.Connection.Subtype = stringval + case attributeNetworkMCC: + event.Network.Carrier.MCC = stringval + case attributeNetworkMNC: + event.Network.Carrier.MNC = stringval + case attributeNetworkCarrierName: + event.Network.Carrier.Name = stringval + case attributeNetworkICC: + event.Network.Carrier.ICC = stringval + + // messaging.* + case "message_bus.destination", semconv.AttributeMessagingDestination: + message.QueueName = stringval + isMessaging = true + + // rpc.* + // + // TODO(axw) add RPC fieldset to ECS? Currently we drop these + // attributes, and rely on the operation name like we do with + // Elastic APM agents. + case semconv.AttributeRPCSystem: + event.Transaction.Type = "request" + case semconv.AttributeRPCService: + case semconv.AttributeRPCMethod: + + // miscellaneous + case "span.kind": // filter out + case "type": + event.Transaction.Type = stringval + case semconv.AttributeServiceVersion: + // NOTE support for sending service.version as a span tag + // is deprecated, and will be removed in 8.0. Instrumentation + // should set this as a resource attribute (OTel) or tracer + // tag (Jaeger). + event.Service.Version = stringval + case "component": + component = stringval + fallthrough + default: + event.Labels[k] = stringval + } + } + return true + }) + + if event.Transaction.Type == "" { + if isHTTP { + event.Transaction.Type = "request" + } else if isMessaging { + event.Transaction.Type = "messaging" + } else if component != "" { + event.Transaction.Type = component + } else { + event.Transaction.Type = "custom" + } + } + + if isHTTP { + event.HTTP = http + + // Set outcome and result from status code. + if statusCode := httpResponse.StatusCode; statusCode > 0 { + if event.Event.Outcome == outcomeUnknown { + event.Event.Outcome = serverHTTPStatusCodeOutcome(statusCode) + } + if event.Transaction.Result == "" { + event.Transaction.Result = httpStatusCodeResult(statusCode) + } + } + + // Build the model.URL from http{URL,Host,Scheme}.
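+ // Host precedence: http.host, then http.server_name, then net.host.name, + // and finally the resource-level host name; net.host.port is appended + // only when the host came from one of the name attributes.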
+ httpHost := httpHost + if httpHost == "" { + httpHost = httpServerName + if httpHost == "" { + httpHost = netHostName + if httpHost == "" { + httpHost = event.Host.Hostname + } + } + if httpHost != "" && netHostPort > 0 { + httpHost = net.JoinHostPort(httpHost, strconv.Itoa(netHostPort)) + } + } + event.URL = model.ParseURL(httpURL, httpHost, httpScheme) + } + + if isMessaging { + event.Transaction.Message = &message + } + + if event.Client.IP == nil { + event.Client = model.Client(event.Source) + } + + if samplerType != (pdata.AttributeValue{}) { + // The client has reported its sampling rate, so we can use it to extrapolate transaction metrics. + parseSamplerAttributes(samplerType, samplerParam, &event.Transaction.RepresentativeCount, event.Labels) + } else { + event.Transaction.RepresentativeCount = 1 + } + + if event.Transaction.Result == "" { + event.Transaction.Result = spanStatusResult(span.Status()) + } + if name := library.Name(); name != "" { + event.Service.Framework.Name = name + event.Service.Framework.Version = library.Version() + } +} + +func translateSpan(span pdata.Span, event *model.APMEvent) { + isJaeger := strings.HasPrefix(event.Agent.Name, "Jaeger") + + var ( + netPeerName string + netPeerIP string + netPeerPort int + ) + + var ( + httpURL string + httpHost string + httpTarget string + httpScheme string = "http" + ) + + var ( + messageSystem string + messageOperation string + ) + + var http model.HTTP + var httpRequest model.HTTPRequest + var httpResponse model.HTTPResponse + var message model.Message + var db model.DB + var destinationService model.DestinationService + var isDBSpan, isHTTPSpan, isMessagingSpan, isRPCSpan bool + var component string + var rpcSystem string + var samplerType, samplerParam pdata.AttributeValue + span.Attributes().Range(func(kDots string, v pdata.AttributeValue) bool { + if isJaeger { + switch kDots { + case "sampler.type": + samplerType = v + return true + case "sampler.param": + samplerParam = v + return true + } + } + + k := replaceDots(kDots) + switch v.Type() { + case pdata.AttributeValueTypeArray: + event.Labels[k] = ifaceAnyValueArray(v.ArrayVal()) + case pdata.AttributeValueTypeBool: + event.Labels[k] = v.BoolVal() + case pdata.AttributeValueTypeDouble: + event.Labels[k] = v.DoubleVal() + case pdata.AttributeValueTypeInt: + switch kDots { + case "http.status_code": + httpResponse.StatusCode = int(v.IntVal()) + http.Response = &httpResponse + isHTTPSpan = true + case semconv.AttributeNetPeerPort, "peer.port": + netPeerPort = int(v.IntVal()) + case "rpc.grpc.status_code": + // Ignored for spans.
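+ // The span outcome is derived from the OTLP span status instead, + // or from http.status_code for HTTP spans (see below).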
+ default: + event.Labels[k] = v.IntVal() + } + case pdata.AttributeValueTypeString: + stringval := truncate(v.StringVal()) + switch kDots { + // http.* + case semconv.AttributeHTTPHost: + httpHost = stringval + isHTTPSpan = true + case semconv.AttributeHTTPScheme: + httpScheme = stringval + isHTTPSpan = true + case semconv.AttributeHTTPTarget: + httpTarget = stringval + isHTTPSpan = true + case semconv.AttributeHTTPURL: + httpURL = stringval + isHTTPSpan = true + case semconv.AttributeHTTPMethod: + httpRequest.Method = stringval + http.Request = &httpRequest + isHTTPSpan = true + + // db.* + case "sql.query": + if db.Type == "" { + db.Type = "sql" + } + fallthrough + case semconv.AttributeDBStatement: + db.Statement = stringval + isDBSpan = true + case semconv.AttributeDBName, "db.instance": + db.Instance = stringval + isDBSpan = true + case semconv.AttributeDBSystem, "db.type": + db.Type = stringval + isDBSpan = true + case semconv.AttributeDBUser: + db.UserName = stringval + isDBSpan = true + + // net.* + case semconv.AttributeNetPeerName, "peer.hostname": + netPeerName = stringval + case semconv.AttributeNetPeerIP, "peer.ipv4", "peer.ipv6": + netPeerIP = stringval + case "peer.address": + destinationService.Resource = stringval + if !strings.ContainsRune(stringval, ':') || net.ParseIP(stringval) != nil { + // peer.address is not necessarily a hostname + // or IP address; it could be something like + // a JDBC connection string or ip:port. Ignore + // values containing colons, except for IPv6. + netPeerName = stringval + } + case attributeNetworkConnectionType: + event.Network.Connection.Type = stringval + case attributeNetworkConnectionSubtype: + event.Network.Connection.Subtype = stringval + case attributeNetworkMCC: + event.Network.Carrier.MCC = stringval + case attributeNetworkMNC: + event.Network.Carrier.MNC = stringval + case attributeNetworkCarrierName: + event.Network.Carrier.Name = stringval + case attributeNetworkICC: + event.Network.Carrier.ICC = stringval + + // messaging.* + case "message_bus.destination", semconv.AttributeMessagingDestination: + message.QueueName = stringval + isMessagingSpan = true + case semconv.AttributeMessagingOperation: + messageOperation = stringval + isMessagingSpan = true + case semconv.AttributeMessagingSystem: + messageSystem = stringval + destinationService.Resource = stringval + destinationService.Name = stringval + isMessagingSpan = true + + // rpc.* + // + // TODO(axw) add RPC fieldset to ECS? Currently we drop these + // attributes, and rely on the operation name and span type/subtype + // like we do with Elastic APM agents. + case semconv.AttributeRPCSystem: + rpcSystem = stringval + isRPCSpan = true + case semconv.AttributeRPCService: + case semconv.AttributeRPCMethod: + + // miscellaneous + case "span.kind": // filter out + case semconv.AttributePeerService: + destinationService.Name = stringval + if destinationService.Resource == "" { + // Prefer using peer.address for resource. + destinationService.Resource = stringval + } + case "component": + component = stringval + fallthrough + default: + event.Labels[k] = stringval + } + } + return true + }) + + destPort := netPeerPort + destAddr := netPeerName + if destAddr == "" { + destAddr = netPeerIP + } + + if isHTTPSpan { + var fullURL *url.URL + if httpURL != "" { + fullURL, _ = url.Parse(httpURL) + } else if httpTarget != "" { + // Build http.url from http.scheme, http.target, etc. 
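+ // http.target holds only the path and query, so the full URL is + // reassembled from http.scheme, http.host (falling back to the + // net.peer.* address and port) and http.target.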
+ if u, err := url.Parse(httpTarget); err == nil { + fullURL = u + fullURL.Scheme = httpScheme + if httpHost == "" { + // Set host from net.peer.* + httpHost = destAddr + if destPort > 0 { + httpHost = net.JoinHostPort(httpHost, strconv.Itoa(destPort)) + } + } + fullURL.Host = httpHost + httpURL = fullURL.String() + } + } + if fullURL != nil { + url := url.URL{Scheme: fullURL.Scheme, Host: fullURL.Host} + hostname := truncate(url.Hostname()) + var port int + portString := url.Port() + if portString != "" { + port, _ = strconv.Atoi(portString) + } else { + port = schemeDefaultPort(url.Scheme) + } + + // Set destination.{address,port} from the HTTP URL, + // replacing peer.* based values to ensure consistency. + destAddr = hostname + if port > 0 { + destPort = port + } + + // Set destination.service.* from the HTTP URL, unless peer.service was specified. + if destinationService.Name == "" { + resource := url.Host + if port > 0 && port == schemeDefaultPort(url.Scheme) { + hasDefaultPort := portString != "" + if hasDefaultPort { + // Remove the default port from destination.service.name. + url.Host = hostname + } else { + // Add the default port to destination.service.resource. + resource = fmt.Sprintf("%s:%d", resource, port) + } + } + destinationService.Name = url.String() + destinationService.Resource = resource + } + } + } + + if isRPCSpan { + // Set destination.service.* from the peer address, unless peer.service was specified. + if destinationService.Name == "" { + destHostPort := net.JoinHostPort(destAddr, strconv.Itoa(destPort)) + destinationService.Name = destHostPort + destinationService.Resource = destHostPort + } + } + + switch { + case isHTTPSpan: + if httpResponse.StatusCode > 0 { + if event.Event.Outcome == outcomeUnknown { + event.Event.Outcome = clientHTTPStatusCodeOutcome(httpResponse.StatusCode) + } + } + event.Span.Type = "external" + subtype := "http" + event.Span.Subtype = subtype + event.HTTP = http + event.URL.Original = httpURL + case isDBSpan: + event.Span.Type = "db" + if db.Type != "" { + event.Span.Subtype = db.Type + if destinationService.Name == "" { + // For database requests, we currently just identify + // the destination service by db.system. + destinationService.Name = event.Span.Subtype + destinationService.Resource = event.Span.Subtype + } + } + event.Span.DB = &db + case isMessagingSpan: + event.Span.Type = "messaging" + event.Span.Subtype = messageSystem + if messageOperation == "" && span.Kind() == pdata.SpanKindProducer { + messageOperation = "send" + } + event.Span.Action = messageOperation + if destinationService.Resource != "" && message.QueueName != "" { + destinationService.Resource += "/" + message.QueueName + } + event.Span.Message = &message + case isRPCSpan: + event.Span.Type = "external" + event.Span.Subtype = rpcSystem + default: + event.Span.Type = "app" + event.Span.Subtype = component + } + + if destAddr != "" { + event.Destination = model.Destination{Address: destAddr, Port: destPort} + } + if destinationService != (model.DestinationService{}) { + if destinationService.Type == "" { + // Copy span type to destination.service.type. + destinationService.Type = event.Span.Type + } + event.Span.DestinationService = &destinationService + } + + if samplerType != (pdata.AttributeValue{}) { + // The client has reported its sampling rate, so we can use it to extrapolate span metrics.
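+ // A representative count of 1/probability means each sampled span + // is taken to stand for that many spans when metrics are aggregated.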
+ parseSamplerAttributes(samplerType, samplerParam, &event.Span.RepresentativeCount, event.Labels) + } else { + event.Span.RepresentativeCount = 1 + } +} + +func parseSamplerAttributes(samplerType, samplerParam pdata.AttributeValue, representativeCount *float64, labels common.MapStr) { + switch samplerType := samplerType.StringVal(); samplerType { + case "probabilistic": + probability := samplerParam.DoubleVal() + if probability > 0 && probability <= 1 { + *representativeCount = 1 / probability + } + default: + labels["sampler_type"] = samplerType + switch samplerParam.Type() { + case pdata.AttributeValueTypeBool: + labels["sampler_param"] = samplerParam.BoolVal() + case pdata.AttributeValueTypeDouble: + labels["sampler_param"] = samplerParam.DoubleVal() + } + } +} + +func convertSpanEvent( + logger *logp.Logger, + spanEvent pdata.SpanEvent, + parent model.APMEvent, // either span or transaction + timeDelta time.Duration, +) model.APMEvent { + event := parent + event.Labels = initEventLabels(event.Labels) + event.Transaction = nil + event.Span = nil + event.Timestamp = spanEvent.Timestamp().AsTime().Add(timeDelta) + + isJaeger := strings.HasPrefix(parent.Agent.Name, "Jaeger") + if isJaeger { + event.Error = convertJaegerErrorSpanEvent(logger, spanEvent, event.Labels) + } else if spanEvent.Name() == "exception" { + // Translate exception span events to errors. + // + // If it's not Jaeger, we assume OpenTelemetry semantic conventions. + // Per OpenTelemetry semantic conventions: + // `The name of the event MUST be "exception"` + var exceptionEscaped bool + var exceptionMessage, exceptionStacktrace, exceptionType string + spanEvent.Attributes().Range(func(k string, v pdata.AttributeValue) bool { + switch k { + case semconv.AttributeExceptionMessage: + exceptionMessage = v.StringVal() + case semconv.AttributeExceptionStacktrace: + exceptionStacktrace = v.StringVal() + case semconv.AttributeExceptionType: + exceptionType = v.StringVal() + case "exception.escaped": + exceptionEscaped = v.BoolVal() + default: + event.Labels[replaceDots(k)] = ifaceAttributeValue(v) + } + return true + }) + if exceptionMessage != "" || exceptionType != "" { + // Per OpenTelemetry semantic conventions: + // `At least one of the following sets of attributes is required: + // - exception.type + // - exception.message` + event.Error = convertOpenTelemetryExceptionSpanEvent( + exceptionType, exceptionMessage, exceptionStacktrace, + exceptionEscaped, parent.Service.Language.Name, + ) + } + } + + if event.Error != nil { + event.Processor = model.ErrorProcessor + setErrorContext(&event, parent) + } else { + event.Processor = model.LogProcessor + event.DataStream.Type = datastreams.LogsType + event.Message = spanEvent.Name() + spanEvent.Attributes().Range(func(k string, v pdata.AttributeValue) bool { + event.Labels[replaceDots(k)] = ifaceAttributeValue(v) + return true + }) + } + return event +} + +func convertJaegerErrorSpanEvent(logger *logp.Logger, event pdata.SpanEvent, labels common.MapStr) *model.Error { + var isError bool + var exMessage, exType string + logMessage := event.Name() + hasMinimalInfo := logMessage != "" + event.Attributes().Range(func(k string, v pdata.AttributeValue) bool { + if v.Type() != pdata.AttributeValueTypeString { + return true + } + stringval := truncate(v.StringVal()) + switch k { + case "error", "error.object": + exMessage = stringval + hasMinimalInfo = true + isError = true + case "event": + if stringval == "error" { // according to opentracing spec + isError = true + } else if logMessage == "" {
"" { + // Jaeger seems to send the message in the 'event' field. + // + // In case 'message' is sent, the event's name will be set + // and we will use that. Otherwise we use 'event'. + logMessage = stringval + hasMinimalInfo = true + } + case "error.kind": + exType = stringval + hasMinimalInfo = true + isError = true + case "level": + isError = stringval == "error" + default: + labels[replaceDots(k)] = ifaceAttributeValue(v) + } + return true + }) + if !isError { + return nil + } + if !hasMinimalInfo { + logger.Debugf("Cannot convert span event (name=%q) into elastic apm error: %v", event.Name()) + return nil + } + e := &model.Error{} + if logMessage != "" { + e.Log = &model.Log{Message: logMessage} + } + if exMessage != "" || exType != "" { + e.Exception = &model.Exception{ + Message: exMessage, + Type: exType, + } + } + return e +} + +func setErrorContext(out *model.APMEvent, parent model.APMEvent) { + out.Trace.ID = parent.Trace.ID + out.HTTP = parent.HTTP + out.URL = parent.URL + if parent.Transaction != nil { + out.Transaction = &model.Transaction{ + ID: parent.Transaction.ID, + Sampled: parent.Transaction.Sampled, + Type: parent.Transaction.Type, + } + out.Error.Custom = parent.Transaction.Custom + out.Parent.ID = parent.Transaction.ID + } + if parent.Span != nil { + out.Parent.ID = parent.Span.ID + } +} + +func replaceDots(s string) string { + return strings.ReplaceAll(s, dot, underscore) +} + +// spanStatusOutcome returns the outcome for transactions and spans based on +// the given OTLP span status. +func spanStatusOutcome(status pdata.SpanStatus) string { + switch status.Code() { + case pdata.StatusCodeOk: + return outcomeSuccess + case pdata.StatusCodeError: + return outcomeFailure + } + return outcomeUnknown +} + +// spanStatusResult returns the result for transactions based on the given +// OTLP span status. If the span status is unknown, an empty result string +// is returned. +func spanStatusResult(status pdata.SpanStatus) string { + switch status.Code() { + case pdata.StatusCodeOk: + return "Success" + case pdata.StatusCodeError: + return "Error" + } + return "" +} + +var standardStatusCodeResults = [...]string{ + "HTTP 1xx", + "HTTP 2xx", + "HTTP 3xx", + "HTTP 4xx", + "HTTP 5xx", +} + +// httpStatusCodeResult returns the transaction result value to use for the +// given HTTP status code. +func httpStatusCodeResult(statusCode int) string { + switch i := statusCode / 100; i { + case 1, 2, 3, 4, 5: + return standardStatusCodeResults[i-1] + } + return fmt.Sprintf("HTTP %d", statusCode) +} + +// serverHTTPStatusCodeOutcome returns the transaction outcome value to use for +// the given HTTP status code. +func serverHTTPStatusCodeOutcome(statusCode int) string { + if statusCode >= 500 { + return outcomeFailure + } + return outcomeSuccess +} + +// clientHTTPStatusCodeOutcome returns the span outcome value to use for the +// given HTTP status code. +func clientHTTPStatusCodeOutcome(statusCode int) string { + if statusCode >= 400 { + return outcomeFailure + } + return outcomeSuccess +} + +// truncate returns s truncated at n runes, and the number of runes in the resulting string (<= n). 
+func truncate(s string) string { + var j int + for i := range s { + if j == keywordLength { + return s[:i] + } + j++ + } + return s +} + +func schemeDefaultPort(scheme string) int { + switch scheme { + case "http": + return 80 + case "https": + return 443 + } + return 0 +} diff --git a/processor/otel/traces_test.go b/processor/otel/traces_test.go new file mode 100644 index 00000000000..e4c14023ab0 --- /dev/null +++ b/processor/otel/traces_test.go @@ -0,0 +1,1282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Portions copied from OpenTelemetry Collector (contrib), from the +// elastic exporter. +// +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
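+// The tests below construct synthetic OTLP traces with pdata and assert +// on the model events produced by the Consumer.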
+ +package otel_test + +import ( + "context" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + jaegermodel "github.com/jaegertracing/jaeger/model" + jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/pdata" + semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" + "google.golang.org/grpc/codes" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/approvaltest" + "github.com/elastic/apm-server/beater/beatertest" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/processor/otel" +) + +func TestConsumer_ConsumeTraces_Empty(t *testing.T) { + var processor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + assert.Empty(t, batch) + return nil + } + + consumer := otel.Consumer{Processor: processor} + traces := pdata.NewTraces() + assert.NoError(t, consumer.ConsumeTraces(context.Background(), traces)) +} + +func TestOutcome(t *testing.T) { + test := func(t *testing.T, expectedOutcome, expectedResult string, statusCode pdata.StatusCode) { + t.Helper() + + traces, spans := newTracesSpans() + otelSpan1 := spans.Spans().AppendEmpty() + otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan1.Status().SetCode(statusCode) + otelSpan2 := spans.Spans().AppendEmpty() + otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) + otelSpan2.Status().SetCode(statusCode) + + batch := transformTraces(t, traces) + require.Len(t, batch, 2) + + assert.Equal(t, expectedOutcome, batch[0].Event.Outcome) + assert.Equal(t, expectedResult, batch[0].Transaction.Result) + assert.Equal(t, expectedOutcome, batch[1].Event.Outcome) + } + + test(t, "unknown", "", pdata.StatusCodeUnset) + test(t, "success", "Success", pdata.StatusCodeOk) + test(t, "failure", "Error", pdata.StatusCodeError) +} + +func TestRepresentativeCount(t *testing.T) { + traces, spans := newTracesSpans() + otelSpan1 := spans.Spans().AppendEmpty() + otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan2 := spans.Spans().AppendEmpty() + otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) + + batch := transformTraces(t, traces) + require.Len(t, batch, 2) + + assert.Equal(t, 1.0, batch[0].Transaction.RepresentativeCount) + assert.Equal(t, 1.0, batch[1].Span.RepresentativeCount) +} + +func TestHTTPTransactionURL(t *testing.T) { + test := func(t *testing.T, expected model.URL, attrs map[string]pdata.AttributeValue) { + t.Helper() + event := transformTransactionWithAttributes(t, attrs) + assert.Equal(t, expected, event.URL) + } + + t.Run("scheme_host_target", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "/foo?bar", + Full: "https://testing.invalid:80/foo?bar", + Path: "/foo", + Query: "bar", + Domain: "testing.invalid", + Port: 80, + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.host": 
pdata.NewAttributeValueString("testing.invalid:80"), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("scheme_servername_nethostport_target", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "/foo?bar", + Full: "https://testing.invalid:80/foo?bar", + Path: "/foo", + Query: "bar", + Domain: "testing.invalid", + Port: 80, + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.server_name": pdata.NewAttributeValueString("testing.invalid"), + "net.host.port": pdata.NewAttributeValueInt(80), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("scheme_nethostname_nethostport_target", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "/foo?bar", + Full: "https://testing.invalid:80/foo?bar", + Path: "/foo", + Query: "bar", + Domain: "testing.invalid", + Port: 80, + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "net.host.name": pdata.NewAttributeValueString("testing.invalid"), + "net.host.port": pdata.NewAttributeValueInt(80), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("http.url", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "https://testing.invalid:80/foo?bar", + Full: "https://testing.invalid:80/foo?bar", + Path: "/foo", + Query: "bar", + Domain: "testing.invalid", + Port: 80, + }, map[string]pdata.AttributeValue{ + "http.url": pdata.NewAttributeValueString("https://testing.invalid:80/foo?bar"), + }) + }) + t.Run("host_no_port", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "/foo", + Full: "https://testing.invalid/foo", + Path: "/foo", + Domain: "testing.invalid", + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.host": pdata.NewAttributeValueString("testing.invalid"), + "http.target": pdata.NewAttributeValueString("/foo"), + }) + }) + t.Run("ipv6_host_no_port", func(t *testing.T) { + test(t, model.URL{ + Scheme: "https", + Original: "/foo", + Full: "https://[::1]/foo", + Path: "/foo", + Domain: "::1", + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.host": pdata.NewAttributeValueString("[::1]"), + "http.target": pdata.NewAttributeValueString("/foo"), + }) + }) + t.Run("default_scheme", func(t *testing.T) { + // scheme is set to "http" if it can't be deduced from attributes. 
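+ // (translateTransaction passes an empty scheme through to model.ParseURL + // in this case, so the "http" default is supplied there.)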
+ test(t, model.URL{ + Scheme: "http", + Original: "/foo", + Full: "http://testing.invalid/foo", + Path: "/foo", + Domain: "testing.invalid", + }, map[string]pdata.AttributeValue{ + "http.host": pdata.NewAttributeValueString("testing.invalid"), + "http.target": pdata.NewAttributeValueString("/foo"), + }) + }) +} + +func TestHTTPSpanURL(t *testing.T) { + test := func(t *testing.T, expected string, attrs map[string]pdata.AttributeValue) { + t.Helper() + event := transformSpanWithAttributes(t, attrs) + assert.Equal(t, model.URL{Original: expected}, event.URL) + } + + t.Run("http.url", func(t *testing.T) { + test(t, "https://testing.invalid:80/foo?bar", map[string]pdata.AttributeValue{ + "http.url": pdata.NewAttributeValueString("https://testing.invalid:80/foo?bar"), + }) + }) + t.Run("scheme_host_target", func(t *testing.T) { + test(t, "https://testing.invalid:80/foo?bar", map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.host": pdata.NewAttributeValueString("testing.invalid:80"), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("scheme_netpeername_netpeerport_target", func(t *testing.T) { + test(t, "https://testing.invalid:80/foo?bar", map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "net.peer.name": pdata.NewAttributeValueString("testing.invalid"), + "net.peer.ip": pdata.NewAttributeValueString("::1"), // net.peer.name preferred + "net.peer.port": pdata.NewAttributeValueInt(80), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("scheme_netpeerip_netpeerport_target", func(t *testing.T) { + test(t, "https://[::1]:80/foo?bar", map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "net.peer.ip": pdata.NewAttributeValueString("::1"), + "net.peer.port": pdata.NewAttributeValueInt(80), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("default_scheme", func(t *testing.T) { + // scheme is set to "http" if it can't be deduced from attributes.
+ test(t, "http://testing.invalid/foo", map[string]pdata.AttributeValue{ + "http.host": pdata.NewAttributeValueString("testing.invalid"), + "http.target": pdata.NewAttributeValueString("/foo"), + }) + }) +} + +func TestHTTPSpanDestination(t *testing.T) { + test := func(t *testing.T, expectedDestination model.Destination, expectedDestinationService *model.DestinationService, attrs map[string]pdata.AttributeValue) { + t.Helper() + event := transformSpanWithAttributes(t, attrs) + assert.Equal(t, expectedDestination, event.Destination) + assert.Equal(t, expectedDestinationService, event.Span.DestinationService) + } + + t.Run("url_default_port_specified", func(t *testing.T) { + test(t, model.Destination{ + Address: "testing.invalid", + Port: 443, + }, &model.DestinationService{ + Type: "external", + Name: "https://testing.invalid", + Resource: "testing.invalid:443", + }, map[string]pdata.AttributeValue{ + "http.url": pdata.NewAttributeValueString("https://testing.invalid:443/foo?bar"), + }) + }) + t.Run("url_port_scheme", func(t *testing.T) { + test(t, model.Destination{ + Address: "testing.invalid", + Port: 443, + }, &model.DestinationService{ + Type: "external", + Name: "https://testing.invalid", + Resource: "testing.invalid:443", + }, map[string]pdata.AttributeValue{ + "http.url": pdata.NewAttributeValueString("https://testing.invalid/foo?bar"), + }) + }) + t.Run("url_non_default_port", func(t *testing.T) { + test(t, model.Destination{ + Address: "testing.invalid", + Port: 444, + }, &model.DestinationService{ + Type: "external", + Name: "https://testing.invalid:444", + Resource: "testing.invalid:444", + }, map[string]pdata.AttributeValue{ + "http.url": pdata.NewAttributeValueString("https://testing.invalid:444/foo?bar"), + }) + }) + t.Run("scheme_host_target", func(t *testing.T) { + test(t, model.Destination{ + Address: "testing.invalid", + Port: 444, + }, &model.DestinationService{ + Type: "external", + Name: "https://testing.invalid:444", + Resource: "testing.invalid:444", + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "http.host": pdata.NewAttributeValueString("testing.invalid:444"), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) + t.Run("scheme_netpeername_nethostport_target", func(t *testing.T) { + test(t, model.Destination{ + Address: "::1", + Port: 444, + }, &model.DestinationService{ + Type: "external", + Name: "https://[::1]:444", + Resource: "[::1]:444", + }, map[string]pdata.AttributeValue{ + "http.scheme": pdata.NewAttributeValueString("https"), + "net.peer.ip": pdata.NewAttributeValueString("::1"), + "net.peer.port": pdata.NewAttributeValueInt(444), + "http.target": pdata.NewAttributeValueString("/foo?bar"), + }) + }) +} + +func TestHTTPTransactionSource(t *testing.T) { + test := func(t *testing.T, expectedDomain, expectedIP string, expectedPort int, attrs map[string]pdata.AttributeValue) { + // "http.method" is a required attribute for HTTP spans, + // and its presence causes the transaction's HTTP request + // context to be built. 
+ attrs["http.method"] = pdata.NewAttributeValueString("POST") + + event := transformTransactionWithAttributes(t, attrs) + require.NotNil(t, event.HTTP) + require.NotNil(t, event.HTTP.Request) + parsedIP := net.ParseIP(expectedIP) + require.NotNil(t, parsedIP) + assert.Equal(t, model.Source{ + Domain: expectedDomain, + IP: net.ParseIP(expectedIP), + Port: expectedPort, + }, event.Source) + assert.Equal(t, model.Client(event.Source), event.Client) + } + + t.Run("net.peer.ip_port", func(t *testing.T) { + test(t, "", "192.168.0.1", 1234, map[string]pdata.AttributeValue{ + "net.peer.ip": pdata.NewAttributeValueString("192.168.0.1"), + "net.peer.port": pdata.NewAttributeValueInt(1234), + }) + }) + t.Run("net.peer.ip", func(t *testing.T) { + test(t, "", "192.168.0.1", 0, map[string]pdata.AttributeValue{ + "net.peer.ip": pdata.NewAttributeValueString("192.168.0.1"), + }) + }) + t.Run("net.peer.ip_name", func(t *testing.T) { + test(t, "source.domain", "192.168.0.1", 0, map[string]pdata.AttributeValue{ + "net.peer.name": pdata.NewAttributeValueString("source.domain"), + "net.peer.ip": pdata.NewAttributeValueString("192.168.0.1"), + }) + }) +} + +func TestHTTPTransactionFlavor(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "http.flavor": pdata.NewAttributeValueString("1.1"), + }) + assert.Equal(t, "1.1", event.HTTP.Version) +} + +func TestHTTPTransactionUserAgent(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "http.user_agent": pdata.NewAttributeValueString("Foo/bar (baz)"), + }) + assert.Equal(t, model.UserAgent{Original: "Foo/bar (baz)"}, event.UserAgent) +} + +func TestHTTPTransactionClientIP(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "net.peer.ip": pdata.NewAttributeValueString("1.2.3.4"), + "net.peer.port": pdata.NewAttributeValueInt(5678), + "http.client_ip": pdata.NewAttributeValueString("9.10.11.12"), + }) + assert.Equal(t, model.Source{IP: net.ParseIP("1.2.3.4"), Port: 5678}, event.Source) + assert.Equal(t, model.Client{IP: net.ParseIP("9.10.11.12")}, event.Client) +} + +func TestHTTPTransactionStatusCode(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "http.status_code": pdata.NewAttributeValueInt(200), + }) + assert.Equal(t, 200, event.HTTP.Response.StatusCode) +} + +func TestDatabaseSpan(t *testing.T) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/database.md#mysql + connectionString := "Server=shopdb.example.com;Database=ShopDb;Uid=billing_user;TableCache=true;UseCompression=True;MinimumPoolSize=10;MaximumPoolSize=50;" + event := transformSpanWithAttributes(t, map[string]pdata.AttributeValue{ + "db.system": pdata.NewAttributeValueString("mysql"), + "db.connection_string": pdata.NewAttributeValueString(connectionString), + "db.user": pdata.NewAttributeValueString("billing_user"), + "db.name": pdata.NewAttributeValueString("ShopDb"), + "db.statement": pdata.NewAttributeValueString("SELECT * FROM orders WHERE order_id = 'o4711'"), + "net.peer.name": pdata.NewAttributeValueString("shopdb.example.com"), + "net.peer.ip": pdata.NewAttributeValueString("192.0.2.12"), + "net.peer.port": pdata.NewAttributeValueInt(3306), + "net.transport": pdata.NewAttributeValueString("IP.TCP"), + }) + + assert.Equal(t, "db", event.Span.Type) + assert.Equal(t, "mysql", event.Span.Subtype) + assert.Equal(t, "", 
event.Span.Action) + + assert.Equal(t, &model.DB{ + Instance: "ShopDb", + Statement: "SELECT * FROM orders WHERE order_id = 'o4711'", + Type: "mysql", + UserName: "billing_user", + }, event.Span.DB) + + assert.Equal(t, common.MapStr{ + "db_connection_string": connectionString, + "net_transport": "IP.TCP", + }, event.Labels) + + assert.Equal(t, model.Destination{ + Address: "shopdb.example.com", + Port: 3306, + }, event.Destination) + + assert.Equal(t, &model.DestinationService{ + Type: "db", + Name: "mysql", + Resource: "mysql", + }, event.Span.DestinationService) +} + +func TestInstrumentationLibrary(t *testing.T) { + traces, spans := newTracesSpans() + spans.InstrumentationLibrary().SetName("library-name") + spans.InstrumentationLibrary().SetVersion("1.2.3") + otelSpan := spans.Spans().AppendEmpty() + otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) + events := transformTraces(t, traces) + event := events[0] + + assert.Equal(t, "library-name", event.Service.Framework.Name) + assert.Equal(t, "1.2.3", event.Service.Framework.Version) +} + +func TestRPCTransaction(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "rpc.system": pdata.NewAttributeValueString("grpc"), + "rpc.service": pdata.NewAttributeValueString("myservice.EchoService"), + "rpc.method": pdata.NewAttributeValueString("exampleMethod"), + "rpc.grpc.status_code": pdata.NewAttributeValueInt(int64(codes.Unavailable)), + "net.peer.name": pdata.NewAttributeValueString("peer_name"), + "net.peer.ip": pdata.NewAttributeValueString("10.20.30.40"), + "net.peer.port": pdata.NewAttributeValueInt(123), + }) + assert.Equal(t, "request", event.Transaction.Type) + assert.Equal(t, "Unavailable", event.Transaction.Result) + assert.Empty(t, event.Labels) + assert.Equal(t, model.Client{ + Domain: "peer_name", + IP: net.ParseIP("10.20.30.40"), + Port: 123, + }, event.Client) +} + +func TestRPCSpan(t *testing.T) { + event := transformSpanWithAttributes(t, map[string]pdata.AttributeValue{ + "rpc.system": pdata.NewAttributeValueString("grpc"), + "rpc.service": pdata.NewAttributeValueString("myservice.EchoService"), + "rpc.method": pdata.NewAttributeValueString("exampleMethod"), + "rpc.grpc.status_code": pdata.NewAttributeValueInt(int64(codes.Unavailable)), + "net.peer.ip": pdata.NewAttributeValueString("10.20.30.40"), + "net.peer.port": pdata.NewAttributeValueInt(123), + }) + assert.Equal(t, "external", event.Span.Type) + assert.Equal(t, "grpc", event.Span.Subtype) + assert.Empty(t, event.Labels) + assert.Equal(t, model.Destination{ + Address: "10.20.30.40", + Port: 123, + }, event.Destination) + assert.Equal(t, &model.DestinationService{ + Type: "external", + Name: "10.20.30.40:123", + Resource: "10.20.30.40:123", + }, event.Span.DestinationService) +} + +func TestMessagingTransaction(t *testing.T) { + event := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "messaging.destination": pdata.NewAttributeValueString("myQueue"), + }, func(s pdata.Span) { + s.SetKind(pdata.SpanKindConsumer) + // Set parentID to imply this isn't the root, but + // kind==Consumer should still force the span to be translated + // as a transaction. 
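+ // (convertSpan starts a transaction for root, server and consumer + // spans, and a span for everything else.)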
+ s.SetParentSpanID(pdata.NewSpanID([8]byte{3})) + }) + assert.Equal(t, "messaging", event.Transaction.Type) + assert.Empty(t, event.Labels) + assert.Equal(t, &model.Message{ + QueueName: "myQueue", + }, event.Transaction.Message) +} + +func TestMessagingSpan(t *testing.T) { + event := transformSpanWithAttributes(t, map[string]pdata.AttributeValue{ + "messaging.system": pdata.NewAttributeValueString("kafka"), + "messaging.destination": pdata.NewAttributeValueString("myTopic"), + "net.peer.ip": pdata.NewAttributeValueString("10.20.30.40"), + "net.peer.port": pdata.NewAttributeValueInt(123), + }, func(s pdata.Span) { + s.SetKind(pdata.SpanKindProducer) + }) + assert.Equal(t, "messaging", event.Span.Type) + assert.Equal(t, "kafka", event.Span.Subtype) + assert.Equal(t, "send", event.Span.Action) + assert.Empty(t, event.Labels) + assert.Equal(t, model.Destination{ + Address: "10.20.30.40", + Port: 123, + }, event.Destination) + assert.Equal(t, &model.DestinationService{ + Type: "messaging", + Name: "kafka", + Resource: "kafka/myTopic", + }, event.Span.DestinationService) +} + +func TestSpanNetworkAttributes(t *testing.T) { + networkAttributes := map[string]pdata.AttributeValue{ + "net.host.connection.type": pdata.NewAttributeValueString("cell"), + "net.host.connection.subtype": pdata.NewAttributeValueString("LTE"), + "net.host.carrier.name": pdata.NewAttributeValueString("Vodafone"), + "net.host.carrier.mnc": pdata.NewAttributeValueString("01"), + "net.host.carrier.mcc": pdata.NewAttributeValueString("101"), + "net.host.carrier.icc": pdata.NewAttributeValueString("UK"), + } + txEvent := transformTransactionWithAttributes(t, networkAttributes) + spanEvent := transformSpanWithAttributes(t, networkAttributes) + + expected := model.Network{ + Connection: model.NetworkConnection{ + Type: "cell", + Subtype: "LTE", + }, + Carrier: model.NetworkCarrier{ + Name: "Vodafone", + MNC: "01", + MCC: "101", + ICC: "UK", + }, + } + assert.Equal(t, expected, txEvent.Network) + assert.Equal(t, expected, spanEvent.Network) +} + +func TestArrayLabels(t *testing.T) { + stringArray := pdata.NewAttributeValueArray() + stringArray.ArrayVal().AppendEmpty().SetStringVal("string1") + stringArray.ArrayVal().AppendEmpty().SetStringVal("string2") + + boolArray := pdata.NewAttributeValueArray() + boolArray.ArrayVal().AppendEmpty().SetBoolVal(false) + boolArray.ArrayVal().AppendEmpty().SetBoolVal(true) + + txEvent := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ + "string_array": stringArray, + "bool_array": boolArray, + }) + assert.Equal(t, common.MapStr{ + "bool_array": []interface{}{false, true}, + "string_array": []interface{}{"string1", "string2"}, + }, txEvent.Labels) + + spanEvent := transformSpanWithAttributes(t, map[string]pdata.AttributeValue{ + "string_array": stringArray, + "bool_array": boolArray, + }) + assert.Equal(t, common.MapStr{ + "bool_array": []interface{}{false, true}, + "string_array": []interface{}{"string1", "string2"}, + }, spanEvent.Labels) +} + +func TestConsumeTracesExportTimestamp(t *testing.T) { + traces, otelSpans := newTracesSpans() + + // The actual timestamps will be non-deterministic, as they are adjusted + // based on the server's clock. + // + // Use a large delta so that we can allow for a significant amount of + // delay in the test environment affecting the timestamp adjustment. 
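+ //
+ // The offsets below assume the consumer shifts every event timestamp by
+ // (receive time - telemetry.sdk.elastic_export_timestamp), which is what
+ // the assertions at the end of this test check.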
+ const timeDelta = time.Hour + const allowedError = 5 // seconds + + now := time.Now() + exportTimestamp := now.Add(-timeDelta) + traces.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "telemetry.sdk.elastic_export_timestamp": pdata.NewAttributeValueInt(exportTimestamp.UnixNano()), + }) + + // Offsets are start times relative to the export timestamp. + transactionOffset := -2 * time.Second + spanOffset := transactionOffset + time.Second + exceptionOffset := spanOffset + 25*time.Millisecond + transactionDuration := time.Second + 100*time.Millisecond + spanDuration := 50 * time.Millisecond + + exportedTransactionTimestamp := exportTimestamp.Add(transactionOffset) + exportedSpanTimestamp := exportTimestamp.Add(spanOffset) + exportedExceptionTimestamp := exportTimestamp.Add(exceptionOffset) + + otelSpan1 := otelSpans.Spans().AppendEmpty() + otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan1.SetStartTimestamp(pdata.TimestampFromTime(exportedTransactionTimestamp)) + otelSpan1.SetEndTimestamp(pdata.TimestampFromTime(exportedTransactionTimestamp.Add(transactionDuration))) + + otelSpan2 := otelSpans.Spans().AppendEmpty() + otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) + otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) + otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) + otelSpan2.SetStartTimestamp(pdata.TimestampFromTime(exportedSpanTimestamp)) + otelSpan2.SetEndTimestamp(pdata.TimestampFromTime(exportedSpanTimestamp.Add(spanDuration))) + + otelSpanEvent := otelSpan2.Events().AppendEmpty() + otelSpanEvent.SetTimestamp(pdata.TimestampFromTime(exportedExceptionTimestamp)) + otelSpanEvent.SetName("exception") + otelSpanEvent.Attributes().InitFromMap(map[string]pdata.AttributeValue{ + "exception.type": pdata.NewAttributeValueString("the_type"), + "exception.message": pdata.NewAttributeValueString("the_message"), + "exception.stacktrace": pdata.NewAttributeValueString("the_stacktrace"), + }) + + batch := transformTraces(t, traces) + require.Len(t, batch, 3) + + // Give some leeway for one event, and check other events' timestamps relative to that one. + assert.InDelta(t, now.Add(transactionOffset).Unix(), batch[0].Timestamp.Unix(), allowedError) + assert.Equal(t, spanOffset-transactionOffset, batch[1].Timestamp.Sub(batch[0].Timestamp)) + assert.Equal(t, exceptionOffset-transactionOffset, batch[2].Timestamp.Sub(batch[0].Timestamp)) + + // Durations should be unaffected. + assert.Equal(t, transactionDuration, batch[0].Event.Duration) + assert.Equal(t, spanDuration, batch[1].Event.Duration) +} + +func TestConsumer_JaegerMetadata(t *testing.T) { + jaegerBatch := jaegermodel.Batch{ + Spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + Tags: []jaegermodel.KeyValue{jaegerKeyValue("span.kind", "client")}, + TraceID: jaegermodel.NewTraceID(0, 0x46467830), + SpanID: jaegermodel.NewSpanID(0x41414646), + }}, + } + + for _, tc := range []struct { + name string + process *jaegermodel.Process + }{{ + name: "jaeger-version", + process: jaegermodel.NewProcess("", []jaegermodel.KeyValue{ + jaegerKeyValue("jaeger.version", "PHP-3.4.12"), + }), + }, { + name: "jaeger-no-language", + process: jaegermodel.NewProcess("", []jaegermodel.KeyValue{ + jaegerKeyValue("jaeger.version", "3.4.12"), + }), + }, { + // TODO(axw) break this down into more specific test cases. 
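+ // This case combines a process name with hostname, client-uuid, ip,
+ // peer.port and an arbitrary "foo" tag in a single approval test.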
+ name: "jaeger", + process: jaegermodel.NewProcess("foo", []jaegermodel.KeyValue{ + jaegerKeyValue("jaeger.version", "C++-3.2.1"), + jaegerKeyValue("hostname", "host-foo"), + jaegerKeyValue("client-uuid", "xxf0"), + jaegerKeyValue("ip", "17.0.10.123"), + jaegerKeyValue("foo", "bar"), + jaegerKeyValue("peer.port", "80"), + }), + }} { + t.Run(tc.name, func(t *testing.T) { + var events []beat.Event + recorder := eventRecorderBatchProcessor(&events) + jaegerBatch.Process = tc.process + traces := jaegertranslator.ProtoBatchToInternalTraces(jaegerBatch) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + approveEvents(t, "metadata_"+tc.name, events) + }) + } +} + +func TestConsumer_JaegerSampleRate(t *testing.T) { + jaegerBatch := jaegermodel.Batch{ + Process: jaegermodel.NewProcess("", jaegerKeyValues( + "jaeger.version", "unknown", + "hostname", "host-abc", + )), + Spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + Duration: testDuration(), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("sampler.type", "probabilistic"), + jaegerKeyValue("sampler.param", 0.8), + }, + }, { + StartTime: testStartTime(), + Duration: testDuration(), + TraceID: jaegermodel.NewTraceID(1, 1), + References: []jaegermodel.SpanRef{{ + RefType: jaegermodel.SpanRefType_CHILD_OF, + TraceID: jaegermodel.NewTraceID(1, 1), + SpanID: 1, + }}, + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "client"), + jaegerKeyValue("sampler.type", "probabilistic"), + jaegerKeyValue("sampler.param", 0.4), + }, + }, { + StartTime: testStartTime(), + Duration: testDuration(), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("sampler.type", "ratelimiting"), + jaegerKeyValue("sampler.param", 2.0), // 2 traces per second + }, + }}, + } + traces := jaegertranslator.ProtoBatchToInternalTraces(jaegerBatch) + + var batches []*model.Batch + recorder := batchRecorderBatchProcessor(&batches) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + require.Len(t, batches, 1) + batch := *batches[0] + + events := transformBatch(context.Background(), batches...) 
+ approveEvents(t, "jaeger_sampling_rate", events) + + tx1 := batch[0].Transaction + span := batch[1].Span + tx2 := batch[2].Transaction + assert.Equal(t, 1.25 /* 1/0.8 */, tx1.RepresentativeCount) + assert.Equal(t, 2.5 /* 1/0.4 */, span.RepresentativeCount) + assert.Zero(t, tx2.RepresentativeCount) // not set for non-probabilistic +} + +func TestConsumer_JaegerTraceID(t *testing.T) { + var batches []*model.Batch + recorder := batchRecorderBatchProcessor(&batches) + + jaegerBatch := jaegermodel.Batch{ + Process: jaegermodel.NewProcess("", jaegerKeyValues("jaeger.version", "unknown")), + Spans: []*jaegermodel.Span{{ + TraceID: jaegermodel.NewTraceID(0, 0x000046467830), + SpanID: jaegermodel.NewSpanID(456), + }, { + TraceID: jaegermodel.NewTraceID(0x000046467830, 0x000046467830), + SpanID: jaegermodel.NewSpanID(789), + }}, + } + traces := jaegertranslator.ProtoBatchToInternalTraces(jaegerBatch) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + + batch := *batches[0] + assert.Equal(t, "00000000000000000000000046467830", batch[0].Trace.ID) + assert.Equal(t, "00000000464678300000000046467830", batch[1].Trace.ID) +} + +func TestConsumer_JaegerTransaction(t *testing.T) { + for _, tc := range []struct { + name string + spans []*jaegermodel.Span + }{ + { + name: "jaeger_full", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + Duration: testDuration(), + TraceID: jaegermodel.NewTraceID(0, 0x46467830), + SpanID: 0x41414646, + OperationName: "HTTP GET", + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("error", true), + jaegerKeyValue("bool.a", true), + jaegerKeyValue("double.a", 14.65), + jaegerKeyValue("int.a", int64(148)), + jaegerKeyValue("http.method", "get"), + jaegerKeyValue("http.url", "http://foo.bar.com?a=12"), + jaegerKeyValue("http.status_code", "400"), + jaegerKeyValue("http.protocol", "HTTP/1.1"), + jaegerKeyValue("type", "http_request"), + jaegerKeyValue("component", "foo"), + jaegerKeyValue("string.a.b", "some note"), + jaegerKeyValue("service.version", "1.0"), + }, + Logs: testJaegerLogs(), + }}, + }, + { + name: "jaeger_type_request", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + References: []jaegermodel.SpanRef{{ + RefType: jaegermodel.SpanRefType_CHILD_OF, + SpanID: 0x61626364, + }}, + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("http.status_code", int64(500)), + jaegerKeyValue("http.protocol", "HTTP"), + jaegerKeyValue("http.path", "http://foo.bar.com?a=12"), + }, + }}, + }, + { + name: "jaeger_type_request_result", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + References: []jaegermodel.SpanRef{{ + RefType: jaegermodel.SpanRefType_CHILD_OF, + SpanID: 0x61626364, + }}, + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("http.status_code", int64(200)), + jaegerKeyValue("http.url", "localhost:8080"), + }, + }}, + }, + { + name: "jaeger_type_messaging", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + References: []jaegermodel.SpanRef{{ + RefType: jaegermodel.SpanRefType_CHILD_OF, + SpanID: 0x61626364, + }}, + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("message_bus.destination", "queue-abc"), + }, + }}, + }, + { + name: "jaeger_type_component", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("component", "amqp"), + }, + }}, + }, + { + name: "jaeger_custom", + spans: 
[]*jaegermodel.Span{{ + StartTime: testStartTime(), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("a.b", "foo"), + }, + }}, + }, + { + name: "jaeger_no_attrs", + spans: []*jaegermodel.Span{{ + StartTime: testStartTime(), + Duration: testDuration(), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("span.kind", "server"), + jaegerKeyValue("error", true), + jaegerKeyValue("otel.status_code", int64(2)), + }, + }}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + batch := jaegermodel.Batch{ + Process: jaegermodel.NewProcess("", []jaegermodel.KeyValue{ + jaegerKeyValue("hostname", "host-abc"), + jaegerKeyValue("jaeger.version", "unknown"), + }), + Spans: tc.spans, + } + traces := jaegertranslator.ProtoBatchToInternalTraces(batch) + + var events []beat.Event + recorder := eventRecorderBatchProcessor(&events) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + approveEvents(t, "transaction_"+tc.name, events) + }) + } +} + +func TestConsumer_JaegerSpan(t *testing.T) { + for _, tc := range []struct { + name string + spans []*jaegermodel.Span + }{ + { + name: "jaeger_http", + spans: []*jaegermodel.Span{{ + OperationName: "HTTP GET", + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("error", true), + jaegerKeyValue("hasErrors", true), + jaegerKeyValue("double.a", 14.65), + jaegerKeyValue("http.status_code", int64(400)), + jaegerKeyValue("int.a", int64(148)), + jaegerKeyValue("span.kind", "filtered"), + jaegerKeyValue("http.url", "http://foo.bar.com?a=12"), + jaegerKeyValue("http.method", "get"), + jaegerKeyValue("component", "foo"), + jaegerKeyValue("string.a.b", "some note"), + }, + Logs: testJaegerLogs(), + }}, + }, + { + name: "jaeger_https_default_port", + spans: []*jaegermodel.Span{{ + OperationName: "HTTPS GET", + Tags: jaegerKeyValues( + "http.url", "https://foo.bar.com:443?a=12", + ), + }}, + }, + { + name: "jaeger_http_status_code", + spans: []*jaegermodel.Span{{ + OperationName: "HTTP GET", + Tags: jaegerKeyValues( + "http.url", "http://foo.bar.com?a=12", + "http.method", "get", + "http.status_code", int64(202), + ), + }}, + }, + { + name: "jaeger_db", + spans: []*jaegermodel.Span{{ + Tags: jaegerKeyValues( + "db.statement", "GET * from users", + "db.instance", "db01", + "db.type", "mysql", + "db.user", "admin", + "component", "foo", + "peer.address", "mysql://db:3306", + "peer.hostname", "db", + "peer.port", int64(3306), + "peer.service", "sql", + ), + }}, + }, + { + name: "jaeger_messaging", + spans: []*jaegermodel.Span{{ + OperationName: "Message receive", + Tags: jaegerKeyValues( + "peer.hostname", "mq", + "peer.port", int64(1234), + "message_bus.destination", "queue-abc", + ), + }}, + }, + { + name: "jaeger_subtype_component", + spans: []*jaegermodel.Span{{ + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("component", "whatever"), + }, + }}, + }, + { + name: "jaeger_custom", + spans: []*jaegermodel.Span{{}}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + batch := jaegermodel.Batch{ + Process: jaegermodel.NewProcess("", []jaegermodel.KeyValue{ + jaegerKeyValue("hostname", "host-abc"), + jaegerKeyValue("jaeger.version", "unknown"), + }), + Spans: tc.spans, + } + for _, span := range batch.Spans { + span.StartTime = testStartTime() + span.Duration = testDuration() + span.TraceID = jaegermodel.NewTraceID(0, 0x46467830) + span.SpanID = 0x41414646 + span.References = []jaegermodel.SpanRef{{ + RefType: jaegermodel.SpanRefType_CHILD_OF, + TraceID: jaegermodel.NewTraceID(0, 0x46467830), + SpanID: 0x58585858, + }} + } + traces := 
jaegertranslator.ProtoBatchToInternalTraces(batch) + + var events []beat.Event + recorder := eventRecorderBatchProcessor(&events) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + approveEvents(t, "span_"+tc.name, events) + }) + } +} + +func TestJaegerServiceVersion(t *testing.T) { + jaegerBatch := jaegermodel.Batch{ + Process: jaegermodel.NewProcess("", jaegerKeyValues( + "jaeger.version", "unknown", + "service.version", "process_tag_value", + )), + Spans: []*jaegermodel.Span{{ + TraceID: jaegermodel.NewTraceID(0, 0x000046467830), + SpanID: jaegermodel.NewSpanID(456), + }, { + TraceID: jaegermodel.NewTraceID(0, 0x000046467830), + SpanID: jaegermodel.NewSpanID(456), + Tags: []jaegermodel.KeyValue{ + jaegerKeyValue("service.version", "span_tag_value"), + }, + }}, + } + traces := jaegertranslator.ProtoBatchToInternalTraces(jaegerBatch) + + var batches []*model.Batch + recorder := batchRecorderBatchProcessor(&batches) + require.NoError(t, (&otel.Consumer{Processor: recorder}).ConsumeTraces(context.Background(), traces)) + + batch := *batches[0] + assert.Equal(t, "process_tag_value", batch[0].Service.Version) + assert.Equal(t, "span_tag_value", batch[1].Service.Version) +} + +func TestTracesLogging(t *testing.T) { + for _, level := range []logp.Level{logp.InfoLevel, logp.DebugLevel} { + t.Run(level.String(), func(t *testing.T) { + logp.DevelopmentSetup(logp.ToObserverOutput(), logp.WithLevel(level)) + transformTraces(t, pdata.NewTraces()) + logs := logp.ObserverLogs().TakeAll() + if level == logp.InfoLevel { + assert.Empty(t, logs) + } else { + assert.NotEmpty(t, logs) + } + }) + } +} + +func testJaegerLogs() []jaegermodel.Log { + return []jaegermodel.Log{{ + // errors that can be converted to elastic errors + Timestamp: testStartTime().Add(23 * time.Nanosecond), + Fields: jaegerKeyValues( + "event", "retrying connection", + "level", "error", + "error", "no connection established", + ), + }, { + Timestamp: testStartTime().Add(43 * time.Nanosecond), + Fields: jaegerKeyValues( + "event", "no user.ID given", + "level", "error", + "message", "nullPointer exception", + "isbool", true, + ), + }, { + Timestamp: testStartTime().Add(66 * time.Nanosecond), + Fields: jaegerKeyValues( + "error", "no connection established", + ), + }, { + Timestamp: testStartTime().Add(66 * time.Nanosecond), + Fields: jaegerKeyValues( + "error.object", "no connection established", + ), + }, { + Timestamp: testStartTime().Add(66 * time.Nanosecond), + Fields: jaegerKeyValues( + "error.kind", "DBClosedException", + ), + }, { + Timestamp: testStartTime().Add(66 * time.Nanosecond), + Fields: jaegerKeyValues( + "event", "error", + "message", "no connection established", + ), + }, { + // non errors + Timestamp: testStartTime().Add(15 * time.Nanosecond), + Fields: jaegerKeyValues( + "event", "baggage", + "isValid", false, + ), + }, { + Timestamp: testStartTime().Add(65 * time.Nanosecond), + Fields: jaegerKeyValues( + "message", "retrying connection", + "level", "info", + ), + }, { + // errors not convertible to elastic errors + Timestamp: testStartTime().Add(67 * time.Nanosecond), + Fields: jaegerKeyValues( + "level", "error", + ), + }} +} + +func testStartTime() time.Time { + return time.Unix(1576500418, 768068) +} + +func testDuration() time.Duration { + return 79 * time.Second +} + +func batchRecorderBatchProcessor(out *[]*model.Batch) model.BatchProcessor { + return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + *out = append(*out, batch) + 
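// Record the batch pointer; tests inspect *out after ConsumeTraces returns. + 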
+ return nil
+ })
+}
+
+func eventRecorderBatchProcessor(out *[]beat.Event) model.BatchProcessor {
+ return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error {
+ *out = append(*out, transformBatch(ctx, batch)...)
+ return nil
+ })
+}
+
+func transformBatch(ctx context.Context, batches ...*model.Batch) []beat.Event {
+ var out []beat.Event
+ for _, batch := range batches {
+ out = append(out, batch.Transform(ctx)...)
+ }
+ return out
+}
+
+func approveEvents(t testing.TB, name string, events []beat.Event) {
+ t.Helper()
+ docs := beatertest.EncodeEventDocs(events...)
+ approvaltest.ApproveEventDocs(t, filepath.Join("test_approved", name), docs)
+}
+
+func jaegerKeyValues(kv ...interface{}) []jaegermodel.KeyValue {
+ if len(kv)%2 != 0 {
+ panic("even number of args expected")
+ }
+ out := make([]jaegermodel.KeyValue, len(kv)/2)
+ for i := range out {
+ k := kv[2*i].(string)
+ v := kv[2*i+1]
+ out[i] = jaegerKeyValue(k, v)
+ }
+ return out
+}
+
+func jaegerKeyValue(k string, v interface{}) jaegermodel.KeyValue {
+ kv := jaegermodel.KeyValue{Key: k}
+ switch v := v.(type) {
+ case string:
+ kv.VType = jaegermodel.ValueType_STRING
+ kv.VStr = v
+ case float64:
+ kv.VType = jaegermodel.ValueType_FLOAT64
+ kv.VFloat64 = v
+ case int64:
+ kv.VType = jaegermodel.ValueType_INT64
+ kv.VInt64 = v
+ case bool:
+ kv.VType = jaegermodel.ValueType_BOOL
+ kv.VBool = v
+ default:
+ panic(fmt.Errorf("unhandled %q value type %#v", k, v))
+ }
+ return kv
+}
+
+func transformTransactionWithAttributes(t *testing.T, attrs map[string]pdata.AttributeValue, configFns ...func(pdata.Span)) model.APMEvent {
+ traces, spans := newTracesSpans()
+ otelSpan := spans.Spans().AppendEmpty()
+ otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1}))
+ otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2}))
+ for _, fn := range configFns {
+ fn(otelSpan)
+ }
+ otelSpan.Attributes().InitFromMap(attrs)
+ events := transformTraces(t, traces)
+ return events[0]
+}
+
+func transformSpanWithAttributes(t *testing.T, attrs map[string]pdata.AttributeValue, configFns ...func(pdata.Span)) model.APMEvent {
+ traces, spans := newTracesSpans()
+ otelSpan := spans.Spans().AppendEmpty()
+ otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1}))
+ otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2}))
+ otelSpan.SetParentSpanID(pdata.NewSpanID([8]byte{3}))
+ for _, fn := range configFns {
+ fn(otelSpan)
+ }
+ otelSpan.Attributes().InitFromMap(attrs)
+ events := transformTraces(t, traces)
+ return events[0]
+}
+
+func transformTransactionSpanEvents(t *testing.T, language string, spanEvents ...pdata.SpanEvent) (transaction model.APMEvent, events []model.APMEvent) {
+ traces, spans := newTracesSpans()
+ traces.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{
+ semconv.AttributeTelemetrySDKLanguage: pdata.NewAttributeValueString(language),
+ })
+ otelSpan := spans.Spans().AppendEmpty()
+ otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1}))
+ otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2}))
+ for _, spanEvent := range spanEvents {
+ spanEvent.CopyTo(otelSpan.Events().AppendEmpty())
+ }
+
+ allEvents := transformTraces(t, traces)
+ require.NotEmpty(t, allEvents)
+ return allEvents[0], allEvents[1:]
+}
+
+// transformTraces feeds traces through an otel.Consumer and returns the
+// single batch produced by the test processor.
+func transformTraces(t *testing.T, traces pdata.Traces) model.Batch {
+ var processed model.Batch
+ processor := model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error {
+ if processed != nil {
+ panic("already processed batch")
+ }
+ processed = *batch
+ return nil
+ })
+ require.NoError(t, 
(&otel.Consumer{Processor: processor}).ConsumeTraces(context.Background(), traces)) + return processed +} + +func newTracesSpans() (pdata.Traces, pdata.InstrumentationLibrarySpans) { + traces := pdata.NewTraces() + resourceSpans := traces.ResourceSpans().AppendEmpty() + librarySpans := resourceSpans.InstrumentationLibrarySpans().AppendEmpty() + return traces, librarySpans +} + +func newInt(v int) *int { + return &v +} + +func newBool(v bool) *bool { + return &v +} diff --git a/processor/stream/benchmark_test.go b/processor/stream/benchmark_test.go index 865bcae8a0c..428053b6ee3 100644 --- a/processor/stream/benchmark_test.go +++ b/processor/stream/benchmark_test.go @@ -21,59 +21,50 @@ import ( "bytes" "context" "io/ioutil" - "math" "path/filepath" "testing" - "golang.org/x/time/rate" - "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" ) func BenchmarkBackendProcessor(b *testing.B) { - processor := BackendProcessor(&config.Config{MaxEventSize: 300 * 1024}) + processor := BackendProcessor(config.DefaultConfig()) files, _ := filepath.Glob(filepath.FromSlash("../../testdata/intake-v2/*.ndjson")) benchmarkStreamProcessor(b, processor, files) } func BenchmarkRUMV3Processor(b *testing.B) { - processor := RUMV3Processor(&config.Config{MaxEventSize: 300 * 1024}) + processor := RUMV3Processor(config.DefaultConfig()) files, _ := filepath.Glob(filepath.FromSlash("../../testdata/intake-v3/rum_*.ndjson")) benchmarkStreamProcessor(b, processor, files) } func benchmarkStreamProcessor(b *testing.B, processor *Processor, files []string) { - report := func(ctx context.Context, p publish.PendingReq) error { - return nil - } - //ensure to not hit rate limit as blocking wait would be measured otherwise - rl := rate.NewLimiter(rate.Limit(math.MaxFloat64-1), math.MaxInt32) + const batchSize = 10 + batchProcessor := nopBatchProcessor{} + benchmark := func(b *testing.B, filename string) { + data, err := ioutil.ReadFile(filename) + if err != nil { + b.Error(err) + } + r := bytes.NewReader(data) + b.ReportAllocs() + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + r.Reset(data) + b.StartTimer() - benchmark := func(filename string, rl *rate.Limiter) func(b *testing.B) { - return func(b *testing.B) { - data, err := ioutil.ReadFile(filename) - if err != nil { - b.Error(err) - } - r := bytes.NewReader(data) - b.ReportAllocs() - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - r.Reset(data) - b.StartTimer() - processor.HandleStream(context.Background(), rl, &model.Metadata{}, r, report) - } + var result Result + processor.HandleStream(context.Background(), model.APMEvent{}, r, batchSize, batchProcessor, &result) } } for _, f := range files { b.Run(filepath.Base(f), func(b *testing.B) { - b.Run("NoRateLimit", benchmark(f, nil)) - b.Run("WithRateLimit", benchmark(f, rl)) + benchmark(b, f) }) } } diff --git a/processor/stream/package_tests/doc.go b/processor/stream/package_tests/doc.go deleted file mode 100644 index 8c684cf7680..00000000000 --- a/processor/stream/package_tests/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package package_tests diff --git a/processor/stream/package_tests/error_attrs_test.go b/processor/stream/package_tests/error_attrs_test.go deleted file mode 100644 index 52459203497..00000000000 --- a/processor/stream/package_tests/error_attrs_test.go +++ /dev/null @@ -1,264 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package package_tests - -import ( - "encoding/json" - "testing" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/model/error/generated/schema" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/tests" -) - -func errorProcSetup() *tests.ProcessorSetup { - return &tests.ProcessorSetup{ - Proc: &intakeTestProcessor{ - Processor: *stream.BackendProcessor(&config.Config{MaxEventSize: lrSize}), - }, - FullPayloadPath: "../testdata/intake-v2/errors.ndjson", - TemplatePaths: []string{ - "../../../model/error/_meta/fields.yml", - "../../../_meta/fields.common.yml", - }, - Schema: schema.ModelSchema, - SchemaPrefix: "error", - } -} - -func errorPayloadAttrsNotInFields() *tests.Set { - return tests.NewSet( - tests.Group("error.exception.attributes"), - tests.Group("error.exception.stacktrace"), - tests.Group("error.exception.cause"), - tests.Group("error.exception.parent"), - tests.Group("error.log.stacktrace"), - tests.Group("context"), - tests.Group("error.page"), - tests.Group("http.request.cookies"), - ) -} - -func errorFieldsNotInPayloadAttrs() *tests.Set { - return tests.NewSet( - "view errors", "error id icon", - "host.ip", "transaction.name", "source.ip", - tests.Group("event"), - tests.Group("observer"), - tests.Group("user"), - tests.Group("client"), - tests.Group("destination"), - tests.Group("http"), - tests.Group("url"), - tests.Group("span"), - tests.Group("transaction.self_time"), - tests.Group("transaction.breakdown"), - tests.Group("transaction.duration"), - "experimental", - ) -} - -func errorPayloadAttrsNotInJsonSchema() *tests.Set { - return tests.NewSet( - "error", - "error.log.stacktrace.vars.key", - "error.exception.stacktrace.vars.key", - "error.exception.attributes.foo", - tests.Group("error.exception.cause."), - 
tests.Group("error.context.custom"), - tests.Group("error.context.request.env"), - tests.Group("error.context.request.cookies"), - tests.Group("error.context.tags"), - tests.Group("error.context.request.headers."), - tests.Group("error.context.response.headers."), - ) -} - -func errorRequiredKeys() *tests.Set { - return tests.NewSet( - "error", - "error.id", - "error.log", - "error.exception", - "error.exception.type", - "error.exception.message", - "error.log.message", - "error.exception.stacktrace.filename", - "error.log.stacktrace.filename", - "error.exception.stacktrace.classname", - "error.log.stacktrace.classname", - "error.context.request.method", - "error.context.request.url", - - "error.trace_id", - "error.parent_id", - ) -} - -type val = []interface{} -type obj = map[string]interface{} - -func errorCondRequiredKeys() map[string]tests.Condition { - return map[string]tests.Condition{ - "error.exception": {Absence: []string{"error.log"}}, - "error.exception.message": {Absence: []string{"error.exception.type"}}, - "error.exception.type": {Absence: []string{"error.exception.message"}}, - "error.exception.stacktrace.filename": {Absence: []string{"error.exception.stacktrace.classname"}}, - "error.exception.stacktrace.classname": {Absence: []string{"error.exception.stacktrace.filename"}}, - "error.log": {Absence: []string{"error.exception"}}, - "error.log.stacktrace.filename": {Absence: []string{"error.log.stacktrace.classname"}}, - "error.log.stacktrace.classname": {Absence: []string{"error.log.stacktrace.filename"}}, - - "error.trace_id": {Existence: obj{"error.parent_id": "abc123"}}, - "error.parent_id": {Existence: obj{"error.trace_id": "abc123"}}, - } -} - -func errorKeywordExceptionKeys() *tests.Set { - return tests.NewSet( - "processor.event", "processor.name", "error.grouping_key", - "context.tags", "transaction.name", - "event.outcome", // not relevant - "view errors", "error id icon", - tests.Group("url"), - tests.Group("http"), - tests.Group("destination"), - // metadata field - tests.Group("agent"), - tests.Group("container"), - tests.Group("host"), - tests.Group("kubernetes"), - tests.Group("observer"), - tests.Group("process"), - tests.Group("service"), - tests.Group("user"), - tests.Group("span"), - tests.Group("cloud"), - ) -} - -func TestErrorPayloadAttrsMatchFields(t *testing.T) { - errorProcSetup().PayloadAttrsMatchFields(t, - errorPayloadAttrsNotInFields(), - errorFieldsNotInPayloadAttrs()) -} - -func TestErrorPayloadAttrsMatchJsonSchema(t *testing.T) { - errorProcSetup().PayloadAttrsMatchJsonSchema(t, - errorPayloadAttrsNotInJsonSchema(), - tests.NewSet( - "error.context.user.email", - "error.context.experimental", - "error.exception.parent", // it will never be present in the top (first) exception - tests.Group("error.context.message"), - "error.context.response.decoded_body_size", - "error.context.response.encoded_body_size", - "error.context.response.transfer_size", - )) -} - -func TestErrorAttrsPresenceInError(t *testing.T) { - errorProcSetup().AttrsPresence(t, errorRequiredKeys(), errorCondRequiredKeys()) -} - -func TestErrorKeywordLimitationOnErrorAttributes(t *testing.T) { - errorProcSetup().KeywordLimitation( - t, - errorKeywordExceptionKeys(), - []tests.FieldTemplateMapping{ - {Template: "error."}, - {Template: "transaction.id", Mapping: "transaction_id"}, - {Template: "parent.id", Mapping: "parent_id"}, - {Template: "trace.id", Mapping: "trace_id"}, - }, - ) -} - -func TestPayloadDataForError(t *testing.T) { - //// add test data for testing - //// * 
specific edge cases - //// * multiple allowed data types - //// * regex pattern, time formats - //// * length restrictions, other than keyword length restrictions - errorProcSetup().DataValidation(t, - []tests.SchemaTestData{ - {Key: "error", - Invalid: []tests.Invalid{{Msg: `invalid input type`, Values: val{false}}}}, - {Key: "error.exception.code", Valid: val{"success", ""}, - Invalid: []tests.Invalid{{Msg: `exception/properties/code/type`, Values: val{false}}}}, - {Key: "error.exception.attributes", Valid: val{map[string]interface{}{}}, - Invalid: []tests.Invalid{{Msg: `exception/properties/attributes/type`, Values: val{123}}}}, - {Key: "error.timestamp", - Valid: val{json.Number("1496170422281000")}, - Invalid: []tests.Invalid{ - {Msg: `timestamp/type`, Values: val{"1496170422281000"}}}}, - {Key: "error.log.stacktrace.post_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `log/properties/stacktrace/items/properties/post_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `log/properties/stacktrace/items/properties/post_context/type`, Values: val{"test"}}}}, - {Key: "error.log.stacktrace.pre_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `log/properties/stacktrace/items/properties/pre_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `log/properties/stacktrace/items/properties/pre_context/type`, Values: val{"test"}}}}, - {Key: "error.exception.stacktrace.post_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `exception/properties/stacktrace/items/properties/post_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `exception/properties/stacktrace/items/properties/post_context/type`, Values: val{"test"}}}}, - {Key: "error.exception.stacktrace.pre_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `exception/properties/stacktrace/items/properties/pre_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `exception/properties/stacktrace/items/properties/pre_context/type`, Values: val{"test"}}}}, - {Key: "error.context.custom", - Valid: val{obj{"whatever": obj{"comes": obj{"end": -45}}}, obj{"whatever": 123}}, - Invalid: []tests.Invalid{ - {Msg: `context/properties/custom/additionalproperties`, Values: val{ - obj{"what.ever": 123}, obj{"what*ever": 123}, obj{"what\"ever": 123}}}, - {Msg: `context/properties/custom/type`, Values: val{"context"}}}}, - {Key: "error.context.request.body", Valid: val{tests.Str1025, obj{}}, - Invalid: []tests.Invalid{{Msg: `/context/properties/request/properties/body/type`, Values: val{102}}}}, - {Key: "error.context.request.headers", Valid: val{ - obj{"User-Agent": "go-1.1"}, - obj{"foo-bar": "a,b"}, - obj{"foo": []interface{}{"a", "b"}}}, - Invalid: []tests.Invalid{{Msg: `properties/headers`, Values: val{102, obj{"foo": obj{"bar": "a"}}}}}}, - {Key: "error.context.response.headers", Valid: val{ - obj{"User-Agent": "go-1.1"}, - obj{"foo-bar": "a,b"}, - obj{"foo": []interface{}{"a", "b"}}}, - Invalid: []tests.Invalid{{Msg: `properties/headers`, Values: val{102, obj{"foo": obj{"bar": "a"}}}}}}, - {Key: "error.context.request.env", Valid: val{obj{}}, - Invalid: []tests.Invalid{{Msg: `/context/properties/request/properties/env/type`, Values: val{102, "a"}}}}, - {Key: "error.context.request.cookies", Valid: val{obj{}}, - Invalid: []tests.Invalid{{Msg: `/context/properties/request/properties/cookies/type`, Values: 
val{102, "a"}}}}, - {Key: "error.context.tags", - Valid: val{obj{tests.Str1024Special: tests.Str1024Special}, obj{tests.Str1024: 123.45}, obj{tests.Str1024: true}}, - Invalid: []tests.Invalid{ - {Msg: `context/properties/tags/type`, Values: val{"tags"}}, - {Msg: `context/properties/tags/patternproperties`, Values: val{obj{"invalid": tests.Str1025}, obj{tests.Str1024: obj{}}}}, - {Msg: `context/properties/tags/additionalproperties`, Values: val{obj{"invali*d": "hello"}, obj{"invali\"d": "hello"}, obj{"invali.d": "hello"}}}}}, - {Key: "error.context.user.id", Valid: val{123, tests.Str1024Special}, - Invalid: []tests.Invalid{ - {Msg: `context/properties/user/properties/id/type`, Values: val{obj{}}}, - {Msg: `context/properties/user/properties/id/maxlength`, Values: val{tests.Str1025}}}}, - }) -} diff --git a/processor/stream/package_tests/intake_test_processor.go b/processor/stream/package_tests/intake_test_processor.go deleted file mode 100644 index 66e90cdb494..00000000000 --- a/processor/stream/package_tests/intake_test_processor.go +++ /dev/null @@ -1,118 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package package_tests - -import ( - "bytes" - "context" - "errors" - "io" - "time" - - "github.com/elastic/beats/v7/libbeat/beat" - - "github.com/elastic/apm-server/decoder" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/apm-server/transform" -) - -type intakeTestProcessor struct { - stream.Processor -} - -const lrSize = 100 * 1024 - -func (v *intakeTestProcessor) getDecoder(path string) (*decoder.NDJSONStreamDecoder, error) { - reader, err := loader.LoadDataAsStream(path) - if err != nil { - return nil, err - } - return decoder.NewNDJSONStreamDecoder(reader, lrSize), nil -} - -func (v *intakeTestProcessor) readEvents(dec *decoder.NDJSONStreamDecoder) ([]interface{}, error) { - var ( - err error - events []interface{} - ) - - for err != io.EOF { - var e map[string]interface{} - if err = dec.Decode(&e); err != nil && err != io.EOF { - return events, err - } - if e != nil { - events = append(events, e) - } - } - return events, nil -} - -func (p *intakeTestProcessor) LoadPayload(path string) (interface{}, error) { - ndjson, err := p.getDecoder(path) - if err != nil { - return nil, err - } - - // read and discard metadata - var m map[string]interface{} - ndjson.Decode(&m) - - return p.readEvents(ndjson) -} - -func (p *intakeTestProcessor) Decode(data interface{}) error { - events := data.([]interface{}) - for _, e := range events { - err := p.Processor.HandleRawModel(e.(map[string]interface{}), &model.Batch{}, time.Now(), model.Metadata{}) - if err != nil { - return err - } - } - - return nil -} - -func (p *intakeTestProcessor) Validate(data interface{}) error { - return p.Decode(data) -} - -func (p *intakeTestProcessor) Process(buf []byte) ([]beat.Event, error) { - var reqs []publish.PendingReq - report := tests.TestReporter(&reqs) - - result := p.HandleStream(context.TODO(), nil, &model.Metadata{}, bytes.NewBuffer(buf), report) - var events []beat.Event - for _, req := range reqs { - if req.Transformables != nil { - for _, transformable := range req.Transformables { - events = append(events, transformable.Transform(context.Background(), &transform.Config{})...) - } - } - } - - if len(result.Errors) > 0 { - return events, errors.New(result.Error()) - } - - return events, nil -} diff --git a/processor/stream/package_tests/metadata_attrs_test.go b/processor/stream/package_tests/metadata_attrs_test.go deleted file mode 100644 index 8ce99524688..00000000000 --- a/processor/stream/package_tests/metadata_attrs_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package package_tests - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/decoder" - "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/model/metadata/generated/schema" - "github.com/elastic/apm-server/model/modeldecoder" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/tests/loader" -) - -type MetadataProcessor struct { - intakeTestProcessor -} - -func (p *MetadataProcessor) LoadPayload(path string) (interface{}, error) { - ndjson, err := p.getDecoder(path) - if err != nil { - return nil, err - } - - return p.readEvents(ndjson) -} - -func (p *MetadataProcessor) Validate(data interface{}) error { - events := data.([]interface{}) - for _, e := range events { - rawEvent := e.(map[string]interface{}) - rawMetadata, ok := rawEvent["metadata"].(map[string]interface{}) - if !ok { - return stream.ErrUnrecognizedObject - } - - // validate the metadata object against our jsonschema - if err := modeldecoder.DecodeMetadata(rawMetadata, false, &model.Metadata{}); err != nil { - return err - } - } - - return nil -} - -func (p *MetadataProcessor) Decode(data interface{}) error { - return p.Validate(data) -} - -func metadataProcSetup() *tests.ProcessorSetup { - return &tests.ProcessorSetup{ - Proc: &MetadataProcessor{ - intakeTestProcessor{Processor: stream.Processor{MaxEventSize: lrSize}}}, - Schema: schema.ModelSchema, - TemplatePaths: []string{ - "../../../_meta/fields.common.yml", - }, - FullPayloadPath: "../testdata/intake-v2/metadata.ndjson", - } -} - -func getMetadataEventAttrs(t *testing.T, prefix string) *tests.Set { - payloadStream, err := loader.LoadDataAsStream("../testdata/intake-v2/metadata.ndjson") - require.NoError(t, err) - - var metadata map[string]interface{} - require.NoError(t, decoder.NewNDJSONStreamDecoder(payloadStream, lrSize).Decode(&metadata)) - - contextMetadata := metadata["metadata"] - - eventFields := tests.NewSet() - tests.FlattenMapStr(contextMetadata, prefix, nil, eventFields) - t.Logf("Event field: %s", eventFields) - return eventFields -} - -func TestMetadataPayloadAttrsMatchFields(t *testing.T) { - setup := metadataProcSetup() - eventFields := getMetadataEventAttrs(t, "") - - var mappingFields = []tests.FieldTemplateMapping{ - {Template: "system.container.", Mapping: "container."}, // move system.container.* - {Template: "system.container", Mapping: ""}, // delete system.container - {Template: "system.kubernetes.node.", Mapping: "kubernetes.node."}, // move system.kubernetes.node.* - {Template: "system.kubernetes.node", Mapping: ""}, // delete system.kubernetes.node - {Template: "system.kubernetes.pod.", Mapping: "kubernetes.pod."}, // move system.kubernetes.pod.* - {Template: "system.kubernetes.pod", Mapping: ""}, // delete system.kubernetes.pod - {Template: "system.kubernetes.", Mapping: "kubernetes."}, // move system.kubernetes.* - {Template: "system.kubernetes", Mapping: ""}, // delete system.kubernetes - {Template: "system.platform", Mapping: "host.os.platform"}, - {Template: "system.configured_hostname", Mapping: "host.name"}, - {Template: "system.detected_hostname", Mapping: "host.hostname"}, - {Template: "system", Mapping: "host"}, - {Template: "service.agent", Mapping: "agent"}, - {Template: "user.username", Mapping: "user.name"}, - {Template: "process.argv", Mapping: "process.args"}, - {Template: "labels.*", Mapping: "labels"}, - {Template: 
"service.node.configured_name", Mapping: "service.node.name"}, - {Template: "cloud", Mapping: "cloud"}, - } - setup.EventFieldsMappedToTemplateFields(t, eventFields, mappingFields) -} - -func TestMetadataPayloadMatchJsonSchema(t *testing.T) { - metadataProcSetup().AttrsMatchJsonSchema(t, - getMetadataEventAttrs(t, ""), - tests.NewSet(tests.Group("labels"), "system.ip"), - nil, - ) -} - -func TestKeywordLimitationOnMetadataAttrs(t *testing.T) { - metadataProcSetup().KeywordLimitation( - t, - tests.NewSet("processor.event", "processor.name", - "process.args", - tests.Group("observer"), - tests.Group("event"), - tests.Group("http"), - tests.Group("url"), - tests.Group("context.tags"), - tests.Group("transaction"), - tests.Group("span"), - tests.Group("parent"), - tests.Group("trace"), - tests.Group("user_agent"), - tests.Group("destination"), - ), - []tests.FieldTemplateMapping{ - {Template: "agent.", Mapping: "service.agent."}, - {Template: "container.", Mapping: "system.container."}, - {Template: "kubernetes.", Mapping: "system.kubernetes."}, - {Template: "host.os.platform", Mapping: "system.platform"}, - {Template: "host.name", Mapping: "system.configured_hostname"}, - {Template: "host.", Mapping: "system."}, - {Template: "user.name", Mapping: "user.username"}, - {Template: "service.node.name", Mapping: "service.node.configured_name"}, - //{Template: "url.", Mapping:"context.request.url."}, - }, - ) -} - -func metadataRequiredKeys() *tests.Set { - return tests.NewSet( - "metadata", - "metadata.cloud.provider", - "metadata.service", - "metadata.service.name", - "metadata.service.agent", - "metadata.service.agent.name", - "metadata.service.agent.version", - "metadata.service.runtime.name", - "metadata.service.runtime.version", - "metadata.service.language.name", - "metadata.system.container.id", - "metadata.process.pid", - ) -} - -func TestAttrsPresenceInMetadata(t *testing.T) { - metadataProcSetup().AttrsPresence(t, metadataRequiredKeys(), nil) -} - -func TestInvalidPayloadsForMetadata(t *testing.T) { - type val []interface{} - - payloadData := []tests.SchemaTestData{ - {Key: "metadata.service.name", - Valid: val{"m"}, - Invalid: []tests.Invalid{ - {Msg: "service/properties/name", Values: val{tests.Str1024Special}}, - {Msg: "service/properties/name", Values: val{""}}}, - }} - metadataProcSetup().DataValidation(t, payloadData) -} diff --git a/processor/stream/package_tests/metricset_attrs_test.go b/processor/stream/package_tests/metricset_attrs_test.go deleted file mode 100644 index 16e98a1f1c4..00000000000 --- a/processor/stream/package_tests/metricset_attrs_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package package_tests - -import ( - "encoding/json" - "testing" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/model/metricset/generated/schema" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/tests" -) - -func metricsetProcSetup() *tests.ProcessorSetup { - return &tests.ProcessorSetup{ - Proc: &intakeTestProcessor{ - Processor: *stream.BackendProcessor(&config.Config{MaxEventSize: lrSize}), - }, - FullPayloadPath: "../testdata/intake-v2/metricsets.ndjson", - TemplatePaths: []string{ - "../../../model/metricset/_meta/fields.yml", - "../../../_meta/fields.common.yml", - }, - Schema: schema.ModelSchema, - } -} - -func TestAttributesPresenceInMetric(t *testing.T) { - requiredKeys := tests.NewSet( - "service", - "metricset", - "metricset.samples", - "metricset.samples.+.value", - ) - metricsetProcSetup().AttrsPresence(t, requiredKeys, nil) -} - -func TestInvalidPayloads(t *testing.T) { - type obj = map[string]interface{} - type val = []interface{} - - validMetric := obj{"value": json.Number("1.0")} - payloadData := []tests.SchemaTestData{ - {Key: "metricset.timestamp", - Valid: val{json.Number("1496170422281000")}, - Invalid: []tests.Invalid{ - {Msg: `timestamp/type`, Values: val{"1496170422281000"}}}}, - {Key: "metricset.tags", - Valid: val{obj{tests.Str1024Special: tests.Str1024Special}, obj{tests.Str1024: 123.45}, obj{tests.Str1024: true}}, - Invalid: []tests.Invalid{ - {Msg: `tags/type`, Values: val{"tags"}}, - {Msg: `tags/patternproperties`, Values: val{obj{"invalid": tests.Str1025}, obj{tests.Str1024: obj{}}}}, - {Msg: `tags/additionalproperties`, Values: val{obj{"invali*d": "hello"}, obj{"invali\"d": "hello"}}}}, - }, - { - Key: "metricset.samples", - Valid: val{ - obj{"valid-metric": validMetric}, - }, - Invalid: []tests.Invalid{ - { - Msg: "/properties/samples/additionalproperties", - Values: val{ - obj{"metric\"key\"_quotes": validMetric}, - obj{"metric-*-key-star": validMetric}, - }, - }, - { - Msg: "/properties/samples/patternproperties", - Values: val{ - obj{"nil-value": obj{"value": nil}}, - obj{"string-value": obj{"value": "foo"}}, - }, - }, - }, - }, - } - metricsetProcSetup().DataValidation(t, payloadData) -} diff --git a/processor/stream/package_tests/span_attrs_test.go b/processor/stream/package_tests/span_attrs_test.go deleted file mode 100644 index 1863d1294b2..00000000000 --- a/processor/stream/package_tests/span_attrs_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package package_tests - -import ( - "encoding/json" - "testing" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/model/span/generated/schema" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/tests" -) - -func spanProcSetup() *tests.ProcessorSetup { - return &tests.ProcessorSetup{ - Proc: &intakeTestProcessor{ - Processor: *stream.BackendProcessor(&config.Config{MaxEventSize: lrSize}), - }, - FullPayloadPath: "../testdata/intake-v2/spans.ndjson", - Schema: schema.ModelSchema, - SchemaPrefix: "span", - TemplatePaths: []string{ - "../../../model/span/_meta/fields.yml", - "../../../_meta/fields.common.yml", - }, - } -} - -func spanPayloadAttrsNotInFields() *tests.Set { - return tests.NewSet( - tests.Group("span.stacktrace"), - tests.Group("context"), - tests.Group("span.db"), - tests.Group("span.http"), - "span.message.body", "span.message.headers", - ) -} - -// fields in _meta/fields.common.yml that are shared between several data types, but not with spans -func spanFieldsNotInPayloadAttrs() *tests.Set { - return tests.Union( - tests.NewSet( - "view spans", - "transaction.sampled", - "transaction.type", - "transaction.name", - tests.Group("container"), - tests.Group("host"), - tests.Group("kubernetes"), - tests.Group("observer"), - tests.Group("process"), - tests.Group("service"), - tests.Group("user"), - tests.Group("client"), - tests.Group("source"), - tests.Group("http"), - tests.Group("url"), - tests.Group("span.self_time"), - tests.Group("transaction.self_time"), - tests.Group("transaction.breakdown"), - tests.Group("transaction.duration"), - "experimental", - ), - // not valid for the span context - transactionContext(), - ) - -} - -func spanPayloadAttrsNotInJsonSchema() *tests.Set { - return tests.NewSet( - "span", - "span.stacktrace.vars.key", - tests.Group("span.context.tags"), - "span.context.http.response.headers.content-type", - "span.context.service.environment", //used to check that only defined service fields are set on spans - ) -} - -func spanJsonSchemaNotInPayloadAttrs() *tests.Set { - return tests.NewSet( - "span.transaction_id", - "span.context.experimental", - "span.context.message.body", - "span.sample_rate", - "span.context.message.headers", - ) -} - -func spanRequiredKeys() *tests.Set { - return tests.NewSet( - "span", - "span.name", - "span.trace_id", - "span.parent_id", - "span.id", - "span.duration", - "span.type", - "span.start", - "span.timestamp", - "span.stacktrace.filename", - ) -} - -func spanCondRequiredKeys() map[string]tests.Condition { - return map[string]tests.Condition{ - "span.start": {Absence: []string{"span.timestamp"}}, - "span.timestamp": {Absence: []string{"span.start"}}, - - "span.context.destination.service.type": {Existence: map[string]interface{}{ - "span.context.destination.service.name": "postgresql", - "span.context.destination.service.resource": "postgresql", - }}, - "span.context.destination.service.name": {Existence: map[string]interface{}{ - "span.context.destination.service.type": "db", - "span.context.destination.service.resource": "postgresql", - }}, - "span.context.destination.service.resource": {Existence: map[string]interface{}{ - "span.context.destination.service.type": "db", - "span.context.destination.service.name": "postgresql", - }}, - } -} - -func transactionContext() *tests.Set { - return tests.NewSet( - tests.Group("context.user"), - tests.Group("context.response"), - tests.Group("context.request"), - ) -} - -func 
spanKeywordExceptionKeys() *tests.Set { - return tests.Union(tests.NewSet( - "processor.event", "processor.name", - "context.tags", "transaction.type", "transaction.name", - "event.outcome", - tests.Group("observer"), - - // metadata fields - tests.Group("agent"), - tests.Group("container"), - tests.Group("host"), - tests.Group("kubernetes"), - tests.Group("process"), - tests.Group("service"), - tests.Group("user"), - tests.Group("url"), - tests.Group("http"), - tests.Group("cloud"), - ), - transactionContext(), - ) -} - -func TestSpanPayloadMatchFields(t *testing.T) { - spanProcSetup().PayloadAttrsMatchFields(t, - spanPayloadAttrsNotInFields(), - spanFieldsNotInPayloadAttrs()) - -} - -func TestSpanPayloadMatchJsonSchema(t *testing.T) { - spanProcSetup().PayloadAttrsMatchJsonSchema(t, - spanPayloadAttrsNotInJsonSchema(), - spanJsonSchemaNotInPayloadAttrs()) -} - -func TestAttrsPresenceInSpan(t *testing.T) { - spanProcSetup().AttrsPresence(t, spanRequiredKeys(), spanCondRequiredKeys()) -} - -func TestKeywordLimitationOnSpanAttrs(t *testing.T) { - spanProcSetup().KeywordLimitation( - t, - spanKeywordExceptionKeys(), - []tests.FieldTemplateMapping{ - {Template: "transaction.id", Mapping: "transaction_id"}, - {Template: "child.id", Mapping: "child_ids"}, - {Template: "parent.id", Mapping: "parent_id"}, - {Template: "trace.id", Mapping: "trace_id"}, - {Template: "span.id", Mapping: "id"}, - {Template: "span.db.link", Mapping: "context.db.link"}, - {Template: "span.destination.service", Mapping: "context.destination.service"}, - {Template: "span.message.", Mapping: "context.message."}, - {Template: "span.", Mapping: ""}, - {Template: "destination.address", Mapping: "context.destination.address"}, - {Template: "destination.port", Mapping: "context.destination.port"}, - {Template: "span.message.queue.name", Mapping: "context.message.queue.name"}, - }, - ) -} - -func TestPayloadDataForSpans(t *testing.T) { - // add test data for testing - // * specific edge cases - // * multiple allowed dataypes - // * regex pattern, time formats - // * length restrictions, other than keyword length restrictions - - spanProcSetup().DataValidation(t, - []tests.SchemaTestData{ - {Key: "span.context.tags", - Valid: val{obj{tests.Str1024Special: tests.Str1024Special}, obj{tests.Str1024: 123.45}, obj{tests.Str1024: true}}, - Invalid: []tests.Invalid{ - {Msg: `tags/type`, Values: val{"tags"}}, - {Msg: `tags/patternproperties`, Values: val{obj{"invalid": tests.Str1025}, obj{tests.Str1024: obj{}}}}, - {Msg: `tags/additionalproperties`, Values: val{obj{"invali*d": "hello"}, obj{"invali\"d": "hello"}, obj{"invali.d": "hello"}}}}, - }, - {Key: "span.timestamp", - Valid: val{json.Number("1496170422281000")}, - Invalid: []tests.Invalid{ - {Msg: `timestamp/type`, Values: val{"1496170422281000"}}}}, - {Key: "span.stacktrace.pre_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `/stacktrace/items/properties/pre_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `stacktrace/items/properties/pre_context/type`, Values: val{"test"}}}}, - {Key: "span.stacktrace.post_context", - Valid: val{[]interface{}{}, []interface{}{"context"}}, - Invalid: []tests.Invalid{ - {Msg: `/stacktrace/items/properties/post_context/items/type`, Values: val{[]interface{}{123}}}, - {Msg: `stacktrace/items/properties/post_context/type`, Values: val{"test"}}}}, - }) -} diff --git a/processor/stream/package_tests/transaction_attrs_test.go b/processor/stream/package_tests/transaction_attrs_test.go 
deleted file mode 100644 index 13581aef9ce..00000000000 --- a/processor/stream/package_tests/transaction_attrs_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package package_tests - -import ( - "encoding/json" - "testing" - - "github.com/elastic/apm-server/beater/config" - "github.com/elastic/apm-server/model/transaction/generated/schema" - "github.com/elastic/apm-server/processor/stream" - "github.com/elastic/apm-server/tests" -) - -func transactionProcSetup() *tests.ProcessorSetup { - return &tests.ProcessorSetup{ - Proc: &intakeTestProcessor{ - Processor: *stream.BackendProcessor(&config.Config{MaxEventSize: lrSize}), - }, - FullPayloadPath: "../testdata/intake-v2/transactions.ndjson", - Schema: schema.ModelSchema, - SchemaPrefix: "transaction", - TemplatePaths: []string{ - "../../../model/transaction/_meta/fields.yml", - "../../../_meta/fields.common.yml", - }, - } -} - -func transactionPayloadAttrsNotInFields() *tests.Set { - return tests.NewSet( - tests.Group("transaction.marks."), - "transaction.span_count.started", - tests.Group("context"), - tests.Group("transaction.page"), - tests.Group("http.request.cookies"), - "transaction.message.body", "transaction.message.headers", - "http.response.decoded_body_size", "http.response.encoded_body_size", "http.response.transfer_size", - ) -} - -func transactionFieldsNotInPayloadAttrs() *tests.Set { - return tests.NewSet( - "context.http", - "context.http.status_code", - "host.ip", - "transaction.duration.count", - "transaction.marks.*.*", - "source.ip", - tests.Group("observer"), - tests.Group("user"), - tests.Group("client"), - tests.Group("destination"), - tests.Group("url"), - tests.Group("http"), - tests.Group("span"), - tests.Group("transaction.self_time"), - tests.Group("transaction.breakdown"), - tests.Group("transaction.duration.sum"), - "experimental", - ) -} - -func transactionPayloadAttrsNotInJsonSchema() *tests.Set { - return tests.NewSet( - "transaction", - tests.Group("transaction.context.request.env."), - tests.Group("transaction.context.request.body"), - tests.Group("transaction.context.request.cookies"), - tests.Group("transaction.context.custom"), - tests.Group("transaction.context.tags"), - tests.Group("transaction.marks"), - tests.Group("transaction.context.request.headers."), - tests.Group("transaction.context.response.headers."), - tests.Group("transaction.context.message.headers."), - ) -} - -func transactionRequiredKeys() *tests.Set { - return tests.NewSet( - "transaction", - "transaction.span_count", - "transaction.span_count.started", - "transaction.trace_id", - "transaction.id", - "transaction.duration", - "transaction.type", - "transaction.context.request.method", - 
"transaction.context.request.url", - ) -} - -func transactionKeywordExceptionKeys() *tests.Set { - return tests.NewSet( - "processor.event", "processor.name", - "transaction.marks", - "context.tags", - "event.outcome", - tests.Group("observer"), - tests.Group("url"), - tests.Group("http"), - tests.Group("destination"), - - // metadata fields - tests.Group("agent"), - tests.Group("container"), - tests.Group("host"), - tests.Group("kubernetes"), - tests.Group("process"), - tests.Group("service"), - tests.Group("user"), - tests.Group("span"), - tests.Group("cloud"), - ) -} - -func TestTransactionPayloadMatchFields(t *testing.T) { - transactionProcSetup().PayloadAttrsMatchFields(t, - transactionPayloadAttrsNotInFields(), - transactionFieldsNotInPayloadAttrs()) -} - -func TestTransactionPayloadMatchJsonSchema(t *testing.T) { - transactionProcSetup().PayloadAttrsMatchJsonSchema(t, - transactionPayloadAttrsNotInJsonSchema(), - tests.NewSet("transaction.context.user.email", "transaction.context.experimental", "transaction.sample_rate")) -} - -func TestAttrsPresenceInTransaction(t *testing.T) { - transactionProcSetup().AttrsPresence(t, transactionRequiredKeys(), nil) -} - -func TestKeywordLimitationOnTransactionAttrs(t *testing.T) { - transactionProcSetup().KeywordLimitation( - t, - transactionKeywordExceptionKeys(), - []tests.FieldTemplateMapping{ - {Template: "parent.id", Mapping: "parent_id"}, - {Template: "trace.id", Mapping: "trace_id"}, - {Template: "transaction.message.", Mapping: "context.message."}, - {Template: "transaction."}, - }, - ) -} - -func TestPayloadDataForTransaction(t *testing.T) { - // add test data for testing - // * specific edge cases - // * multiple allowed dataypes - // * regex pattern, time formats - // * length restrictions, other than keyword length restrictions - - transactionProcSetup().DataValidation(t, - []tests.SchemaTestData{ - {Key: "transaction.duration", - Valid: []interface{}{12.4}, - Invalid: []tests.Invalid{{Msg: `duration/type`, Values: val{"123"}}}}, - {Key: "transaction.timestamp", - Valid: val{json.Number("1496170422281000")}, - Invalid: []tests.Invalid{ - {Msg: `timestamp/type`, Values: val{"1496170422281000"}}}}, - {Key: "transaction.marks", - Valid: []interface{}{obj{}, obj{tests.Str1024: obj{tests.Str1024: 21.0, "end": -45}}}, - Invalid: []tests.Invalid{ - {Msg: `marks/type`, Values: val{"marks"}}, - {Msg: `marks/patternproperties`, Values: val{ - obj{"timing": obj{"start": "start"}}, - obj{"timing": obj{"start": obj{}}}, - obj{"timing": obj{"m*e": -45}}, - obj{"timing": obj{"m\"": -45}}, - obj{"timing": obj{"m.": -45}}}}, - {Msg: `marks/additionalproperties`, Values: val{ - obj{"tim*ing": obj{"start": -45}}, - obj{"tim\"ing": obj{"start": -45}}, - obj{"tim.ing": obj{"start": -45}}}}}}, - {Key: "transaction.context.custom", - Valid: val{obj{"whatever": obj{"comes": obj{"end": -45}}}, - obj{"whatever": 123}}, - Invalid: []tests.Invalid{ - {Msg: `context/properties/custom/additionalproperties`, Values: val{obj{"what.ever": 123}, obj{"what*ever": 123}, obj{"what\"ever": 123}}}, - {Msg: `context/properties/custom/type`, Values: val{"context"}}}}, - {Key: "transaction.context.request.body", - Valid: []interface{}{obj{}, tests.Str1025}, - Invalid: []tests.Invalid{{Msg: `context/properties/request/properties/body/type`, Values: val{102}}}}, - {Key: "transaction.context.request.headers", Valid: val{ - obj{"User-Agent": "go-1.1"}, - obj{"foo-bar": "a,b"}, - obj{"foo": []interface{}{"a", "b"}}}, - Invalid: []tests.Invalid{{Msg: `properties/headers`, Values: 
val{102, obj{"foo": obj{"bar": "a"}}}}}}, - {Key: "transaction.context.request.env", - Valid: []interface{}{obj{}}, - Invalid: []tests.Invalid{{Msg: `context/properties/request/properties/env/type`, Values: val{102, "a"}}}}, - {Key: "transaction.context.request.cookies", - Valid: []interface{}{obj{}}, - Invalid: []tests.Invalid{{Msg: `context/properties/request/properties/cookies/type`, Values: val{123, ""}}}}, - {Key: "transaction.context.response.headers", Valid: val{ - obj{"User-Agent": "go-1.1"}, - obj{"foo-bar": "a,b"}, - obj{"foo": []interface{}{"a", "b"}}}, - Invalid: []tests.Invalid{{Msg: `properties/headers`, Values: val{102, obj{"foo": obj{"bar": "a"}}}}}}, - {Key: "transaction.context.tags", - Valid: val{obj{tests.Str1024Special: tests.Str1024Special}, obj{tests.Str1024: 123.45}, obj{tests.Str1024: true}}, - Invalid: []tests.Invalid{ - {Msg: `tags/type`, Values: val{"tags"}}, - {Msg: `tags/patternproperties`, Values: val{obj{"invalid": tests.Str1025}, obj{tests.Str1024: obj{}}}}, - {Msg: `tags/additionalproperties`, Values: val{obj{"invali*d": "hello"}, obj{"invali\"d": "hello"}, obj{"invali.d": "hello"}}}}}, - {Key: "transaction.context.user.id", - Valid: val{123, tests.Str1024Special}, - Invalid: []tests.Invalid{ - {Msg: `context/properties/user/properties/id/type`, Values: val{obj{}}}, - {Msg: `context/properties/user/properties/id/maxlength`, Values: val{tests.Str1025}}}}, - }) -} diff --git a/processor/stream/processor.go b/processor/stream/processor.go index 61249d11460..952cb515041 100644 --- a/processor/stream/processor.go +++ b/processor/stream/processor.go @@ -18,259 +18,221 @@ package stream import ( + "bytes" "context" - "errors" "io" "sync" - "time" - - "golang.org/x/time/rate" "go.elastic.co/apm" + "github.com/pkg/errors" + "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/decoder" "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/model/modeldecoder" - "github.com/elastic/apm-server/model/modeldecoder/field" - "github.com/elastic/apm-server/publish" + "github.com/elastic/apm-server/model/modeldecoder/rumv3" + v2 "github.com/elastic/apm-server/model/modeldecoder/v2" "github.com/elastic/apm-server/utility" - "github.com/elastic/apm-server/validation" ) var ( - ErrUnrecognizedObject = errors.New("did not recognize object type") + errUnrecognizedObject = errors.New("did not recognize object type") ) const ( - batchSize = 10 + errorEventType = "error" + metricsetEventType = "metricset" + spanEventType = "span" + transactionEventType = "transaction" + rumv3ErrorEventType = "e" + rumv3TransactionEventType = "x" ) -type decodeMetadataFunc func(interface{}, bool, *model.Metadata) error - -// functions with the decodeEventFunc signature decode their input argument into their batch argument (output) -type decodeEventFunc func(modeldecoder.Input, *model.Batch) error +type decodeMetadataFunc func(decoder.Decoder, *model.APMEvent) error type Processor struct { - Mconfig modeldecoder.Config MaxEventSize int streamReaderPool sync.Pool decodeMetadata decodeMetadataFunc - models map[string]decodeEventFunc } func BackendProcessor(cfg *config.Config) *Processor { return &Processor{ - Mconfig: modeldecoder.Config{Experimental: cfg.Mode == config.ModeExperimental}, MaxEventSize: cfg.MaxEventSize, - decodeMetadata: modeldecoder.DecodeMetadata, - models: map[string]decodeEventFunc{ - "transaction": modeldecoder.DecodeTransaction, - "span": 
modeldecoder.DecodeSpan, - "metricset": modeldecoder.DecodeMetricset, - "error": modeldecoder.DecodeError, - }, + decodeMetadata: v2.DecodeNestedMetadata, } } func RUMV2Processor(cfg *config.Config) *Processor { return &Processor{ - Mconfig: modeldecoder.Config{Experimental: cfg.Mode == config.ModeExperimental}, MaxEventSize: cfg.MaxEventSize, - decodeMetadata: modeldecoder.DecodeMetadata, - models: map[string]decodeEventFunc{ - "transaction": modeldecoder.DecodeRUMV2Transaction, - "span": modeldecoder.DecodeRUMV2Span, - "metricset": modeldecoder.DecodeRUMV2Metricset, - "error": modeldecoder.DecodeRUMV2Error, - }, + decodeMetadata: v2.DecodeNestedMetadata, } } func RUMV3Processor(cfg *config.Config) *Processor { return &Processor{ - Mconfig: modeldecoder.Config{Experimental: cfg.Mode == config.ModeExperimental, HasShortFieldNames: true}, MaxEventSize: cfg.MaxEventSize, - decodeMetadata: modeldecoder.DecodeRUMV3Metadata, - models: map[string]decodeEventFunc{ - "x": modeldecoder.DecodeRUMV3Transaction, - "e": modeldecoder.DecodeRUMV3Error, - "me": modeldecoder.DecodeRUMV3Metricset, - }, + decodeMetadata: rumv3.DecodeNestedMetadata, } } -func (p *Processor) readMetadata(metadata *model.Metadata, reader *streamReader) error { - var rawModel map[string]interface{} - err := reader.Read(&rawModel) - if err != nil { +func (p *Processor) readMetadata(reader *streamReader, out *model.APMEvent) error { + if err := p.decodeMetadata(reader, out); err != nil { + err = reader.wrapError(err) if err == io.EOF { - return &Error{ - Type: InvalidInputErrType, + return &InvalidInputError{ Message: "EOF while reading metadata", Document: string(reader.LatestLine()), } } - return err - } - - fieldName := field.Mapper(p.Mconfig.HasShortFieldNames) - rawMetadata, ok := rawModel[fieldName("metadata")].(map[string]interface{}) - if !ok { - return &Error{ - Type: InvalidInputErrType, - Message: ErrUnrecognizedObject.Error(), + if _, ok := err.(*InvalidInputError); ok { + return err + } + return &InvalidInputError{ + Message: err.Error(), Document: string(reader.LatestLine()), } } - - if err := p.decodeMetadata(rawMetadata, p.Mconfig.HasShortFieldNames, metadata); err != nil { - var ve *validation.Error - if errors.As(err, &ve) { - return &Error{ - Type: InvalidInputErrType, - Message: err.Error(), - Document: string(reader.LatestLine()), - } - } - return err - } return nil } -// HandleRawModel validates and decodes a single json object into its struct form -func (p *Processor) HandleRawModel(rawModel map[string]interface{}, batch *model.Batch, requestTime time.Time, streamMetadata model.Metadata) error { - for key, decodeEvent := range p.models { - entry, ok := rawModel[key] - if !ok { - continue - } - err := decodeEvent(modeldecoder.Input{ - Raw: entry, - RequestTime: requestTime, - Metadata: streamMetadata, - Config: p.Mconfig, - }, batch) - if err != nil { - return err +// identifyEventType takes the raw event body and reads ahead the first key of +// the underlying json input. 
This method relies on some assumptions about the +// input format: +// - the input is in JSON format +// - every valid ndjson line only has one root key +// - the bytes that we must match on are ASCII +func (p *Processor) identifyEventType(body []byte) []byte { + // find event type, trim spaces and account for single and double quotes + var quote byte + var key []byte + for i, r := range body { + if r == '"' || r == '\'' { + quote = r + key = body[i+1:] + break } + } + end := bytes.IndexByte(key, quote) + if end == -1 { return nil } - return ErrUnrecognizedObject + return key[:end] } -// readBatch will read up to `batchSize` objects from the ndjson stream, -// returning a slice of Transformables and a boolean indicating that there -// might be more to read. +// readBatch reads up to `batchSize` events from the ndjson stream into +// batch, returning the number of events read and any error encountered. +// Callers should always process the n > 0 events returned before considering +// the error err. func (p *Processor) readBatch( ctx context.Context, - ipRateLimiter *rate.Limiter, - requestTime time.Time, - streamMetadata *model.Metadata, + baseEvent model.APMEvent, batchSize int, batch *model.Batch, reader *streamReader, - response *Result, -) bool { - - if ipRateLimiter != nil { - // use provided rate limiter to throttle batch read - ctxT, cancel := context.WithTimeout(ctx, time.Second) - err := ipRateLimiter.WaitN(ctxT, batchSize) - cancel() - if err != nil { - response.Add(&Error{ - Type: RateLimitErrType, - Message: "rate limit exceeded", - }) - return true - } - } + result *Result, +) (int, error) { // input events are decoded and appended to the batch + origLen := len(*batch) for i := 0; i < batchSize && !reader.IsEOF(); i++ { - var rawModel map[string]interface{} - err := reader.Read(&rawModel) + body, err := reader.ReadAhead() if err != nil && err != io.EOF { - if e, ok := err.(*Error); ok && (e.Type == InvalidInputErrType || e.Type == InputTooLargeErrType) { - response.LimitedAdd(e) + err := reader.wrapError(err) + var invalidInput *InvalidInputError + if errors.As(err, &invalidInput) { + result.LimitedAdd(err) continue } // return early; we assume we can only recover from input errors - response.Add(err) - return true + return len(*batch) - origLen, err } - if len(rawModel) > 0 { - - err := p.HandleRawModel(rawModel, batch, requestTime, *streamMetadata) - if err != nil { - response.LimitedAdd(&Error{ - Type: InvalidInputErrType, - Message: err.Error(), - Document: string(reader.LatestLine()), - }) - continue - } + if len(body) == 0 { + // required for backwards compatibility - sending empty lines was permitted in previous versions + continue } + input := modeldecoder.Input{Base: baseEvent} + switch eventType := p.identifyEventType(body); string(eventType) { + case errorEventType: + err = v2.DecodeNestedError(reader, &input, batch) + case metricsetEventType: + err = v2.DecodeNestedMetricset(reader, &input, batch) + case spanEventType: + err = v2.DecodeNestedSpan(reader, &input, batch) + case transactionEventType: + err = v2.DecodeNestedTransaction(reader, &input, batch) + case rumv3ErrorEventType: + err = rumv3.DecodeNestedError(reader, &input, batch) + case rumv3TransactionEventType: + err = rumv3.DecodeNestedTransaction(reader, &input, batch) + default: + err = errors.Wrap(errUnrecognizedObject, string(eventType)) + } + if err != nil && err != io.EOF { + result.LimitedAdd(&InvalidInputError{ + Message: err.Error(), + Document: string(reader.LatestLine()), + }) + } + } + if 
reader.IsEOF() { + return len(*batch) - origLen, io.EOF } - return reader.IsEOF() + return len(*batch) - origLen, nil } -// HandleStream processes a stream of events -func (p *Processor) HandleStream(ctx context.Context, ipRateLimiter *rate.Limiter, meta *model.Metadata, reader io.Reader, report publish.Reporter) *Result { - res := &Result{} - +// HandleStream processes a stream of events in batches of batchSize at a time, +// updating result as events are accepted and as per-event errors occur. +// +// HandleStream will return an error when a terminal stream-level error occurs, +// such as the rate limit being exceeded or an authorization failure. In +// this case the result will only cover the subset of events accepted. +// +// Callers must not access result concurrently with HandleStream. +func (p *Processor) HandleStream( + ctx context.Context, + baseEvent model.APMEvent, + reader io.Reader, + batchSize int, + processor model.BatchProcessor, + result *Result, +) error { sr := p.getStreamReader(reader) defer sr.release() // first item is the metadata object - err := p.readMetadata(meta, sr) - if err != nil { + if err := p.readMetadata(sr, &baseEvent); err != nil { // no point in continuing if we couldn't read the metadata - res.Add(err) - return res + return err } - - requestTime := utility.RequestTime(ctx) + baseEvent.Timestamp = utility.RequestTime(ctx) sp, ctx := apm.StartSpan(ctx, "Stream", "Reporter") defer sp.End() - var batch model.Batch - var done bool - for !done { - done = p.readBatch(ctx, ipRateLimiter, requestTime, meta, batchSize, &batch, sr, res) - if batch.Len() == 0 { - continue - } - // NOTE(axw) `report` takes ownership of transformables, which - // means we cannot reuse the slice memory. We should investigate - // alternative interfaces between the processor and publisher - // which would enable better memory reuse. - if err := report(ctx, publish.PendingReq{ - Transformables: batch.Transformables(), - Trace: !sp.Dropped(), - }); err != nil { - switch err { - case publish.ErrChannelClosed: - res.Add(&Error{ - Type: ShuttingDownErrType, - Message: "server is shutting down", - }) - case publish.ErrFull: - res.Add(&Error{ - Type: QueueFullErrType, - Message: err.Error(), - }) - default: - res.Add(err) + for { + var batch model.Batch + n, readErr := p.readBatch(ctx, baseEvent, batchSize, &batch, sr, result) + if n > 0 { + // NOTE(axw) ProcessBatch takes ownership of batch, which means we cannot reuse + // the slice memory. We should investigate alternative interfaces between the + // processor and publisher which would enable better memory reuse, e.g. by using + // a sync.Pool for creating batches, and having the publisher (terminal processor) + // release batches back into the pool. + if err := processor.ProcessBatch(ctx, &batch); err != nil { + return err } - return res + result.AddAccepted(len(batch)) + } + if readErr == io.EOF { + break + } else if readErr != nil { + return readErr } - res.AddAccepted(batch.Len()) - batch.Reset() } - return res + return nil } // getStreamReader returns a streamReader that reads ND-JSON lines from r. @@ -298,26 +260,26 @@ func (sr *streamReader) release() { sr.processor.streamReaderPool.Put(sr) } -func (sr *streamReader) Read(v *map[string]interface{}) error { - // TODO(axw) decode into a reused map, clearing out the - // map between reads. We would require that decoders copy - // any contents of rawModel that they wish to retain after - // the call, in order to safely reuse the map. 
- err := sr.NDJSONStreamDecoder.Decode(v) - if err != nil { - if _, ok := err.(decoder.JSONDecodeError); ok { - return &Error{ - Type: InvalidInputErrType, - Message: err.Error(), - Document: string(sr.LatestLine()), - } +func (sr *streamReader) wrapError(err error) error { + if err == nil { + return nil + } + if _, ok := err.(decoder.JSONDecodeError); ok { + return &InvalidInputError{ + Message: err.Error(), + Document: string(sr.LatestLine()), } - if err == decoder.ErrLineTooLong { - return &Error{ - Type: InputTooLargeErrType, - Message: "event exceeded the permitted size.", - Document: string(sr.LatestLine()), - } + } + + var e = err + if err, ok := err.(modeldecoder.DecoderError); ok { + e = err.Unwrap() + } + if errors.Is(e, decoder.ErrLineTooLong) { + return &InvalidInputError{ + TooLarge: true, + Message: "event exceeded the permitted size.", + Document: string(sr.LatestLine()), } } return err diff --git a/processor/stream/processor_test.go b/processor/stream/processor_test.go index 656b82a55a9..1f3a29783d7 100644 --- a/processor/stream/processor_test.go +++ b/processor/stream/processor_test.go @@ -20,8 +20,8 @@ package stream import ( "bytes" "context" - "encoding/json" "fmt" + "io/ioutil" "net" "path/filepath" "testing" @@ -30,105 +30,166 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/time/rate" - - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/apm-server/approvaltest" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/tests" - "github.com/elastic/apm-server/tests/loader" - "github.com/elastic/apm-server/transform" "github.com/elastic/apm-server/utility" ) -func assertApproveResult(t *testing.T, actualResponse *Result, name string) { - resultName := fmt.Sprintf("test_approved_stream_result/testIntegrationResult%s", name) - resultJSON, err := json.Marshal(actualResponse) - require.NoError(t, err) - approvaltest.ApproveJSON(t, resultName, resultJSON) -} - func TestHandlerReadStreamError(t *testing.T) { - var pendingReqs []publish.PendingReq - report := tests.TestReporter(&pendingReqs) + var accepted int + processor := model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + events := batch.Transform(ctx) + accepted += len(events) + return nil + }) - b, err := loader.LoadDataAsBytes("../testdata/intake-v2/transactions.ndjson") + payload, err := ioutil.ReadFile("../../testdata/intake-v2/transactions.ndjson") require.NoError(t, err) - bodyReader := bytes.NewBuffer(b) - timeoutReader := iotest.TimeoutReader(bodyReader) + timeoutReader := iotest.TimeoutReader(bytes.NewReader(payload)) sp := BackendProcessor(&config.Config{MaxEventSize: 100 * 1024}) - actualResult := sp.HandleStream(context.Background(), nil, &model.Metadata{}, timeoutReader, report) - assertApproveResult(t, actualResult, "ReadError") + + var actualResult Result + err = sp.HandleStream(context.Background(), model.APMEvent{}, timeoutReader, 10, processor, &actualResult) + assert.EqualError(t, err, "timeout") + assert.Equal(t, Result{Accepted: accepted}, actualResult) } func TestHandlerReportingStreamError(t *testing.T) { - for _, test := range []struct { - name string - report func(ctx context.Context, p publish.PendingReq) error - }{ - { - name: "ShuttingDown", - report: func(ctx 
context.Context, p publish.PendingReq) error { - return publish.ErrChannelClosed - }, - }, { - name: "QueueFull", - report: func(ctx context.Context, p publish.PendingReq) error { - return publish.ErrFull - }, - }, - } { - - b, err := loader.LoadDataAsBytes("../testdata/intake-v2/transactions.ndjson") - require.NoError(t, err) - bodyReader := bytes.NewBuffer(b) + payload, err := ioutil.ReadFile("../../testdata/intake-v2/transactions.ndjson") + require.NoError(t, err) + for _, test := range []struct { + name string + err error + }{{ + name: "ShuttingDown", + err: publish.ErrChannelClosed, + }, { + name: "QueueFull", + err: publish.ErrFull, + }} { sp := BackendProcessor(&config.Config{MaxEventSize: 100 * 1024}) - actualResult := sp.HandleStream(context.Background(), nil, &model.Metadata{}, bodyReader, test.report) - assertApproveResult(t, actualResult, test.name) + processor := model.ProcessBatchFunc(func(context.Context, *model.Batch) error { + return test.err + }) + + var actualResult Result + err := sp.HandleStream( + context.Background(), model.APMEvent{}, + bytes.NewReader(payload), 10, processor, &actualResult, + ) + assert.Equal(t, test.err, err) + assert.Zero(t, actualResult) } } func TestIntegrationESOutput(t *testing.T) { for _, test := range []struct { - path string - name string - }{ - {path: "errors.ndjson", name: "Errors"}, - {path: "transactions.ndjson", name: "Transactions"}, - {path: "spans.ndjson", name: "Spans"}, - {path: "metricsets.ndjson", name: "Metricsets"}, - {path: "events.ndjson", name: "Events"}, - {path: "minimal-service.ndjson", name: "MinimalService"}, - {path: "metadata-null-values.ndjson", name: "MetadataNullValues"}, - {path: "invalid-event.ndjson", name: "InvalidEvent"}, - {path: "invalid-json-event.ndjson", name: "InvalidJSONEvent"}, - {path: "invalid-json-metadata.ndjson", name: "InvalidJSONMetadata"}, - {path: "invalid-metadata.ndjson", name: "InvalidMetadata"}, - {path: "invalid-metadata-2.ndjson", name: "InvalidMetadata2"}, - {path: "unrecognized-event.ndjson", name: "UnrecognizedEvent"}, - {path: "optional-timestamps.ndjson", name: "OptionalTimestamps"}, - } { + name string + path string + errors []error // per-event errors + err error // stream-level error + }{{ + name: "Errors", + path: "errors.ndjson", + }, { + name: "Transactions", + path: "transactions.ndjson", + }, { + name: "Spans", + path: "spans.ndjson", + }, { + name: "Metricsets", + path: "metricsets.ndjson", + }, { + name: "Events", + path: "events.ndjson", + }, { + name: "MinimalService", + path: "minimal-service.ndjson", + }, { + name: "MetadataNullValues", + path: "metadata-null-values.ndjson", + }, { + name: "OptionalTimestamps", + path: "optional-timestamps.ndjson", + }, { + name: "InvalidEvent", + path: "invalid-event.ndjson", + errors: []error{ + &InvalidInputError{ + Message: `decode error: data read error: v2.transactionRoot.Transaction: v2.transaction.ID: ReadString: expects " or n,`, + Document: `{ "transaction": { "id": 12345, "trace_id": "0123456789abcdef0123456789abcdef", "parent_id": "abcdefabcdef01234567", "type": "request", "duration": 32.592981, "span_count": { "started": 21 } } } `, + }, + }, + }, { + name: "InvalidJSONEvent", + path: "invalid-json-event.ndjson", + errors: []error{ + &InvalidInputError{ + Message: "invalid-json: did not recognize object type", + Document: `{ "invalid-json" }`, + }, + }, + }, { + name: "InvalidJSONMetadata", + path: "invalid-json-metadata.ndjson", + err: &InvalidInputError{ + Message: "decode error: data read error: 
v2.metadataRoot.Metadata: v2.metadata.readFieldHash: expect :,", + Document: `{"metadata": {"invalid-json"}}`, + }, + }, { + name: "InvalidMetadata", + path: "invalid-metadata.ndjson", + err: &InvalidInputError{ + Message: "validation error: 'metadata' required", + Document: `{"metadata": {"user": null}}`, + }, + }, { + name: "InvalidMetadata2", + path: "invalid-metadata-2.ndjson", + err: &InvalidInputError{ + Message: "validation error: 'metadata' required", + Document: `{"not": "metadata"}`, + }, + }, { + name: "UnrecognizedEvent", + path: "invalid-event-type.ndjson", + errors: []error{ + &InvalidInputError{ + Message: "tennis-court: did not recognize object type", + Document: `{"tennis-court": {"name": "Centre Court, Wimbledon"}}`, + }, + }, + }} { t.Run(test.name, func(t *testing.T) { - b, err := loader.LoadDataAsBytes(filepath.Join("../testdata/intake-v2/", test.path)) + payload, err := ioutil.ReadFile(filepath.Join("../../testdata/intake-v2", test.path)) require.NoError(t, err) - bodyReader := bytes.NewBuffer(b) + var accepted int name := fmt.Sprintf("test_approved_es_documents/testIntakeIntegration%s", test.name) reqTimestamp := time.Date(2018, 8, 1, 10, 0, 0, 0, time.UTC) ctx := utility.ContextWithRequestTime(context.Background(), reqTimestamp) - report := makeApproveEventsReporter(t, name) + batchProcessor := makeApproveEventsBatchProcessor(t, name, &accepted) - reqDecoderMeta := &model.Metadata{System: model.System{IP: net.ParseIP("192.0.0.1")}} + baseEvent := model.APMEvent{ + Host: model.Host{IP: net.ParseIP("192.0.0.1")}, + } p := BackendProcessor(&config.Config{MaxEventSize: 100 * 1024}) - actualResult := p.HandleStream(ctx, nil, reqDecoderMeta, bodyReader, report) - assertApproveResult(t, actualResult, test.name) + var actualResult Result + err = p.HandleStream(ctx, baseEvent, bytes.NewReader(payload), 10, batchProcessor, &actualResult) + if test.err != nil { + assert.Equal(t, test.err, err) + } else { + require.NoError(t, err) + } + assert.Equal(t, Result{Accepted: accepted, Errors: test.errors}, actualResult) }) } } @@ -142,23 +203,26 @@ func TestIntegrationRum(t *testing.T) { {path: "transactions_spans_rum.ndjson", name: "RumTransactions"}, } { t.Run(test.name, func(t *testing.T) { - b, err := loader.LoadDataAsBytes(filepath.Join("../testdata/intake-v2/", test.path)) + payload, err := ioutil.ReadFile(filepath.Join("../../testdata/intake-v2", test.path)) require.NoError(t, err) - bodyReader := bytes.NewBuffer(b) + var accepted int name := fmt.Sprintf("test_approved_es_documents/testIntakeIntegration%s", test.name) - ctx := context.WithValue(context.Background(), "name", name) reqTimestamp := time.Date(2018, 8, 1, 10, 0, 0, 0, time.UTC) - ctx = utility.ContextWithRequestTime(ctx, reqTimestamp) - report := makeApproveEventsReporter(t, name) + ctx := utility.ContextWithRequestTime(context.Background(), reqTimestamp) + batchProcessor := makeApproveEventsBatchProcessor(t, name, &accepted) - reqDecoderMeta := model.Metadata{ + baseEvent := model.APMEvent{ UserAgent: model.UserAgent{Original: "rum-2.0"}, - Client: model.Client{IP: net.ParseIP("192.0.0.1")}} + Source: model.Source{IP: net.ParseIP("192.0.0.1")}, + Client: model.Client{IP: net.ParseIP("192.0.0.2")}, // X-Forwarded-For + } p := RUMV2Processor(&config.Config{MaxEventSize: 100 * 1024}) - actualResult := p.HandleStream(ctx, nil, &reqDecoderMeta, bodyReader, report) - assertApproveResult(t, actualResult, test.name) + var actualResult Result + err = p.HandleStream(ctx, baseEvent, bytes.NewReader(payload), 10, 
batchProcessor, &actualResult) + require.NoError(t, err) + assert.Equal(t, Result{Accepted: accepted}, actualResult) }) } } @@ -172,63 +236,42 @@ func TestRUMV3(t *testing.T) { {path: "rum_events.ndjson", name: "RUMV3Events"}, } { t.Run(test.name, func(t *testing.T) { - b, err := loader.LoadDataAsBytes(filepath.Join("../testdata/intake-v3/", test.path)) + payload, err := ioutil.ReadFile(filepath.Join("../../testdata/intake-v3", test.path)) require.NoError(t, err) - bodyReader := bytes.NewBuffer(b) + var accepted int name := fmt.Sprintf("test_approved_es_documents/testIntake%s", test.name) reqTimestamp := time.Date(2018, 8, 1, 10, 0, 0, 0, time.UTC) ctx := utility.ContextWithRequestTime(context.Background(), reqTimestamp) - report := makeApproveEventsReporter(t, name) + batchProcessor := makeApproveEventsBatchProcessor(t, name, &accepted) - reqDecoderMeta := model.Metadata{ + baseEvent := model.APMEvent{ UserAgent: model.UserAgent{Original: "rum-2.0"}, - Client: model.Client{IP: net.ParseIP("192.0.0.1")}} + Source: model.Source{IP: net.ParseIP("192.0.0.1")}, + Client: model.Client{IP: net.ParseIP("192.0.0.2")}, // X-Forwarded-For + } p := RUMV3Processor(&config.Config{MaxEventSize: 100 * 1024}) - actualResult := p.HandleStream(ctx, nil, &reqDecoderMeta, bodyReader, report) - assertApproveResult(t, actualResult, test.name) + var actualResult Result + err = p.HandleStream(ctx, baseEvent, bytes.NewReader(payload), 10, batchProcessor, &actualResult) + require.NoError(t, err) + assert.Equal(t, Result{Accepted: accepted}, actualResult) }) } } -func TestRateLimiting(t *testing.T) { - report := func(ctx context.Context, p publish.PendingReq) error { - return nil - } - - b, err := loader.LoadDataAsBytes("../testdata/intake-v2/ratelimit.ndjson") - require.NoError(t, err) - for _, test := range []struct { - name string - lim *rate.Limiter - hit int - }{ - {name: "NoLimiter"}, - {name: "LimiterDenyAll", lim: rate.NewLimiter(rate.Limit(0), 2)}, - {name: "LimiterAllowAll", lim: rate.NewLimiter(rate.Limit(40), 40*5)}, - {name: "LimiterPartiallyUsedLimitAllow", lim: rate.NewLimiter(rate.Limit(10), 10*2), hit: 10}, - {name: "LimiterPartiallyUsedLimitDeny", lim: rate.NewLimiter(rate.Limit(7), 7*2), hit: 10}, - {name: "LimiterDeny", lim: rate.NewLimiter(rate.Limit(6), 6*2)}, - } { - if test.hit > 0 { - assert.True(t, test.lim.AllowN(time.Now(), test.hit)) - } - - actualResult := BackendProcessor(&config.Config{MaxEventSize: 100 * 1024}).HandleStream( - context.Background(), test.lim, &model.Metadata{}, bytes.NewReader(b), report) - assertApproveResult(t, actualResult, test.name) - } -} - -func makeApproveEventsReporter(t *testing.T, name string) publish.Reporter { - return func(ctx context.Context, p publish.PendingReq) error { - var events []beat.Event - for _, transformable := range p.Transformables { - events = append(events, transformable.Transform(ctx, &transform.Config{})...) - } +func makeApproveEventsBatchProcessor(t *testing.T, name string, count *int) model.BatchProcessor { + return model.ProcessBatchFunc(func(ctx context.Context, b *model.Batch) error { + events := b.Transform(ctx) + *count += len(events) docs := beatertest.EncodeEventDocs(events...) 
approvaltest.ApproveEventDocs(t, name, docs) return nil - } + }) +} + +type nopBatchProcessor struct{} + +func (nopBatchProcessor) ProcessBatch(context.Context, *model.Batch) error { + return nil } diff --git a/processor/stream/result.go b/processor/stream/result.go index 92d925cf9cf..b0812c026e5 100644 --- a/processor/stream/result.go +++ b/processor/stream/result.go @@ -18,52 +18,25 @@ package stream import ( - "strings" + "errors" "github.com/elastic/beats/v7/libbeat/monitoring" ) -type Error struct { - Type StreamError `json:"-"` - Message string `json:"message"` - Document string `json:"document,omitempty"` -} - -func (s *Error) Error() string { - return s.Message -} - -type StreamError int - -const ( - QueueFullErrType StreamError = iota - InvalidInputErrType - InputTooLargeErrType - ShuttingDownErrType - ServerErrType - MethodForbiddenErrType - RateLimitErrType -) - const ( errorsLimit = 5 ) var ( - m = monitoring.Default.NewRegistry("apm-server.processor.stream") - mAccepted = monitoring.NewInt(m, "accepted") - monitoringMap = map[StreamError]*monitoring.Int{ - QueueFullErrType: monitoring.NewInt(m, "errors.queue"), - InvalidInputErrType: monitoring.NewInt(m, "errors.invalid"), - InputTooLargeErrType: monitoring.NewInt(m, "errors.toolarge"), - ShuttingDownErrType: monitoring.NewInt(m, "errors.server"), - ServerErrType: monitoring.NewInt(m, "errors.closed"), - } + m = monitoring.Default.NewRegistry("apm-server.processor.stream") + mAccepted = monitoring.NewInt(m, "accepted") + mInvalid = monitoring.NewInt(m, "errors.invalid") + mTooLarge = monitoring.NewInt(m, "errors.toolarge") ) type Result struct { - Accepted int `json:"accepted"` - Errors []*Error `json:"errors,omitempty"` + Accepted int + Errors []error } func (r *Result) LimitedAdd(err error) { @@ -79,29 +52,26 @@ func (r *Result) AddAccepted(ct int) { mAccepted.Add(int64(ct)) } -func (r *Result) Error() string { - var errorList []string - for _, e := range r.Errors { - errorList = append(errorList, e.Error()) - } - return strings.Join(errorList, ", ") -} - func (r *Result) add(err error, add bool) { - e, ok := err.(*Error) - if !ok { - e = &Error{Message: err.Error(), Type: ServerErrType} + var invalid *InvalidInputError + if errors.As(err, &invalid) { + if invalid.TooLarge { + mTooLarge.Inc() + } else { + mInvalid.Inc() + } } if add { - r.Errors = append(r.Errors, e) + r.Errors = append(r.Errors, err) } - countErr(e.Type) } -func countErr(e StreamError) { - if i, ok := monitoringMap[e]; ok { - i.Inc() - } else { - monitoringMap[ServerErrType].Inc() - } +type InvalidInputError struct { + TooLarge bool + Message string + Document string +} + +func (e *InvalidInputError) Error() string { + return e.Message } diff --git a/processor/stream/result_test.go b/processor/stream/result_test.go index dc0c246743a..b93718295b2 100644 --- a/processor/stream/result_test.go +++ b/processor/stream/result_test.go @@ -18,62 +18,51 @@ package stream import ( - "errors" "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/monitoring" ) -func TestStreamResponseSimple(t *testing.T) { - sr := Result{} - sr.LimitedAdd(&Error{Type: QueueFullErrType, Message: "err1", Document: "buf1"}) - sr.LimitedAdd(errors.New("transmogrifier error")) - sr.LimitedAdd(&Error{Type: InvalidInputErrType, Message: "err2", Document: "buf2"}) - sr.LimitedAdd(&Error{Type: InvalidInputErrType, Message: "err3", Document: "buf3"}) - - sr.LimitedAdd(&Error{Message: "err4"}) - 
sr.LimitedAdd(&Error{Message: "err5"}) - - // not added - sr.LimitedAdd(&Error{Message: "err6"}) +func TestResultAdd(t *testing.T) { + err1 := &InvalidInputError{Message: "err1", Document: "buf1"} + err2 := &InvalidInputError{Message: "err2", Document: "buf2"} + err3 := &InvalidInputError{Message: "err3", Document: "buf3"} + err4 := &InvalidInputError{Message: "err4"} + err5 := &InvalidInputError{Message: "err5"} + err6 := &InvalidInputError{Message: "err6"} + err7 := &InvalidInputError{Message: "err7"} - // added - sr.Add(&Error{Message: "err6"}) + result := Result{} + result.LimitedAdd(err1) + result.LimitedAdd(err2) + result.LimitedAdd(err3) + result.LimitedAdd(err4) + result.LimitedAdd(err5) + result.LimitedAdd(err5) + result.LimitedAdd(err6) // limited, not added + result.Add(err7) // unconditionally added - assert.Len(t, sr.Errors, 6) - - expectedStr := `err1, transmogrifier error, err2, err3, err4, err6` - assert.Equal(t, expectedStr, sr.Error()) + assert.Len(t, result.Errors, 6) + assert.Equal(t, []error{err1, err2, err3, err4, err5, err7}, result.Errors) } func TestMonitoring(t *testing.T) { - for _, test := range []struct { - counter *monitoring.Int - expected int64 - }{ - {monitoringMap[QueueFullErrType], 1}, - {monitoringMap[InvalidInputErrType], 2}, - {monitoringMap[InputTooLargeErrType], 1}, - {monitoringMap[ShuttingDownErrType], 1}, - {monitoringMap[ServerErrType], 2}, - {mAccepted, 12}, - } { - // get current value for counter - ct := test.counter.Get() - - sr := Result{} - sr.AddAccepted(9) - sr.AddAccepted(3) - sr.LimitedAdd(&Error{Type: QueueFullErrType}) - sr.LimitedAdd(errors.New("error")) - sr.LimitedAdd(&Error{Type: InvalidInputErrType}) - sr.LimitedAdd(&Error{Type: ShuttingDownErrType}) - sr.LimitedAdd(&Error{Type: ServerErrType}) - sr.LimitedAdd(&Error{Type: InputTooLargeErrType, Message: "err3", Document: "buf3"}) - sr.Add(&Error{Type: InvalidInputErrType}) + initialAccepted := mAccepted.Get() + initialInvalid := mInvalid.Get() + initialTooLarge := mTooLarge.Get() - assert.Equal(t, ct+test.expected, test.counter.Get()) + var result Result + result.AddAccepted(9) + result.AddAccepted(3) + for i := 0; i < 10; i++ { + result.LimitedAdd(&InvalidInputError{TooLarge: false}) } + result.LimitedAdd(&InvalidInputError{TooLarge: true}) + result.Add(&InvalidInputError{TooLarge: true}) + result.Add(errors.New("error")) + + assert.Equal(t, int64(12), mAccepted.Get()-initialAccepted) + assert.Equal(t, int64(10), mInvalid.Get()-initialInvalid) + assert.Equal(t, int64(2), mTooLarge.Get()-initialTooLarge) } diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationErrors.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationErrors.approved.json index f8036f7ce98..a49292b8c9a 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationErrors.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationErrors.approved.json @@ -28,7 +28,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -136,7 +139,6 @@ "type": "ConnectionError" } ], - "grouping_key": "d72b25a26fde3f3aaad1c86950acd070", "id": "0123456789012345", "log": { "level": "warning", @@ -164,7 +166,6 @@ "exclude_from_grouping": false, "filename": "/webpack/file/name.py", "function": "foo", - "library_frame": false, "line": { "column": 4, "context": "line3", @@ -209,15 +210,11 @@ } } ] - }, - 
"page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" } }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "myhostname", "ip": "192.0.0.1", "name": "prod.example", "os": { @@ -226,9 +223,7 @@ }, "http": { "request": { - "body": { - "original": "Hello World" - }, + "body.original": "Hello World", "cookies": { "c1": "v1", "c2": "v2" @@ -256,12 +251,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -338,6 +329,7 @@ "scheme": "https" }, "user": { + "domain": "ldap://abc", "id": "99", "name": "foo" }, @@ -370,13 +362,15 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "error": { - "grouping_key": "dc8dd667f7036ec5f0bae87bf2188243", "id": "xFoaabb123FFFFFF", "log": { "message": "no user found", @@ -390,7 +384,7 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "myhostname", "ip": "192.0.0.1", "name": "prod.example", "os": { @@ -444,6 +438,7 @@ "us": 1533826745999000 }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -474,7 +469,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -485,12 +483,11 @@ "message": "Cannot read property 'baz' no defined" } ], - "grouping_key": "ae0232fed4cb40e7ebc62a585a421d60", "id": "cdefab0123456789" }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "myhostname", "ip": "192.0.0.1", "name": "prod.example", "os": { @@ -544,6 +541,7 @@ "us": 1533826745999000 }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -574,7 +572,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -585,12 +586,11 @@ "type": "DbError" } ], - "grouping_key": "c3868d6704b923014eaffea034e70a3d", "id": "cdefab0123456780" }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "myhostname", "ip": "192.0.0.1", "name": "prod.example", "os": { @@ -650,6 +650,7 @@ "id": "0123456789abcdeffedcba0123456789" }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" @@ -680,13 +681,15 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" }, "error": { - "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", "id": "abcdef0123456789", "log": { "level": "custom log level", @@ -695,7 +698,7 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "myhostname", "ip": "192.0.0.1", "name": "prod.example", "os": { @@ -760,6 +763,7 @@ "type": "request" }, "user": { + "domain": "ldap://abc", "email": "bar@example.com", "id": "123", "name": "bar" diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationEvents.approved.json 
b/processor/stream/test_approved_es_documents/testIntakeIntegrationEvents.approved.json index b2c41c84ff1..ea2b683e5f4 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationEvents.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationEvents.approved.json @@ -5,20 +5,142 @@ "agent": { "ephemeral_id": "e71be9ac-93b0-44b9-a997-5638f6ccfc36", "name": "java", - "version": "1.10.0-SNAPSHOT" + "version": "1.10.0" }, "client": { - "ip": "12.53.12.1" + "ip": "192.168.0.1" }, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, - "event": { - "outcome": "success" + "error": { + "culprit": "opbeans.controllers.DTInterceptor.preHandle(DTInterceptor.java:73)", + "custom": { + "and_objects": { + "foo": [ + "bar", + "baz" + ] + }, + "my_key": 1, + "some_other_value": "foobar" + }, + "exception": [ + { + "attributes": { + "foo": "bar" + }, + "code": "42", + "handled": false, + "message": "Theusernamerootisunknown", + "module": "org.springframework.http.client", + "stacktrace": [ + { + "abs_path": "/tmp/AbstractPlainSocketImpl.java", + "context": { + "post": [ + "line4", + "line5" + ], + "pre": [ + "line1", + "line2" + ] + }, + "exclude_from_grouping": false, + "filename": "AbstractPlainSocketImpl.java", + "function": "connect", + "library_frame": true, + "line": { + "column": 4, + "context": "3", + "number": 3 + }, + "module": "java.net", + "vars": { + "key": "value" + } + }, + { + "exclude_from_grouping": false, + "filename": "AbstractClientHttpRequest.java", + "function": "execute", + "line": { + "number": 102 + }, + "vars": { + "key": "value" + } + } + ], + "type": "java.net.UnknownHostException" + }, + { + "message": "something wrong writing a file", + "type": "InternalDbError" + }, + { + "message": "disk spinning way too fast", + "type": "VeryInternalDbError" + }, + { + "message": "on top of it,internet doesn't work", + "parent": 1, + "type": "ConnectionError" + } + ], + "id": "9876543210abcdeffedcba0123456789", + "log": { + "level": "error", + "logger_name": "http404", + "message": "Request method 'POST' not supported", + "param_message": "Request method 'POST' /events/:event not supported", + "stacktrace": [ + { + "abs_path": "/tmp/Socket.java", + "classname": "Request::Socket", + "context": { + "post": [ + "line4", + "line5" + ], + "pre": [ + "line1", + "line2" + ] + }, + "exclude_from_grouping": false, + "filename": "Socket.java", + "function": "connect", + "library_frame": true, + "line": { + "column": 4, + "context": "line3", + "number": 3 + }, + "module": "java.net", + "vars": { + "key": "value" + } + }, + { + "abs_path": "/tmp/SimpleBufferingClientHttpRequest.java", + "exclude_from_grouping": false, + "filename": "SimpleBufferingClientHttpRequest.java", + "function": "executeInternal", + "line": { + "number": 102 + }, + "vars": { + "key": "value" + } + } + ] + } }, "host": { "architecture": "amd64", - "hostname": "node-name", + "hostname": "8ec7ceb99074", "ip": "192.0.0.1", "name": "host1", "os": { @@ -27,15 +149,7 @@ }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additionalinformation" - }, - "string": "helloworld" - } - }, + "body.original": "HelloWorld", "cookies": { "c1": "v1", "c2": "v2" @@ -45,29 +159,26 @@ "SERVER_SOFTWARE": "nginx" }, "headers": { - "Content-Type": [ - "text/html" + "Content-Length": [ + "0" ], "Cookie": [ - "c1=v1,c2=v2" + "c1=v1", + "c2=v2" ], "Elastic-Apm-Traceparent": [ - 
"00-33a0bd4cceff0370a7c57d807032688e-69feaabc5b88d7e8-01" + "00-8c21b4b556467a0b17ae5da959b5f388-31301f1fb2998121-01" ], - "User-Agent": [ - "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36", - "MozillaChromeEdge" + "Forwarded": [ + "for=192.168.0.1" + ], + "Host": [ + "opbeans-java:3000" ] }, - "method": "post", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1:8080" - } + "method": "POST" }, "response": { - "decoded_body_size": 401.9, - "encoded_body_size": 356.9, "finished": true, "headers": { "Content-Type": [ @@ -75,8 +186,7 @@ ] }, "headers_sent": true, - "status_code": 200, - "transfer_size": 300 + "status_code": 200 }, "version": "1.1" }, @@ -97,7 +207,7 @@ "segment": 5 }, "parent": { - "id": "abcdefabcdef01234567" + "id": "9632587410abcdef" }, "process": { "args": [ @@ -108,22 +218,22 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "transaction", - "name": "transaction" + "event": "error", + "name": "error" }, "service": { "environment": "production", "framework": { - "name": "spring", - "version": "5.0.0" + "name": "Node", + "version": "1" }, "language": { "name": "Java", - "version": "10.0.2" + "version": "1.2" }, - "name": "experimental-java", + "name": "service1", "node": { - "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" + "name": "node-xyz" }, "runtime": { "name": "Java", @@ -138,32 +248,12 @@ "us": 1571657444929001 }, "trace": { - "id": "0acd456789abcdef0123456789abcdef" + "id": "0123456789abcdeffedcba0123456789" }, "transaction": { - "custom": { - "(": "notavalidregexandthatisfine", - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foobar" - }, - "duration": { - "us": 32592 - }, - "id": "4340a8e0df1906ecbfa9", - "name": "ResourceHttpRequestHandler", - "result": "HTTP2xx", + "id": "1234567890987654", "sampled": true, - "span_count": { - "dropped": 0, - "started": 17 - }, - "type": "http" + "type": "request" }, "url": { "domain": "www.example.com", @@ -176,12 +266,9 @@ "scheme": "https" }, "user": { - "email": "foo@mail.com", + "email": "user@foo.mail", "id": "99", "name": "foo" - }, - "user_agent": { - "original": "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36, MozillaChromeEdge" } }, { @@ -199,13 +286,29 @@ }, "host": { "architecture": "amd64", - "hostname": "node-name", + "hostname": "8ec7ceb99074", "ip": "192.0.0.1", "name": "host1", "os": { "platform": "Linux" } }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "decoded_body_size": 401, + "encoded_body_size": 356, + "headers": { + "Content-Type": [ + "application/json" + ] + }, + "status_code": 302, + "transfer_size": 300.12 + } + }, "kubernetes": { "namespace": "default", "node": { @@ -271,7 +374,7 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "decoded_body_size": 401, "encoded_body_size": 356, @@ -280,13 +383,11 @@ "application/json" ] }, - "status_code": 200, + "status_code": 302, "transfer_size": 300.12 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "1234567890aaaade", "name": "GET users-authenticated", "stacktrace": [ @@ -326,6 +427,9 @@ }, "transaction": { "id": "1234567890987654" + }, + "url": { + "original": "http://localhost:8000" } }, { @@ -333,33 +437,79 @@ "agent": { "ephemeral_id": "e71be9ac-93b0-44b9-a997-5638f6ccfc36", "name": "java", - 
"version": "1.10.0" + "version": "1.10.0-SNAPSHOT" + }, + "client": { + "ip": "12.53.12.1" }, - "byte_counter": 1, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, - "dotted": { - "float": { - "gauge": 6.12 - } + "event": { + "outcome": "success" }, - "double_gauge": 3.141592653589793, - "float_gauge": 9.16, "host": { "architecture": "amd64", - "hostname": "node-name", + "hostname": "8ec7ceb99074", "ip": "192.0.0.1", "name": "host1", "os": { "platform": "Linux" } }, - "integer_gauge": 42767, - "kubernetes": { - "namespace": "default", - "node": { - "name": "node-name" + "http": { + "request": { + "body.original": { + "additional": { + "bar": 123, + "req": "additionalinformation" + }, + "string": "helloworld" + }, + "cookies": { + "c1": "v1", + "c2": "v2" + }, + "env": { + "GATEWAY_INTERFACE": "CGI/1.1", + "SERVER_SOFTWARE": "nginx" + }, + "headers": { + "Content-Type": [ + "text/html" + ], + "Cookie": [ + "c1=v1,c2=v2" + ], + "Elastic-Apm-Traceparent": [ + "00-33a0bd4cceff0370a7c57d807032688e-69feaabc5b88d7e8-01" + ], + "User-Agent": [ + "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36", + "MozillaChromeEdge" + ] + }, + "method": "POST" + }, + "response": { + "decoded_body_size": 401.9, + "encoded_body_size": 356.9, + "finished": true, + "headers": { + "Content-Type": [ + "application/json" + ] + }, + "headers_sent": true, + "status_code": 200, + "transfer_size": 300 + }, + "version": "1.1" + }, + "kubernetes": { + "namespace": "default", + "node": { + "name": "node-name" }, "pod": { "name": "instrumented-java-service", @@ -368,24 +518,12 @@ }, "labels": { "ab_testing": true, - "code": 200, "group": "experimental", - "segment": 5, - "success": true + "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", + "segment": 5 }, - "long_gauge": 3147483648, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } + "parent": { + "id": "abcdefabcdef01234567" }, "process": { "args": [ @@ -396,8 +534,8 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "metric", - "name": "metric" + "event": "transaction", + "name": "transaction" }, "service": { "environment": "production", @@ -409,7 +547,7 @@ "name": "Java", "version": "10.0.2" }, - "name": "1234_service-12a3", + "name": "experimental-java", "node": { "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, @@ -419,35 +557,57 @@ }, "version": "4.3.0" }, - "short_counter": 227, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 633.288 - } - }, - "subtype": "mysql", - "type": "db" + "source": { + "ip": "12.53.12.1" + }, + "timestamp": { + "us": 1571657444929001 + }, + "trace": { + "id": "0acd456789abcdef0123456789abcdef" }, "transaction": { - "breakdown": { - "count": 12 + "custom": { + "(": "notavalidregexandthatisfine", + "and_objects": { + "foo": [ + "bar", + "baz" + ] + }, + "my_key": 1, + "some_other_value": "foobar" }, "duration": { - "count": 2, - "sum": { - "us": 12 - } + "us": 32592 }, - "name": "GET/", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } + "id": "4340a8e0df1906ecbfa9", + "name": "ResourceHttpRequestHandler", + "result": "HTTP2xx", + "sampled": true, + "span_count": { + "dropped": 0, + "started": 17 }, - "type": "request" + "type": "http" + }, + "url": { + "domain": "www.example.com", + "fragment": "#hash", + "full": "https://www.example.com/p/a/t/h?query=string#hash", + "original": 
"/p/a/t/h?query=string#hash", + "path": "/p/a/t/h", + "port": 8080, + "query": "?query=string", + "scheme": "https" + }, + "user": { + "email": "foo@mail.com", + "id": "99", + "name": "foo" + }, + "user_agent": { + "original": "Mozilla/5.0(Macintosh;IntelMacOSX10_10_5)AppleWebKit/537.36(KHTML,likeGecko)Chrome/51.0.2704.103Safari/537.36, MozillaChromeEdge" } }, { @@ -457,196 +617,18 @@ "name": "java", "version": "1.10.0" }, - "client": { - "ip": "192.168.0.1" - }, "container": { "id": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, - "error": { - "culprit": "opbeans.controllers.DTInterceptor.preHandle(DTInterceptor.java:73)", - "custom": { - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foobar" - }, - "exception": [ - { - "attributes": { - "foo": "bar" - }, - "code": "42", - "handled": false, - "message": "Theusernamerootisunknown", - "module": "org.springframework.http.client", - "stacktrace": [ - { - "abs_path": "/tmp/AbstractPlainSocketImpl.java", - "context": { - "post": [ - "line4", - "line5" - ], - "pre": [ - "line1", - "line2" - ] - }, - "exclude_from_grouping": false, - "filename": "AbstractPlainSocketImpl.java", - "function": "connect", - "library_frame": true, - "line": { - "column": 4, - "context": "3", - "number": 3 - }, - "module": "java.net", - "vars": { - "key": "value" - } - }, - { - "exclude_from_grouping": false, - "filename": "AbstractClientHttpRequest.java", - "function": "execute", - "line": { - "number": 102 - }, - "vars": { - "key": "value" - } - } - ], - "type": "java.net.UnknownHostException" - }, - { - "message": "something wrong writing a file", - "type": "InternalDbError" - }, - { - "message": "disk spinning way too fast", - "type": "VeryInternalDbError" - }, - { - "message": "on top of it,internet doesn't work", - "parent": 1, - "type": "ConnectionError" - } - ], - "grouping_key": "9a4054e958afe722b5877e8fac578ff3", - "id": "9876543210abcdeffedcba0123456789", - "log": { - "level": "error", - "logger_name": "http404", - "message": "Request method 'POST' not supported", - "param_message": "Request method 'POST' /events/:event not supported", - "stacktrace": [ - { - "abs_path": "/tmp/Socket.java", - "classname": "Request::Socket", - "context": { - "post": [ - "line4", - "line5" - ], - "pre": [ - "line1", - "line2" - ] - }, - "exclude_from_grouping": false, - "filename": "Socket.java", - "function": "connect", - "library_frame": true, - "line": { - "column": 4, - "context": "line3", - "number": 3 - }, - "module": "java.net", - "vars": { - "key": "value" - } - }, - { - "abs_path": "/tmp/SimpleBufferingClientHttpRequest.java", - "exclude_from_grouping": false, - "filename": "SimpleBufferingClientHttpRequest.java", - "function": "executeInternal", - "line": { - "number": 102 - }, - "vars": { - "key": "value" - } - } - ] - } - }, "host": { "architecture": "amd64", - "hostname": "node-name", + "hostname": "8ec7ceb99074", "ip": "192.0.0.1", "name": "host1", "os": { "platform": "Linux" } }, - "http": { - "request": { - "body": { - "original": "HelloWorld" - }, - "cookies": { - "c1": "v1", - "c2": "v2" - }, - "env": { - "GATEWAY_INTERFACE": "CGI/1.1", - "SERVER_SOFTWARE": "nginx" - }, - "headers": { - "Content-Length": [ - "0" - ], - "Cookie": [ - "c1=v1", - "c2=v2" - ], - "Elastic-Apm-Traceparent": [ - "00-8c21b4b556467a0b17ae5da959b5f388-31301f1fb2998121-01" - ], - "Forwarded": [ - "for=192.168.0.1" - ], - "Host": [ - "opbeans-java:3000" - ] - }, - "method": "post", - "socket": { - "encrypted": true, - 
"remote_address": "12.53.12.1" - } - }, - "response": { - "finished": true, - "headers": { - "Content-Type": [ - "application/json" - ] - }, - "headers_sent": true, - "status_code": 200 - }, - "version": "1.1" - }, "kubernetes": { "namespace": "default", "node": { @@ -659,12 +641,10 @@ }, "labels": { "ab_testing": true, + "code": 200, "group": "experimental", - "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", - "segment": 5 - }, - "parent": { - "id": "9632587410abcdef" + "segment": 5, + "success": true }, "process": { "args": [ @@ -675,22 +655,22 @@ "title": "/usr/lib/jvm/java-10-openjdk-amd64/bin/java" }, "processor": { - "event": "error", - "name": "error" + "event": "metric", + "name": "metric" }, "service": { "environment": "production", "framework": { - "name": "Node", - "version": "1" + "name": "spring", + "version": "5.0.0" }, "language": { "name": "Java", - "version": "1.2" + "version": "10.0.2" }, - "name": "service1", + "name": "1234_service-12a3", "node": { - "name": "node-xyz" + "name": "8ec7ceb990749e79b37f6dc6cd3628633618d6ce412553a552a0fa6b69419ad4" }, "runtime": { "name": "Java", @@ -698,34 +678,22 @@ }, "version": "4.3.0" }, - "source": { - "ip": "192.168.0.1" - }, - "timestamp": { - "us": 1571657444929001 - }, - "trace": { - "id": "0123456789abcdeffedcba0123456789" + "span": { + "self_time": { + "count": 1, + "sum.us": 633 + }, + "subtype": "mysql", + "type": "db" }, "transaction": { - "id": "1234567890987654", - "sampled": true, + "breakdown.count": 12, + "duration": { + "count": 2, + "sum.us": 12 + }, + "name": "GET/", "type": "request" - }, - "url": { - "domain": "www.example.com", - "fragment": "#hash", - "full": "https://www.example.com/p/a/t/h?query=string#hash", - "original": "/p/a/t/h?query=string#hash", - "path": "/p/a/t/h", - "port": 8080, - "query": "?query=string", - "scheme": "https" - }, - "user": { - "email": "user@foo.mail", - "id": "99", - "name": "foo" } } ] diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationMetadataNullValues.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationMetadataNullValues.approved.json index a287923824a..6625ef95762 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationMetadataNullValues.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationMetadataNullValues.approved.json @@ -7,7 +7,6 @@ "version": "3.14.0" }, "error": { - "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", "id": "abcdef0123456789", "log": { "level": "custom log level", diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationMetricsets.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationMetricsets.approved.json index 4184a08cc35..28d10aad1c5 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationMetricsets.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationMetricsets.approved.json @@ -6,18 +6,9 @@ "name": "elastic-node", "version": "3.14.0" }, - "byte_counter": 1, - "dotted": { - "float": { - "gauge": 6.12 - } - }, - "double_gauge": 3.141592653589793, - "float_gauge": 9.16, "host": { "ip": "192.0.0.1" }, - "integer_gauge": 42767, "labels": { "code": 200, "some": "abc", @@ -25,20 +16,6 @@ "tag1": "one", "tag2": 2 }, - "long_gauge": 3147483648, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } - }, "process": { "pid": 1234 }, @@ -55,34 +32,21 @@ "name": "node-1" } }, - "short_counter": 227, 
"span": { "self_time": { "count": 1, - "sum": { - "us": 633.288 - } + "sum.us": 633 }, "subtype": "mysql", "type": "db" }, "transaction": { - "breakdown": { - "count": 12 - }, + "breakdown.count": 12, "duration": { "count": 2, - "sum": { - "us": 12 - } + "sum.us": 12 }, "name": "GET /", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } - }, "type": "request" }, "user": { @@ -97,15 +61,42 @@ "name": "elastic-node", "version": "3.14.0" }, - "go": { - "memstats": { - "heap": { - "sys": { - "bytes": 6520832 - } - } + "go.memstats.heap.sys.bytes": 6520832, + "host": { + "ip": "192.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" } }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, "host": { "ip": "192.0.0.1" }, @@ -129,6 +120,8 @@ "name": "node-1" } }, + "system.process.cgroup.memory.mem.limit.bytes": 2048, + "system.process.cgroup.memory.mem.usage.bytes": 1024, "user": { "email": "user@mail.com", "id": "axb123hg", @@ -164,25 +157,65 @@ "name": "node-1" } }, - "system": { - "process": { - "cgroup": { - "memory": { - "mem": { - "limit": { - "bytes": 2048 - }, - "usage": { - "bytes": 1024 - } - }, - "stats": { - "inactive_file": { - "bytes": 48 - } - } - } - } + "system.process.cgroup.cpu.cfs.period.us": 1024, + "system.process.cgroup.cpu.cfs.quota.us": 2048, + "system.process.cgroup.cpu.id": 2048, + "system.process.cgroup.cpu.stats.periods": 2048, + "system.process.cgroup.cpu.stats.throttled.ns": 2048, + "system.process.cgroup.cpu.stats.throttled.periods": 2048, + "system.process.cgroup.cpuacct.id": 2048, + "system.process.cgroup.cpuacct.total.ns": 2048, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "_metric_descriptions": { + "latency_distribution": { + "type": "histogram", + "unit": "s" + } + }, + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "host": { + "ip": "192.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "latency_distribution": { + "counts": [ + 1, + 2, + 3 + ], + "values": [ + 1.1, + 2.2, + 3.3 + ] + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" } }, "user": { diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationMinimalService.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationMinimalService.approved.json index bc593968dfd..8d8b5eaf825 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationMinimalService.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationMinimalService.approved.json @@ -1,63 +1,54 @@ { "events": [ { - "@timestamp": "2017-05-30T18:53:42.281Z", + "@timestamp": "2018-08-09T15:04:05.999Z", "agent": { "name": "elastic-node", "version": "3.14.0" }, - "go": { - "memstats": { - "heap": { - "sys": { - "bytes": 61235 - } - } + "error": { + "id": "abcdef0123456789", + "log": { + "level": "custom log level", + "message": "Cannot read property 'baz' of undefined" } }, "host": { 
"ip": "192.0.0.1" }, "processor": { - "event": "metric", - "name": "metric" + "event": "error", + "name": "error" }, "service": { "language": { "name": "ecmascript" }, "name": "1234_service-12a3" + }, + "timestamp": { + "us": 1533827045999000 } }, { - "@timestamp": "2018-08-09T15:04:05.999Z", + "@timestamp": "2017-05-30T18:53:42.281Z", "agent": { "name": "elastic-node", "version": "3.14.0" }, - "error": { - "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", - "id": "abcdef0123456789", - "log": { - "level": "custom log level", - "message": "Cannot read property 'baz' of undefined" - } - }, + "go.memstats.heap.sys.bytes": 61235, "host": { "ip": "192.0.0.1" }, "processor": { - "event": "error", - "name": "error" + "event": "metric", + "name": "metric" }, "service": { "language": { "name": "ecmascript" }, "name": "1234_service-12a3" - }, - "timestamp": { - "us": 1533827045999000 } } ] diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationOptionalTimestamps.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationOptionalTimestamps.approved.json index e4c1afa17f6..94fb271ace9 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationOptionalTimestamps.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationOptionalTimestamps.approved.json @@ -13,7 +13,6 @@ "architecture": "x64", "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "prod1.example.com", "os": { "platform": "darwin" } @@ -42,9 +41,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "prod1.example.com" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -88,7 +84,6 @@ "architecture": "x64", "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "prod1.example.com", "os": { "platform": "darwin" } @@ -120,9 +115,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "prod1.example.com" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -165,7 +157,6 @@ "architecture": "x64", "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "prod1.example.com", "os": { "platform": "darwin" } @@ -195,9 +186,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "prod1.example.com" - }, "runtime": { "name": "node", "version": "8.0.0" diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationRumErrors.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationRumErrors.approved.json index 75223646be4..7159c99723d 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationRumErrors.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationRumErrors.approved.json @@ -7,7 +7,7 @@ "version": "0.0.0" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "error": { "culprit": "test/e2e/general-usecase/bundle.js.map", @@ -31,7 +31,6 @@ "exclude_from_grouping": false, "filename": "~/test/e2e/general-usecase/bundle.js.map", "function": "invokeTask", - "library_frame": false, "line": { "column": 181, "number": 1 @@ -71,7 +70,6 @@ "type": "Error" } ], - "grouping_key": "52fbc9c2d1a61bf905b4a11c708006fd", "id": "aba2688e033848ce9c4e4005f1caa534", "log": { "message": "Uncaught Error: log timeout test error", @@ -87,10 +85,6 @@ } } ] - }, - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" } }, "http": { diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationRumTransactions.approved.json 
b/processor/stream/test_approved_es_documents/testIntakeIntegrationRumTransactions.approved.json index 8fc3791df8c..57459e69096 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationRumTransactions.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationRumTransactions.approved.json @@ -7,7 +7,7 @@ "version": "0.0.0" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "unknown" @@ -17,6 +17,11 @@ "referrer": "http://localhost:8000/test/e2e/" } }, + "network": { + "connection": { + "type": "5G" + } + }, "processor": { "event": "transaction", "name": "transaction" @@ -44,10 +49,6 @@ "tbt": 3.4 }, "id": "611f4fa950f04631", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "sampled": true, "span_count": { "started": 1 @@ -73,11 +74,16 @@ "version": "0.0.0" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "unknown" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "611f4fa950f04631" }, @@ -89,15 +95,14 @@ "name": "apm-agent-js", "version": "1.0.0" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "duration": { "us": 643000 }, - "http": { - "url": { - "original": "http://localhost:8000/test/e2e/general-usecase/span" - } - }, + "http.url.original": "http://localhost:8000/test/e2e/general-usecase/span", "id": "aaaaaaaaaaaaaaaa", "name": "transaction", "stacktrace": [ @@ -136,6 +141,9 @@ "transaction": { "id": "611f4fa950f04631" }, + "url": { + "original": "http://localhost:8000/test/e2e/general-usecase/span" + }, "user_agent": { "original": "rum-2.0" } diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationSpans.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationSpans.approved.json index a644de5a3ea..7e1ad13b4e7 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationSpans.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationSpans.approved.json @@ -29,7 +29,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -39,9 +42,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -86,9 +88,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -115,6 +114,7 @@ "id": "01af25874dec69dd" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -144,7 +144,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -154,9 +157,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -201,9 +203,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -231,6 +230,7 @@ "id": "ab45781d265894fe" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -260,7 +260,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": 
"cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -270,9 +273,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -320,9 +322,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -351,6 +350,7 @@ "id": "ab23456a89012345" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -380,7 +380,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -390,9 +393,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -437,9 +439,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -467,6 +466,7 @@ "id": "abcdef0123456789abcdef9876543210" }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -497,7 +497,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -512,13 +515,23 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "decoded_body_size": 401, + "encoded_body_size": 356, + "status_code": 200, + "transfer_size": 300.12 + } + }, "kubernetes": { "namespace": "namespace1", "node": { @@ -559,9 +572,6 @@ "version": "8" }, "name": "service1", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -591,17 +601,15 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "decoded_body_size": 401, "encoded_body_size": 356, "status_code": 200, "transfer_size": 300.12 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "1234567890aaaade", "name": "SELECT FROM product_types", "stacktrace": [ @@ -666,7 +674,11 @@ "trace": { "id": "abcdef0123456789abcdef9876543210" }, + "url": { + "original": "http://localhost:8000" + }, "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" @@ -696,7 +708,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -710,9 +725,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -757,9 +771,6 @@ "version": "8" }, "name": "backendspans", - "node": { - "name": "container-id" - }, "runtime": { "name": "node", "version": "8.0.0" @@ -794,6 +805,132 @@ "id": "01af25874dec69dd" }, "user": { + "domain": "ldap://abc", + "email": "s@test.com", + "id": "123", + "name": "john" + } + }, + { + "@timestamp": "2021-07-06T11:58:05.682Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "cloud": { + "account": { + "id": "account_id", + 
"name": "account_name" + }, + "availability_zone": "cloud_availability_zone", + "instance": { + "id": "instance_id", + "name": "instance_name" + }, + "machine": { + "type": "machine_type" + }, + "project": { + "id": "project_id", + "name": "project_name" + }, + "provider": "cloud_provider", + "region": "cloud_region", + "service": { + "name": "lambda" + } + }, + "container": { + "id": "container-id" + }, + "event": { + "outcome": "success" + }, + "host": { + "architecture": "x64", + "hostname": "prod1.example.com", + "ip": "192.0.0.1", + "os": { + "platform": "darwin" + } + }, + "kubernetes": { + "namespace": "namespace1", + "node": { + "name": "node-name" + }, + "pod": { + "name": "pod-name", + "uid": "pod-uid" + } + }, + "labels": { + "tag1": "label1" + }, + "parent": { + "id": "abcdef0123456789" + }, + "process": { + "args": [ + "node", + "server.js" + ], + "pid": 1234, + "ppid": 6789, + "title": "node" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "environment": "staging", + "framework": { + "name": "Express", + "version": "1.2.3" + }, + "language": { + "name": "ecmascript", + "version": "8" + }, + "name": "backendspans", + "runtime": { + "name": "node", + "version": "8.0.0" + }, + "version": "5.1.3" + }, + "span": { + "action": "query", + "composite": { + "compression_strategy": "exact_match", + "count": 10, + "sum": { + "us": 359298 + } + }, + "duration": { + "us": 378191 + }, + "id": "abcdef01234567", + "name": "SELECT FROM p_details", + "start": { + "us": 2830 + }, + "subtype": "postgresql", + "type": "db" + }, + "timestamp": { + "us": 1625572685682272 + }, + "trace": { + "id": "edcbaf0123456789abcdef9876543210" + }, + "transaction": { + "id": "01af25874dec69dd" + }, + "user": { + "domain": "ldap://abc", "email": "s@test.com", "id": "123", "name": "john" diff --git a/processor/stream/test_approved_es_documents/testIntakeIntegrationTransactions.approved.json b/processor/stream/test_approved_es_documents/testIntakeIntegrationTransactions.approved.json index d6daabe4465..b001fcb1ad5 100644 --- a/processor/stream/test_approved_es_documents/testIntakeIntegrationTransactions.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeIntegrationTransactions.approved.json @@ -24,7 +24,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -34,9 +37,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -141,7 +143,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -151,23 +156,20 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } + "body.original": { + "additional": { + "bar": 123, + "req": "additional information" + }, + "str": "hello world" }, "cookies": { "c1": "v1", @@ -197,12 +199,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "12.53.12.1" - } + 
"method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "decoded_body_size": 29.9, @@ -295,10 +293,6 @@ }, "id": "4340a8e0df1906ecbfa9", "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "result": "success", "sampled": true, "span_count": { @@ -317,6 +311,7 @@ "scheme": "https" }, "user": { + "domain": "ldap://abc", "id": "99", "name": "foo" }, @@ -349,7 +344,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -359,19 +357,15 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } }, "http": { "request": { - "method": "post", - "socket": { - "remote_address": "192.0.1" - } + "method": "POST" } }, "kubernetes": { @@ -434,6 +428,11 @@ "experience": { "cls": 1, "fid": 2, + "longtask": { + "count": 3, + "max": 1, + "sum": 2.5 + }, "tbt": 3.4 }, "id": "cdef4340a8e0df19", @@ -484,7 +483,10 @@ "name": "project_name" }, "provider": "cloud_provider", - "region": "cloud_region" + "region": "cloud_region", + "service": { + "name": "lambda" + } }, "container": { "id": "container-id" @@ -494,9 +496,8 @@ }, "host": { "architecture": "x64", - "hostname": "node-name", + "hostname": "prod1.example.com", "ip": "192.0.0.1", - "name": "node-name", "os": { "platform": "darwin" } @@ -551,6 +552,10 @@ }, "version": "5.1.3" }, + "session": { + "id": "sunday", + "sequence": 123 + }, "timestamp": { "us": 1533117600000000 }, diff --git a/processor/stream/test_approved_es_documents/testIntakeRUMV3Errors.approved.json b/processor/stream/test_approved_es_documents/testIntakeRUMV3Errors.approved.json index c35eb2463e9..47a7f5c8cba 100644 --- a/processor/stream/test_approved_es_documents/testIntakeRUMV3Errors.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeRUMV3Errors.approved.json @@ -7,7 +7,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "error": { "culprit": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret", @@ -42,12 +42,7 @@ "type": "Error" } ], - "grouping_key": "e5fa86c0df837d142aa0520eb2661d8d", - "id": "3661352868c17c78b773d2f1beae6d41", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - } + "id": "3661352868c17c78b773d2f1beae6d41" }, "http": { "request": { diff --git a/processor/stream/test_approved_es_documents/testIntakeRUMV3Events.approved.json b/processor/stream/test_approved_es_documents/testIntakeRUMV3Events.approved.json index 25fc7bfa09f..3518cc5d796 100644 --- a/processor/stream/test_approved_es_documents/testIntakeRUMV3Events.approved.json +++ b/processor/stream/test_approved_es_documents/testIntakeRUMV3Events.approved.json @@ -7,7 +7,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "success" @@ -19,7 +19,7 @@ "application/json" ] }, - "method": "get", + "method": "GET", "referrer": "http://localhost:8000/test/e2e/" }, "response": { @@ -38,6 +38,11 @@ "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "1ef08ac234fca23b455d9e27c660f1ab" }, @@ -81,6 +86,11 @@ "experience": { "cls": 1, "fid": 2, + "longtask": { + "count": 3, + "max": 1, + "sum": 2.5 + }, "tbt": 
3.4 }, "id": "ec2e280be8345240", @@ -113,10 +123,6 @@ } }, "name": "general-usecase-initial-p-load", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "sampled": true, "span_count": { "dropped": 1, @@ -142,20 +148,206 @@ } }, { - "@timestamp": "2018-08-01T10:00:00.004Z", + "@timestamp": "2018-08-01T10:00:00.000Z", "agent": { "name": "js-base", "version": "4.8.1" }, "client": { + "ip": "192.0.0.2" + }, + "labels": { + "testTagKey": "testTagValue" + }, + "network": { + "connection": { + "type": "5G" + } + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "environment": "prod", + "framework": { + "name": "angular", + "version": "2" + }, + "language": { + "name": "javascript", + "version": "6" + }, + "name": "apm-a-rum-test-e2e-general-usecase", + "runtime": { + "name": "v8", + "version": "8.0" + }, + "version": "0.0.1" + }, + "source": { "ip": "192.0.0.1" }, + "transaction": { + "breakdown.count": 1, + "duration": { + "count": 1, + "sum.us": 295 + }, + "name": "general-usecase-initial-p-load", + "type": "p-load" + }, + "user": { + "email": "user@email.com", + "id": "123", + "name": "John Doe" + }, + "user_agent": { + "original": "rum-2.0" + } + }, + { + "@timestamp": "2018-08-01T10:00:00.000Z", + "agent": { + "name": "js-base", + "version": "4.8.1" + }, + "client": { + "ip": "192.0.0.2" + }, + "labels": { + "testTagKey": "testTagValue" + }, + "network": { + "connection": { + "type": "5G" + } + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "environment": "prod", + "framework": { + "name": "angular", + "version": "2" + }, + "language": { + "name": "javascript", + "version": "6" + }, + "name": "apm-a-rum-test-e2e-general-usecase", + "runtime": { + "name": "v8", + "version": "8.0" + }, + "version": "0.0.1" + }, + "source": { + "ip": "192.0.0.1" + }, + "span": { + "self_time": { + "count": 1, + "sum.us": 1 + }, + "type": "Request" + }, + "transaction": { + "name": "general-usecase-initial-p-load", + "type": "p-load" + }, + "user": { + "email": "user@email.com", + "id": "123", + "name": "John Doe" + }, + "user_agent": { + "original": "rum-2.0" + } + }, + { + "@timestamp": "2018-08-01T10:00:00.000Z", + "agent": { + "name": "js-base", + "version": "4.8.1" + }, + "client": { + "ip": "192.0.0.2" + }, + "labels": { + "testTagKey": "testTagValue" + }, + "network": { + "connection": { + "type": "5G" + } + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "environment": "prod", + "framework": { + "name": "angular", + "version": "2" + }, + "language": { + "name": "javascript", + "version": "6" + }, + "name": "apm-a-rum-test-e2e-general-usecase", + "runtime": { + "name": "v8", + "version": "8.0" + }, + "version": "0.0.1" + }, + "source": { + "ip": "192.0.0.1" + }, + "span": { + "self_time": { + "count": 1, + "sum.us": 1 + }, + "type": "Response" + }, + "transaction": { + "name": "general-usecase-initial-p-load", + "type": "p-load" + }, + "user": { + "email": "user@email.com", + "id": "123", + "name": "John Doe" + }, + "user_agent": { + "original": "rum-2.0" + } + }, + { + "@timestamp": "2018-08-01T10:00:00.004Z", + "agent": { + "name": "js-base", + "version": "4.8.1" + }, + "client": { + "ip": "192.0.0.2" + }, "event": { "outcome": "unknown" }, "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -180,6 +372,9 @@ }, "version": "0.0.1" }, + 
"source": { + "ip": "192.0.0.1" + }, "span": { "duration": { "us": 2000 @@ -217,7 +412,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "unknown" @@ -225,6 +420,11 @@ "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -249,6 +449,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "duration": { "us": 106000 @@ -286,7 +489,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "destination": { "address": "localhost", @@ -295,9 +498,21 @@ "event": { "outcome": "unknown" }, + "http": { + "response": { + "decoded_body_size": 676864, + "encoded_body_size": 676864, + "transfer_size": 677175 + } + }, "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -322,6 +537,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "destination": { "service": { @@ -338,11 +556,9 @@ "decoded_body_size": 676864, "encoded_body_size": 676864, "transfer_size": 677175 - }, - "url": { - "original": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED" } }, + "http.url.original": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED", "id": "fb8f717930697299", "name": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js", "start": { @@ -360,6 +576,9 @@ "transaction": { "id": "ec2e280be8345240" }, + "url": { + "original": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED" + }, "user": { "email": "user@email.com", "id": "123", @@ -376,7 +595,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "unknown" @@ -384,6 +603,11 @@ "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -408,6 +632,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "duration": { "us": 198070 @@ -444,7 +671,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "destination": { "address": "localhost", @@ -453,9 +680,22 @@ "event": { "outcome": "success" }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "status_code": 200 + } + }, "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -480,6 +720,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "destination": { "service": { @@ -492,14 +735,12 @@ "us": 6724 }, "http": { - "method": "get", + "method": "GET", "response": { "status_code": 200 - }, - "url": { - "original": "http://localhost:8000/test/e2e/common/data.json?test=hamid" } }, + "http.url.original": "http://localhost:8000/test/e2e/common/data.json?test=hamid", "id": "5ecb8ee030749715", "name": "GET /test/e2e/common/data.json", "start": { @@ -518,6 +759,9 @@ "transaction": { "id": "ec2e280be8345240" }, + "url": { + "original": "http://localhost:8000/test/e2e/common/data.json?test=hamid" + }, "user": { "email": "user@email.com", "id": "123", @@ -534,7 +778,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "destination": { "address": "localhost", @@ -543,9 +787,22 @@ "event": { "outcome": "success" }, + "http": { + "request": { + "method": "POST" + }, + 
"response": { + "status_code": 200 + } + }, "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -570,6 +827,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "destination": { "service": { @@ -582,14 +842,12 @@ "us": 11584 }, "http": { - "method": "post", + "method": "POST", "response": { "status_code": 200 - }, - "url": { - "original": "http://localhost:8003/data" } }, + "http.url.original": "http://localhost:8003/data", "id": "27f45fd274f976d4", "name": "POST http://localhost:8003/data", "start": { @@ -608,6 +866,9 @@ "transaction": { "id": "ec2e280be8345240" }, + "url": { + "original": "http://localhost:8003/data" + }, "user": { "email": "user@email.com", "id": "123", @@ -624,7 +885,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "destination": { "address": "localhost", @@ -633,9 +894,22 @@ "event": { "outcome": "success" }, + "http": { + "request": { + "method": "POST" + }, + "response": { + "status_code": 200 + } + }, "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "bbd8bcc3be14d814" }, @@ -660,6 +934,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "action": "action", "destination": { @@ -673,14 +950,12 @@ "us": 15949 }, "http": { - "method": "post", + "method": "POST", "response": { "status_code": 200 - }, - "url": { - "original": "http://localhost:8003/fetch" } }, + "http.url.original": "http://localhost:8003/fetch", "id": "a3c043330bc2015e", "name": "POST http://localhost:8003/fetch", "start": { @@ -699,6 +974,9 @@ "transaction": { "id": "ec2e280be8345240" }, + "url": { + "original": "http://localhost:8003/fetch" + }, "user": { "email": "user@email.com", "id": "123", @@ -715,7 +993,7 @@ "version": "4.8.1" }, "client": { - "ip": "192.0.0.1" + "ip": "192.0.0.2" }, "event": { "outcome": "success" @@ -723,6 +1001,11 @@ "labels": { "testTagKey": "testTagValue" }, + "network": { + "connection": { + "type": "5G" + } + }, "parent": { "id": "ec2e280be8345240" }, @@ -747,6 +1030,9 @@ }, "version": "0.0.1" }, + "source": { + "ip": "192.0.0.1" + }, "span": { "duration": { "us": 2000 @@ -798,224 +1084,6 @@ "user_agent": { "original": "rum-2.0" } - }, - { - "@timestamp": "2018-08-01T10:00:00.000Z", - "agent": { - "name": "js-base", - "version": "4.8.1" - }, - "client": { - "ip": "192.0.0.1" - }, - "labels": { - "testTagKey": "testTagValue" - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "environment": "prod", - "framework": { - "name": "angular", - "version": "2" - }, - "language": { - "name": "javascript", - "version": "6" - }, - "name": "apm-a-rum-test-e2e-general-usecase", - "runtime": { - "name": "v8", - "version": "8.0" - }, - "version": "0.0.1" - }, - "transaction": { - "breakdown": { - "count": 1 - }, - "duration": { - "count": 1, - "sum": { - "us": 295 - } - }, - "name": "general-usecase-initial-p-load", - "type": "p-load" - }, - "user": { - "email": "user@email.com", - "id": "123", - "name": "John Doe" - }, - "user_agent": { - "original": "rum-2.0" - } - }, - { - "@timestamp": "2018-08-01T10:00:00.000Z", - "agent": { - "name": "js-base", - "version": "4.8.1" - }, - "client": { - "ip": "192.0.0.1" - }, - "labels": { - "testTagKey": "testTagValue" - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "environment": "prod", - "framework": { - "name": "angular", - "version": 
"2" - }, - "language": { - "name": "javascript", - "version": "6" - }, - "name": "apm-a-rum-test-e2e-general-usecase", - "runtime": { - "name": "v8", - "version": "8.0" - }, - "version": "0.0.1" - }, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 1 - } - }, - "type": "Request" - }, - "transaction": { - "name": "general-usecase-initial-p-load", - "type": "p-load" - }, - "user": { - "email": "user@email.com", - "id": "123", - "name": "John Doe" - }, - "user_agent": { - "original": "rum-2.0" - } - }, - { - "@timestamp": "2018-08-01T10:00:00.000Z", - "agent": { - "name": "js-base", - "version": "4.8.1" - }, - "client": { - "ip": "192.0.0.1" - }, - "labels": { - "testTagKey": "testTagValue" - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "environment": "prod", - "framework": { - "name": "angular", - "version": "2" - }, - "language": { - "name": "javascript", - "version": "6" - }, - "name": "apm-a-rum-test-e2e-general-usecase", - "runtime": { - "name": "v8", - "version": "8.0" - }, - "version": "0.0.1" - }, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 1 - } - }, - "type": "Response" - }, - "transaction": { - "name": "general-usecase-initial-p-load", - "type": "p-load" - }, - "user": { - "email": "user@email.com", - "id": "123", - "name": "John Doe" - }, - "user_agent": { - "original": "rum-2.0" - } - }, - { - "@timestamp": "2018-08-01T10:00:00.000Z", - "agent": { - "name": "js-base", - "version": "4.8.1" - }, - "client": { - "ip": "192.0.0.1" - }, - "labels": { - "tag1": "value1", - "testTagKey": "testTagValue" - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "environment": "prod", - "framework": { - "name": "angular", - "version": "2" - }, - "language": { - "name": "javascript", - "version": "6" - }, - "name": "apm-a-rum-test-e2e-general-usecase", - "runtime": { - "name": "v8", - "version": "8.0" - }, - "version": "0.0.1" - }, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 124 - } - }, - "subtype": "subtype", - "type": "Processing" - }, - "user": { - "email": "user@email.com", - "id": "123", - "name": "John Doe" - }, - "user_agent": { - "original": "rum-2.0" - } } ] } diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultCloudMetadata.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultCloudMetadata.approved.json deleted file mode 100644 index 172488d4a5e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultCloudMetadata.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 1 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultErrors.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultErrors.approved.json deleted file mode 100644 index 93ed508ba6f..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultErrors.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 5 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultEvents.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultEvents.approved.json deleted file mode 100644 index 6d6a93fd6b1..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultEvents.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 4 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidEvent.approved.json 
b/processor/stream/test_approved_stream_result/testIntegrationResultInvalidEvent.approved.json deleted file mode 100644 index 703797831cb..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidEvent.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 1, - "errors": [ - { - "document": "{ \"transaction\": { \"id\": 12345, \"trace_id\": \"0123456789abcdef0123456789abcdef\", \"parent_id\": \"abcdefabcdef01234567\", \"type\": \"request\", \"duration\": 32.592981, \"span_count\": { \"started\": 21 } } } ", - "message": "failed to validate transaction: error validating JSON: I[#] S[#] doesn't validate with \"transaction#\"\n I[#] S[#/allOf/3] allOf failed\n I[#/id] S[#/allOf/3/properties/id/type] expected string, but got number" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONEvent.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONEvent.approved.json deleted file mode 100644 index ac1e798d6f8..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONEvent.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 1, - "errors": [ - { - "document": "{ \"invalid-json\" }", - "message": "data read error: invalid character '}' after object key" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONMetadata.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONMetadata.approved.json deleted file mode 100644 index b1f27fbb794..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidJSONMetadata.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "document": "{\"metadata\": {\"invalid-json\"}}", - "message": "data read error: invalid character '}' after object key" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata.approved.json deleted file mode 100644 index da67acd9c67..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "document": "{\"metadata\": {\"user\": null}}", - "message": "failed to validate metadata: error validating JSON: I[#] S[#] doesn't validate with \"metadata#\"\n I[#] S[#/required] missing properties: \"service\"" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata2.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata2.approved.json deleted file mode 100644 index 26640294ec7..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultInvalidMetadata2.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "document": "{\"not\": \"metadata\"}", - "message": "did not recognize object type" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterAllowAll.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultLimiterAllowAll.approved.json deleted file mode 100644 index c612a4faf20..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterAllowAll.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 19 -} diff --git 
a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDeny.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDeny.approved.json deleted file mode 100644 index 8333c5a198a..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDeny.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 10, - "errors": [ - { - "message": "rate limit exceeded" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDenyAll.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDenyAll.approved.json deleted file mode 100644 index 507b15773aa..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterDenyAll.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "message": "rate limit exceeded" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitAllow.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitAllow.approved.json deleted file mode 100644 index c612a4faf20..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitAllow.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 19 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitDeny.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitDeny.approved.json deleted file mode 100644 index 8333c5a198a..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultLimiterPartiallyUsedLimitDeny.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 10, - "errors": [ - { - "message": "rate limit exceeded" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultMetadataNullValues.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultMetadataNullValues.approved.json deleted file mode 100644 index 172488d4a5e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultMetadataNullValues.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 1 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultMetrics.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultMetrics.approved.json deleted file mode 100644 index 9421fa0d0b2..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultMetrics.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 2 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultMetricsets.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultMetricsets.approved.json deleted file mode 100644 index e36e2c0c42e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultMetricsets.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 3 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultMinimalService.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultMinimalService.approved.json deleted file mode 100644 index 9421fa0d0b2..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultMinimalService.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 2 -} 
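Editor's note: the approved stream-result fixtures deleted above and below all encode the same intake response envelope, an "accepted" event count plus an optional "errors" array whose entries carry a "message" and, for per-event validation failures, the rejected "document". A minimal Go sketch of that shape (the struct and field names here are illustrative, not the server's actual types):

package example

import (
	"encoding/json"
	"fmt"
)

// streamResult mirrors the envelope captured by the deleted
// *.approved.json fixtures; type and field names are illustrative.
type streamResult struct {
	Accepted int           `json:"accepted"`
	Errors   []streamError `json:"errors,omitempty"`
}

type streamError struct {
	Message  string `json:"message"`
	Document string `json:"document,omitempty"`
}

func PrintExample() {
	// Reproduces the shape of testIntegrationResultLimiterDeny.approved.json.
	out, _ := json.MarshalIndent(streamResult{
		Accepted: 10,
		Errors:   []streamError{{Message: "rate limit exceeded"}},
	}, "", "  ")
	fmt.Println(string(out))
}
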
diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultNoLimiter.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultNoLimiter.approved.json deleted file mode 100644 index c612a4faf20..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultNoLimiter.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 19 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultOptionalTimestamps.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultOptionalTimestamps.approved.json deleted file mode 100644 index e36e2c0c42e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultOptionalTimestamps.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 3 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultQueueFull.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultQueueFull.approved.json deleted file mode 100644 index ce2dd7e6357..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultQueueFull.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "message": "queue is full" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Errors.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Errors.approved.json deleted file mode 100644 index 172488d4a5e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Errors.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 1 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Events.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Events.approved.json deleted file mode 100644 index 50057722600..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Events.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 13 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Transactions.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Transactions.approved.json deleted file mode 100644 index 9421fa0d0b2..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultRUMV3Transactions.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 2 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultReadError.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultReadError.approved.json deleted file mode 100644 index 0e4a36a0497..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultReadError.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 4, - "errors": [ - { - "message": "timeout" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultRumErrors.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultRumErrors.approved.json deleted file mode 100644 index 172488d4a5e..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultRumErrors.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 1 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultRumTransactions.approved.json 
b/processor/stream/test_approved_stream_result/testIntegrationResultRumTransactions.approved.json deleted file mode 100644 index 9421fa0d0b2..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultRumTransactions.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 2 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultShuttingDown.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultShuttingDown.approved.json deleted file mode 100644 index 83208724e96..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultShuttingDown.approved.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "message": "server is shutting down" - } - ] -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultSpans.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultSpans.approved.json deleted file mode 100644 index c3430736bfb..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultSpans.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 6 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultTransactions.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultTransactions.approved.json deleted file mode 100644 index 6d6a93fd6b1..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultTransactions.approved.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "accepted": 4 -} diff --git a/processor/stream/test_approved_stream_result/testIntegrationResultUnrecognizedEvent.approved.json b/processor/stream/test_approved_stream_result/testIntegrationResultUnrecognizedEvent.approved.json deleted file mode 100644 index 0dfff3f52d8..00000000000 --- a/processor/stream/test_approved_stream_result/testIntegrationResultUnrecognizedEvent.approved.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "accepted": 0, - "errors": [ - { - "document": "{\"tennis-court\": {\"name\": \"Centre Court, Wimbledon\"}}", - "message": "did not recognize object type" - } - ] -} diff --git a/publish/acker.go b/publish/acker.go deleted file mode 100644 index 08a0c226733..00000000000 --- a/publish/acker.go +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package publish - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/elastic/beats/v7/libbeat/beat" -) - -// waitPublishedAcker is a beat.ACKer which keeps track of the number of -// events published. waitPublishedAcker provides an interruptible Wait method -// that blocks until all events published at the time the client is closed are -// acknowledged. 
-type waitPublishedAcker struct { - active int64 // atomic - - mu sync.RWMutex - closed bool - done chan struct{} -} - -func newWaitPublishedAcker() *waitPublishedAcker { - return &waitPublishedAcker{done: make(chan struct{})} -} - -// AddEvent is called when an event has been published or dropped by the client, -// and increments a counter for published events. -func (w *waitPublishedAcker) AddEvent(event beat.Event, published bool) { - if !published { - return - } - atomic.AddInt64(&w.active, 1) -} - -// ACKEvents is called when published events have been acknowledged. -func (w *waitPublishedAcker) ACKEvents(n int) { - w.mu.RLock() - defer w.mu.RUnlock() - if atomic.AddInt64(&w.active, int64(-n)) == 0 && w.closed { - close(w.done) - } -} - -// Close closes w, unblocking Wait when all previously published events have -// been acknowledged. -func (w *waitPublishedAcker) Close() { - w.mu.Lock() - defer w.mu.Unlock() - if !w.closed { - w.closed = true - if atomic.LoadInt64(&w.active) == 0 { - close(w.done) - } - } -} - -// Wait waits for w to be closed and all previously published events to be -// acknowledged. -func (w *waitPublishedAcker) Wait(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return nil - } -} diff --git a/publish/pub.go b/publish/pub.go index 1f59b19e35c..5d0c84de203 100644 --- a/publish/pub.go +++ b/publish/pub.go @@ -26,26 +26,24 @@ import ( "github.com/pkg/errors" "go.elastic.co/apm" - "github.com/elastic/apm-server/transform" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common" ) type Reporter func(context.Context, PendingReq) error -// Publisher forwards batches of events to libbeat. It uses GuaranteedSend -// to enable infinite retry of events being processed. +// Publisher forwards batches of events to libbeat. +// // If the publisher's input channel is full, an error is returned immediately. -// Number of concurrent requests waiting for processing do depend on the configured -// queue size. As the publisher is not waiting for the outputs ACK, the total -// number requests(events) active in the system can exceed the queue size. Only -// the number of concurrent HTTP requests trying to publish at the same time is limited. +// Publisher uses GuaranteedSend to enable infinite retry of events being processed. +// +// The number of concurrent requests waiting for processing depends on the configured +// queue size in libbeat. As the publisher is not waiting for the outputs ACK, the total +// number of events active in the system can exceed the queue size. Only the number of +// concurrent HTTP requests trying to publish at the same time is limited. type Publisher struct { - stopped chan struct{} - tracer *apm.Tracer - client beat.Client - waitPublished *waitPublishedAcker - transformConfig *transform.Config + stopped chan struct{} + tracer *apm.Tracer + client beat.Client mu sync.RWMutex stopping bool @@ -53,21 +51,24 @@ type Publisher struct { } type PendingReq struct { - Transformables []transform.Transformable - Trace bool + Transformable Transformer + Trace bool +} + +// Transformer is an interface implemented by types that can be transformed into beat.Events. +type Transformer interface { + Transform(context.Context) []beat.Event } // PublisherConfig is a struct holding configuration information for the publisher. 
type PublisherConfig struct { - Info beat.Info - Pipeline string - TransformConfig *transform.Config + Info beat.Info + Pipeline string + Namespace string + Processor beat.ProcessorList } func (cfg *PublisherConfig) Validate() error { - if cfg.TransformConfig == nil { - return errors.New("TransfromConfig unspecified") - } return nil } @@ -77,34 +78,22 @@ var ( ) // newPublisher creates a new publisher instance. -//MaxCPU new go-routines are started for forwarding events to libbeat. -//Stop must be called to close the beat.Client and free resources. +// +// GOMAXPROCS goroutines are started for forwarding events to libbeat. +// Stop must be called to close the beat.Client and free resources. func NewPublisher(pipeline beat.Pipeline, tracer *apm.Tracer, cfg *PublisherConfig) (*Publisher, error) { if err := cfg.Validate(); err != nil { return nil, errors.Wrap(err, "invalid config") } - processingCfg := beat.ProcessingConfig{ - Fields: common.MapStr{ - "observer": common.MapStr{ - "type": cfg.Info.Beat, - "hostname": cfg.Info.Hostname, - "version": cfg.Info.Version, - "version_major": 8, - "id": cfg.Info.ID.String(), - "ephemeral_id": cfg.Info.EphemeralID.String(), - }, - }, - } + processingCfg := beat.ProcessingConfig{Processor: cfg.Processor} if cfg.Pipeline != "" { processingCfg.Meta = map[string]interface{}{"pipeline": cfg.Pipeline} } p := &Publisher{ - tracer: tracer, - stopped: make(chan struct{}), - waitPublished: newWaitPublishedAcker(), - transformConfig: cfg.TransformConfig, + tracer: tracer, + stopped: make(chan struct{}), // One request will be actively processed by the // worker, while the other concurrent requests will be buffered in the queue. @@ -114,7 +103,6 @@ func NewPublisher(pipeline beat.Pipeline, tracer *apm.Tracer, cfg *PublisherConf client, err := pipeline.ConnectWith(beat.ClientConfig{ PublishMode: beat.GuaranteedSend, Processing: processingCfg, - ACKHandler: p.waitPublished, }) if err != nil { return nil, err @@ -142,7 +130,9 @@ func NewPublisher(pipeline beat.Pipeline, tracer *apm.Tracer, cfg *PublisherConf // indefinitely. // // The worker will drain the queue on shutdown, but no more requests will be -// published after Stop returns. +// published after Stop returns. Events may still exist in the libbeat pipeline +// after Stop returns; the caller is responsible for installing an ACKer as +// necessary. func (p *Publisher) Stop(ctx context.Context) error { // Prevent additional requests from being enqueued. p.mu.Lock() @@ -156,16 +146,12 @@ func (p *Publisher) Stop(ctx context.Context) error { // important here: // (1) wait for pendingRequests to be drained and published (p.stopped) // (2) close the beat.Client to prevent more events being published - // (3) wait for published events to be acknowledged select { case <-ctx.Done(): return ctx.Err() case <-p.stopped: } - if err := p.client.Close(); err != nil { - return err - } - return p.waitPublished.Wait(ctx) + return p.client.Close() } // Send tries to forward pendingReq to the publishers worker. If the queue is full, @@ -173,10 +159,6 @@ func (p *Publisher) Stop(ctx context.Context) error { // // Calling Send after Stop will return an error without enqueuing the request. 
func (p *Publisher) Send(ctx context.Context, req PendingReq) error { - if len(req.Transformables) == 0 { - return nil - } - p.mu.RLock() defer p.mu.RUnlock() if p.stopping { @@ -210,17 +192,8 @@ func (p *Publisher) processPendingReq(ctx context.Context, req PendingReq) { defer tx.End() ctx = apm.ContextWithTransaction(ctx, tx) } - - for _, transformable := range req.Transformables { - events := transformTransformable(ctx, transformable, p.transformConfig) - span := tx.StartSpan("PublishAll", "Publisher", nil) - p.client.PublishAll(events) - span.End() - } -} - -func transformTransformable(ctx context.Context, t transform.Transformable, cfg *transform.Config) []beat.Event { - span, ctx := apm.StartSpan(ctx, "Transform", "Publisher") + events := req.Transformable.Transform(ctx) + span := tx.StartSpan("PublishAll", "Publisher", nil) defer span.End() - return t.Transform(ctx, cfg) + p.client.PublishAll(events) } diff --git a/publish/pub_test.go b/publish/pub_test.go index 7f4856d96c1..e206eee2e59 100644 --- a/publish/pub_test.go +++ b/publish/pub_test.go @@ -36,7 +36,6 @@ import ( "github.com/elastic/beats/v7/libbeat/publisher/queue/memqueue" "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" ) func TestPublisherStop(t *testing.T) { @@ -44,9 +43,7 @@ func TestPublisherStop(t *testing.T) { // so we can simulate a pipeline that blocks indefinitely. pipeline := newBlockingPipeline(t) publisher, err := publish.NewPublisher( - pipeline, apmtest.DiscardTracer, &publish.PublisherConfig{ - TransformConfig: &transform.Config{}, - }, + pipeline, apmtest.DiscardTracer, &publish.PublisherConfig{}, ) require.NoError(t, err) defer func() { @@ -61,9 +58,7 @@ func TestPublisherStop(t *testing.T) { // time has elapsed. 
for { err := publisher.Send(context.Background(), publish.PendingReq{ - Transformables: []transform.Transformable{makeTransformable( - beat.Event{Fields: make(common.MapStr)}, - )}, + Transformable: makeTransformable(beat.Event{Fields: make(common.MapStr)}), }) if err == publish.ErrFull { break @@ -91,9 +86,7 @@ func TestPublisherStopShutdownInactive(t *testing.T) { publisher, err := publish.NewPublisher( newBlockingPipeline(t), apmtest.DiscardTracer, - &publish.PublisherConfig{ - TransformConfig: &transform.Config{}, - }, + &publish.PublisherConfig{}, ) require.NoError(t, err) @@ -119,16 +112,16 @@ func newBlockingPipeline(t testing.TB) *pipeline.Pipeline { return pipeline } -func makeTransformable(events ...beat.Event) transform.Transformable { - return transformableFunc(func(ctx context.Context, cfg *transform.Config) []beat.Event { +func makeTransformable(events ...beat.Event) publish.Transformer { + return transformableFunc(func(ctx context.Context) []beat.Event { return events }) } -type transformableFunc func(context.Context, *transform.Config) []beat.Event +type transformableFunc func(context.Context) []beat.Event -func (f transformableFunc) Transform(ctx context.Context, cfg *transform.Config) []beat.Event { - return f(ctx, cfg) +func (f transformableFunc) Transform(ctx context.Context) []beat.Event { + return f(ctx) } type mockClient struct{} diff --git a/sampling/sampling.go b/sampling/sampling.go index 2504b750974..60ab9b5e909 100644 --- a/sampling/sampling.go +++ b/sampling/sampling.go @@ -21,7 +21,6 @@ import ( "context" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" "github.com/elastic/beats/v7/libbeat/monitoring" ) @@ -30,30 +29,28 @@ var ( transactionsDroppedCounter = monitoring.NewInt(monitoringRegistry, "transactions_dropped") ) -// NewDiscardUnsampledReporter returns a publish.Reporter which discards -// unsampled transactions before deferring to reporter. +// NewDiscardUnsampledBatchProcessor returns a model.BatchProcessor which +// discards unsampled transactions. // -// The returned publish.Reporter does not guarantee order preservation of -// reported events. -func NewDiscardUnsampledReporter(reporter publish.Reporter) publish.Reporter { - return func(ctx context.Context, req publish.PendingReq) error { - var dropped int64 - events := req.Transformables +// The returned model.BatchProcessor does not guarantee order preservation +// of events retained in the batch. 
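The order caveat follows from the function below, which filters the batch in place using the swap-remove idiom: a rejected element is swapped with the last element and the slice is truncated, so nothing is allocated but the survivors are reordered. A standalone illustration:

```go
// filterInPlace keeps the elements for which keep returns true,
// using swap-remove. No allocations, but the relative order of the
// retained elements is not preserved.
func filterInPlace(xs []int, keep func(int) bool) []int {
	for i := 0; i < len(xs); {
		if keep(xs[i]) {
			i++
			continue
		}
		n := len(xs)
		xs[i], xs[n-1] = xs[n-1], xs[i]
		xs = xs[:n-1]
	}
	return xs
}
```

For example, filterInPlace([]int{1, 2, 3, 4}, isEven) returns [4 2]: the rejected 1 is swapped out for the trailing 4, which then survives ahead of the 2.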
+func NewDiscardUnsampledBatchProcessor() model.BatchProcessor { + return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + events := *batch for i := 0; i < len(events); { - tx, ok := events[i].(*model.Transaction) - if !ok || tx.Sampled == nil || *tx.Sampled { + event := events[i] + if event.Processor != model.TransactionProcessor || event.Transaction == nil || event.Transaction.Sampled { i++ continue } - n := len(req.Transformables) + n := len(events) events[i], events[n-1] = events[n-1], events[i] events = events[:n-1] - dropped++ } - if dropped > 0 { - transactionsDroppedCounter.Add(dropped) + if dropped := len(*batch) - len(events); dropped > 0 { + transactionsDroppedCounter.Add(int64(dropped)) } - req.Transformables = events - return reporter(ctx, req) - } + *batch = events + return nil + }) } diff --git a/sampling/sampling_test.go b/sampling/sampling_test.go index 16452b3296a..fecaa40db55 100644 --- a/sampling/sampling_test.go +++ b/sampling/sampling_test.go @@ -22,42 +22,54 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" "github.com/elastic/apm-server/sampling" - "github.com/elastic/apm-server/transform" "github.com/elastic/beats/v7/libbeat/monitoring" ) -func TestNewDiscardUnsampledReporter(t *testing.T) { - var reported []transform.Transformable - reporter := sampling.NewDiscardUnsampledReporter( - func(ctx context.Context, req publish.PendingReq) error { - reported = req.Transformables - return nil - }, - ) +func TestNewDiscardUnsampledBatchProcessor(t *testing.T) { + batchProcessor := sampling.NewDiscardUnsampledBatchProcessor() - t1 := &model.Transaction{} - t2 := &model.Transaction{Sampled: newBool(false)} - t3 := &model.Transaction{Sampled: newBool(true)} + t1 := &model.Transaction{Sampled: false} + t2 := &model.Transaction{Sampled: true} span := &model.Span{} + t3 := &model.Transaction{Sampled: false} + t4 := &model.Transaction{Sampled: true} + + batch := model.Batch{{ + Processor: model.TransactionProcessor, + Transaction: t1, + }, { + Processor: model.TransactionProcessor, + Transaction: t2, + }, { + Processor: model.SpanProcessor, + Span: span, + // Transaction.Sampled should be disregarded, + // as Processor == SpanProcessor, i.e. this is + // a span event with transaction fields. + Transaction: &model.Transaction{}, + }, { + Processor: model.TransactionProcessor, + Transaction: t3, + }, { + Processor: model.TransactionProcessor, + Transaction: t4, + }} - reporter(context.Background(), publish.PendingReq{ - Transformables: []transform.Transformable{t1, t2, t3, span}, - }) + err := batchProcessor.ProcessBatch(context.Background(), &batch) + assert.NoError(t, err) - // Note that t3 gets sent to the back of the slice; - // this reporter is not order-preserving. - require.Len(t, reported, 3) - assert.Equal(t, t1, reported[0]) - assert.Equal(t, span, reported[1]) - assert.Equal(t, t3, reported[2]) + // Note: this processor is not order-preserving. 
+ assert.Equal(t, model.Batch{ + {Processor: model.TransactionProcessor, Transaction: t4}, + {Processor: model.TransactionProcessor, Transaction: t2}, + {Processor: model.SpanProcessor, Span: span, Transaction: &model.Transaction{}}, + }, batch) expectedMonitoring := monitoring.MakeFlatSnapshot() - expectedMonitoring.Ints["transactions_dropped"] = 1 + expectedMonitoring.Ints["transactions_dropped"] = 2 snapshot := monitoring.CollectFlatSnapshot( monitoring.GetRegistry("apm-server.sampling"), @@ -66,7 +78,3 @@ func TestNewDiscardUnsampledReporter(t *testing.T) { ) assert.Equal(t, expectedMonitoring, snapshot) } - -func newBool(v bool) *bool { - return &v -} diff --git a/script/are_kibana_saved_objects_updated.py b/script/are_kibana_saved_objects_updated.py deleted file mode 100755 index 3a1ecf28dd5..00000000000 --- a/script/are_kibana_saved_objects_updated.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import print_function - -import argparse - -import requests -import os -import json -import jsondiff -import sys -try: - from urlparse import urljoin, urlparse -except ImportError: - from urllib.parse import urljoin, urlparse - - -def json_val(v1, v2): - try: - return json.loads(v1), json.loads(v2) - except: - return v1, v2 - - -def find_key(item): - if "id" in item: - return "id" - elif "name" in item: - return "name" - elif "type" in item: - return "type" - elif "query" in item: - return "query" - elif "value" in item: - return "value" - else: - return "" - - -def find_item(inp, key, val): - for entry in inp: - if not isinstance(entry, dict): - return "" - if key in entry and entry[key] == val: - return entry - return "" - - -def build_key(k1, k2): - if k1 == "": - return k2 - if k2 == "": - return k1 - return "{}.{}".format(k1, k2) - - -def iterate(val_id, key, v1, v2, apm_v1=True): - ret_val = 0 - if isinstance(v1, dict) and isinstance(v2, dict): - for k, v in v1.items(): - ret_val = max(ret_val, iterate(val_id, build_key(key, k), *json_val(v, v2[k] if k in v2 else ""), - apm_v1=apm_v1)) - if ret_val == 0: - for k, v in v2.items(): - ret_val = max(ret_val, iterate(val_id, build_key(key, k), *json_val(v, v1[k] if k in v1 else ""), - apm_v1=not apm_v1)) - elif isinstance(v1, list) and isinstance(v2, list): - v1, v2 = json_val(v1, v2) - # assumption: an array only contains items of same type - if len(v1) > 0 and isinstance(v1[0], dict): - for item in v1: - qkey = find_key(item) - if qkey == "": - print("Script is missing type to compare {}".format(item)) - return 3 - - item2 = find_item(v2, qkey, item[qkey]) - ret_val = max(ret_val, iterate(val_id, build_key(key, "{}={}".format(qkey, item[qkey])), item, item2, - apm_v1=apm_v1)) - else: - v1, v2 = sorted(v1), sorted(v2) - for item1, item2 in zip(v1, v2): - ret_val = max(ret_val, iterate(val_id, key, *json_val(item1, item2), apm_v1=apm_v1)) - else: - d = jsondiff.JsonDiffer(syntax='symmetric').diff(*json_val(v1, v2)) - if d: - if key == "attributes.title" or key == "attributes.fields.name=transaction.marks.*.*": - return ret_val - ret_val = 2 - print("Difference for id '{}' for key '{}'".format(val_id, key)) - try: - print(json.dumps(d, indent=4)) - except: - print(d) - v1_label, v2_label = "APM Server", "Kibana" - if not apm_v1: - v1_label, v2_label = v2_label, v1_label - print("Value in {}: {!r}".format(v1_label, v1)) - print("Value in {}: {!r}".format(v2_label, v2)) - print("---") - return ret_val - - -def get_kibana_commit(branch): - """ - Looks up an open PR in Kibana against `branch`, and with 'apm', 
'update', and 'index pattern' in the title (case insensitive). - If found, it is assumed to be a PR updating the Kibana index pattern - so this tests compares the content against - the one in that PR - Limitations: - - `index_pattern.json` must be found in HEAD (so in case of being amended, it needs to be force-pushed) - - returns the last PR open against a given branch, which might be wrong if there are several updated at a time. - """ - # TODO(axw) outsource to PyGitHub - rsp = requests.get("https://api.github.com/repos/elastic/kibana/pulls") - while rsp.status_code == 200: - for pr in rsp.json(): - matches_branch = pr['base']['ref'] == branch - matches_index_pattern_update = all( - token in pr['title'].lower() for token in ['apm', 'update', 'index pattern']) - if matches_branch and matches_index_pattern_update: - return pr['head']['sha'] - # Parse "next" link - links = requests.utils.parse_header_links(rsp.headers['link']) - links = dict((link['rel'], link['url']) for link in links) - next_url = links.get('next', None) - if next_url is None: - break - rsp = requests.get(next_url) - return None - - -def load_kibana_index_pattern_file(p): - with open(p) as f: - return json.load(f) - - -def load_kibana_index_pattern_url(p): - rsp = requests.get(p) - rsp.raise_for_status() - return rsp.json() - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--branch', default='master') - parser.add_argument('-I', '--index-pattern', - default='src/plugins/apm_oss/server/tutorial/index_pattern.json', - help='index-pattern file path') - parser.add_argument('-P', '--repo-path', - default='https://raw.githubusercontent.com/elastic/kibana/', - help='base repository path') - parser.add_argument('-C', '--commit', help='Commit sha to get the index-pattern from') - parser.add_argument("gen_index_pattern", type=argparse.FileType(mode="r"), help="expected index pattern") - args = parser.parse_args() - - # load expected kibana index pattern from url or local file - if args.repo_path.startswith("file://"): - parsed = urlparse(args.repo_path) - path = os.path.join(parsed.path, args.index_pattern) - load_kibana_index_pattern = load_kibana_index_pattern_file - else: - ref = args.commit - if ref is None: - ref = get_kibana_commit(args.branch) - if ref is None: - ref = args.branch - path = urljoin(args.repo_path, "/".join([ref, args.index_pattern])) - load_kibana_index_pattern = load_kibana_index_pattern_url - - # load index pattern sync'd to kibana - print("---- Comparing Generated Index Pattern with " + path) - sync_index_pattern = load_kibana_index_pattern(path) - - # load generated index pattern - gen_index_pattern = json.load(args.gen_index_pattern)["objects"][0] - gen_index_pattern["attributes"].pop("title") # detects title set in sync'd pattern - - # decode fields, they are json - sync_index_pattern["attributes"]["fields"] = json.loads(sync_index_pattern["attributes"].pop("fields")) - gen_index_pattern["attributes"]["fields"] = json.loads(gen_index_pattern["attributes"].pop("fields")) - - exit_val = 0 - exit_val = max(exit_val, iterate(sync_index_pattern["id"], "", gen_index_pattern, sync_index_pattern)) - - # double check that there is no difference - if exit_val == 0: - d = jsondiff.JsonDiffer(syntax='symmetric').diff(sync_index_pattern, gen_index_pattern) - if d: - print("index patterns differ: ", d) - return 5 - print("up-to-date") - - return exit_val - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/script/autopep8_all.sh b/script/autopep8_all.sh new file mode 100755 index 
00000000000..539c607a2ab --- /dev/null +++ b/script/autopep8_all.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +AUTOPEP8FLAGS=--max-line-length=120 + +exec find . -name '*.py' -and -not -path '*build/*' -exec autopep8 $AUTOPEP8FLAGS $* '{}' + diff --git a/script/check_docker_compose.sh b/script/check_docker_compose.sh new file mode 100755 index 00000000000..5dab6bd706c --- /dev/null +++ b/script/check_docker_compose.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# +# This checks that image versions defined in docker-compose.yml are +# up to date for the given branch name (master, 7.x, 7.13, etc.) +# +# Example usage: ./check_docker_compose.sh 7.x +set -e + +SDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BRANCH=$* +LATEST_SNAPSHOT_VERSION=$($SDIR/latest_snapshot_version.py $BRANCH) + +# latest_snapshot_version.py returns "" if $BRANCH has no snapshot yet. +# If no snapshot is available, just exit. +[ -n "$LATEST_SNAPSHOT_VERSION" ] || exit 0 + +# Check docker.elastic.co images listed in docker-compose.yml are up to date. +# Ignore any images that don't end with "-SNAPSHOT", such as package-registry. +IMAGES=$(grep 'image: docker.elastic.co.*-SNAPSHOT' $SDIR/../docker-compose.yml | sed 's/.*image: \(.*\)/\1/') +for IMAGE in $IMAGES; do + # When using pinned snapshot versions the format is <version>-<commit>-SNAPSHOT, + # therefore disregard the "<commit>-" + IMAGE_TAG=$(echo "$IMAGE" | cut -d: -f2 | sed 's#\(.*\)-\(.*\)-\(SNAPSHOT\)#\1-\3#') + + if [ "$IMAGE_TAG" = "$LATEST_SNAPSHOT_VERSION" ]; then + printf "docker-compose.yml: image %s up to date (latest '%s' snapshot version %s)\n" "$IMAGE" "$BRANCH" "$LATEST_SNAPSHOT_VERSION" + else + printf "docker-compose.yml: image %s is out of date (latest '%s' snapshot version is %s)\n" "$IMAGE" "$BRANCH" "$LATEST_SNAPSHOT_VERSION" + exit 1 + fi +done diff --git a/script/check_goimports.sh b/script/check_goimports.sh new file mode 100755 index 00000000000..fd8a4c73e9a --- /dev/null +++ b/script/check_goimports.sh @@ -0,0 +1,9 @@ +#!/bin/sh +set -e + +out=$(GOIMPORTSFLAGS=-l ./script/goimports.sh) +if [ -n "$out" ]; then + out=$(echo $out | sed 's/ /\n - /') + printf "goimports differs:\n - $out\n" >&2 + exit 1 +fi diff --git a/script/common.bash b/script/common.bash index 76c31b703f6..b59d4114e8f 100644 --- a/script/common.bash +++ b/script/common.bash @@ -41,11 +41,26 @@ get_go_version() { setup_go_root() { local version=${1} - # Setup GOROOT and add go to the PATH. - GIMME=${_sdir}/gimme/gimme - debug "Gimme version $(${GIMME} version)" - ${GIMME} "${version}" > /dev/null - source "${HOME}/.gimme/envs/go${version}.env" 2> /dev/null + # Use the current Go installation if the given Go version is already + # installed and configured. + if command -v go &>/dev/null ; then + debug "Found Go. Checking version..." + FOUND_GO_VERSION=$(go version|awk '{print $3}'|sed s/go//) + if [ "$FOUND_GO_VERSION" == "$version" ] ; then + debug "Versions match. No need to install Go. Exiting." + FOUND_GO="true" + fi + fi + + # Install Go with gimme in case the given Go version is not + # installed. + if [ -z $FOUND_GO ] ; then + # Setup GOROOT and add go to the PATH.
+ GIMME=${_sdir}/gimme/gimme + debug "Gimme version $(${GIMME} version)" + ${GIMME} "${version}" > /dev/null + source "${HOME}/.gimme/envs/go${version}.env" 2> /dev/null + fi debug "$(go version)" } diff --git a/script/copy_package.py b/script/copy_package.py new file mode 100644 index 00000000000..51054d00a3a --- /dev/null +++ b/script/copy_package.py @@ -0,0 +1,91 @@ +from functools import cmp_to_key + +import argparse +import os +import shutil +import subprocess +import sys + +import yaml + + +def semver_sorter(a, b): + a_list = a.split("-") + b_list = b.split("-") + version_cmp = trivial_cmp(a_list[0], b_list[0]) + if version_cmp != 0: + return version_cmp + if len(a_list) == 1: + return 1 + if len(b_list) == 1: + return -1 + return trivial_cmp(a_list[1], b_list[1]) + + +def trivial_cmp(a, b): + if a > b: + return 1 + elif b > a: + return -1 + return 0 + + +def bump(v): + tokens = v.split(".") + tokens[-1] = str(int(tokens[-1]) + 1) + return ".".join(tokens) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--dst', help='directory of the package-storage repo', default="../../package-storage") + parser.add_argument('--final', action='store_true') + parser.add_argument('--dry', action='store_true', help='dont copy data') + args = parser.parse_args() + + src = "../apmpackage/apm/" + + # Get the version from manifest.yml + with open(src + 'manifest.yml', 'r') as f: + manifest = yaml.safe_load(f) + original_version = manifest['version'] + + # find and sort published versions + dst = os.path.join(args.dst, "packages/apm/") + published_versions = [os.path.basename(f) for f in os.listdir(dst)] + published_versions.sort(key=cmp_to_key(semver_sorter)) + published_versions.reverse() + + # resolve the next version + # only patch and dev might be automatically bumped + next_version = original_version + for published in published_versions: + if original_version == published: + raise Exception("Version already published") + if original_version in published: + if not args.final: + # development version released, bump it + # eg. 0.1.0-dev.3 -> 0.1.0-dev.4 + next_version = bump(published) + break + + if next_version == original_version and not args.final: + # version never released, create the first development version out of it + next_version = next_version + "-dev.1" + + dst = os.path.join(dst, next_version) + print("from " + src + " to " + dst) + + if args.dry: + sys.exit(0) + + # copy over the package and replace version in manifest and pipeline names + shutil.copytree(src, dst) + subprocess.check_call('rm -rf {0}'.format(os.path.join(dst, 'README.template.md')), shell=True) + cmd = 'find {0} -not -name "*.png" -type f -exec sed -i -e "s/{1}/{2}/g" {{}} \\;'.format( + dst, original_version, next_version) + + out = subprocess.check_output(cmd, shell=True) + if out: + print(out) + print("Done") diff --git a/script/generate_notice.py b/script/generate_notice.py index 353208fc0fb..09de90c917e 100644 --- a/script/generate_notice.py +++ b/script/generate_notice.py @@ -12,44 +12,39 @@ import subprocess import fnmatch import textwrap +import sys +import tempfile DEFAULT_BUILD_TAGS = "darwin,linux,windows" -COPYRIGHT_YEAR_BEGIN = "2017" +# Get the beats repo root directory, making sure it's downloaded first. 
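generate_notice.py now resolves everything by shelling out to the go tool: go list -m to locate the beats module on disk (the lines that follow), and go list -deps -json in read_go_deps, whose output is a stream of concatenated JSON objects consumed by a raw_decode loop. In Go the same stream can be consumed directly with json.Decoder; a sketch under those assumptions:

```go
import (
	"bytes"
	"encoding/json"
	"io"
	"os/exec"
)

// Pkg holds the subset of `go list -deps -json` output used here.
type Pkg struct {
	Standard bool
	Module   *struct {
		Path string
		Main bool
	}
}

// listDeps mirrors read_go_deps: it decodes the concatenated JSON
// objects emitted by `go list -deps -json`, one package per object.
func listDeps(pkgs ...string) ([]Pkg, error) {
	args := append([]string{"list", "-deps", "-json"}, pkgs...)
	out, err := exec.Command("go", args...).Output()
	if err != nil {
		return nil, err
	}
	var deps []Pkg
	dec := json.NewDecoder(bytes.NewReader(out))
	for {
		var pkg Pkg
		if err := dec.Decode(&pkg); err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		deps = append(deps, pkg)
	}
	return deps, nil
}
```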
+subprocess.run(["go", "mod", "download", "github.com/elastic/beats/..."], check=True) +BEATS_DIR = subprocess.check_output( + ["go", "list", "-m", "-f", "{{.Dir}}", "github.com/elastic/beats/..."]).decode("utf-8").strip() +# notice_overrides holds additional overrides entries for go-licence-detector. +notice_overrides = [ + {"name": "github.com/elastic/beats/v7", "licenceType": "Elastic"}, + {"name": "github.com/golang/glog", "licenceType": "Apache-2.0"} +] -def read_file(filename): - if not os.path.isfile(filename): - print("File not found {}".format(filename)) - return "" - with open(filename, 'r', encoding='utf-8') as f: - return f.read() +# Additional third-party, non-source code dependencies, to add to the CSV output. +additional_third_party_deps = [{ + "name": "Red Hat Universal Base Image minimal", + "version": "8", + "url": "https://catalog.redhat.com/software/containers/ubi8/ubi-minimal/5c359a62bed8bd75a2c3fba8", + "license": "Custom;https://www.redhat.com/licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf", + "sourceURL": "https://oss-dependencies.elastic.co/red-hat-universal-base-image-minimal/8/ubi-minimal-8-source.tar.gz", +}] def read_go_deps(main_packages, build_tags): """ - read_go_deps returns a dictionary of modules, with the module path - as the key and the value being a dictionary holding information about - the module. Main modules are excluded; only dependencies are returned. - - The module dict holds the following keys: - - Dir (required) - Local filesystem directory holding the module contents. - e.g. "$HOME/go/pkg/mod/github.com/elastic/go-txfile@v0.0.7" - - - Path (required) - Module path. e.g. "github.com/elastic/beats". - - - Replacement (optional) - Replacement module path. e.g. "../beats", or "github.com/elastic/sarama". - - - Version (optional) - Module version, excluding timestamp/revision and +incompatible suffix. - If the module has a replacement, this holds the replacement module's version. + read_go_deps returns a list of module dependencies in JSON format. + Main modules are excluded; only dependencies are returned. - - Revision (optional) - VCS revision hash, extracted from module version. - If the module has a replacement, this holds the replacement module's revision. + Unlike `go list -m all`, this function excludes modules that are only + required for running tests. """ go_list_args = ["go", "list", "-deps", "-json"] if build_tags: @@ -63,317 +58,91 @@ def read_go_deps(main_packages, build_tags): break pkg, end = decoder.raw_decode(output) output = output[end:] - if 'Standard' in pkg: continue - module = pkg['Module'] - modpath = module['Path'] - if "Main" in module or modpath in modules: - continue - - modules[modpath] = module - version = module["Version"] - replace = module.get("Replace", None) - del(module["Version"]) - if replace: - module["Replacement"] = replace["Path"] - # Modules with local-filesystem replacements have no version. - version = replace.get("Version", None) - - if version: - i = version.rfind("+incompatible") - if i > 0: - version = version[:i] - version_parts = version.split("-") - if len(version_parts) == 3: # version-timestamp-revision - version = version_parts[0] - module["Revision"] = version_parts[2] - if version != "v0.0.0": - module["Version"] = version - - return modules - - -def gather_modules(main_packages, build_tags): - modules = read_go_deps(main_packages, build_tags) - - # Look for a license file in the top-level directory of each module. 
- for modpath, module in modules.items(): - moddir = module['Dir'] - filenames = os.listdir(moddir) - for filename in get_licenses(filenames): - license = {} - license_path = os.path.join(moddir, filename) - license["license_file"] = filename - license["license_contents"] = read_file(license_path) - license["license_summary"] = detect_license_summary(license["license_contents"]) - - notice_filenames = fnmatch.filter(filenames, "NOTICE*") - license["notice_files"] = { - filename: read_file(os.path.join(moddir, filename)) for filename in notice_filenames - } - - if license["license_summary"] == "UNKNOWN": - print("WARNING: Unknown license for {}: {}".format(modpath, license_path)) - module["licenses"] = module.get("licenses", []) + [license] - - return modules - - -def get_licenses(filenames): - licenses = [] - for filename in sorted(filenames): - if filename.startswith("LICENSE"): - if filename == "LICENSE.docs": - # Ignore docs-related licenses, such as CC-BY-SA-4.0. - continue - licenses.append(filename) - elif filename in ("COPYING",): - licenses.append(filename) - return licenses - - -def write_notice_file(f, modules, beat, copyright, skip_notice): - - now = datetime.datetime.now() - - # Add header - f.write("{}\n".format(beat)) - f.write("Copyright {}-{} {}\n".format(COPYRIGHT_YEAR_BEGIN, now.year, copyright)) - f.write("\n") - f.write("This product includes software developed by The Apache Software \n" + - "Foundation (http://www.apache.org/).\n\n") - - # Add licenses for 3rd party libraries - f.write("==========================================================================\n") - f.write("Third party libraries used by the {} project:\n".format(beat)) - f.write("==========================================================================\n\n") - - def maybe_write(dict_, key, print_key=None): - if key in dict_: - f.write("{}: {}\n".format(print_key or key, dict_.get(key))) - - # Sort licenses by package path, ignore upper / lower case - for modpath in sorted(modules, key=str.lower): - module = modules[modpath] - for license in module.get("licenses", []): - f.write("\n--------------------------------------------------------------------\n") - f.write("Dependency: {}\n".format(modpath)) - maybe_write(module, "Replacement", "Replacement") - maybe_write(module, "Version") - maybe_write(module, "Revision") - f.write("License type (autodetected): {}\n".format(license["license_summary"])) - if license["license_summary"] != "Apache-2.0": - f.write("Contents of \"{}\":\n".format(license["license_file"])) - f.write("\n") - f.write(textwrap.indent(license["license_contents"].rstrip(" \n"), " ")) - f.write("\n") - elif not any([fnmatch.fnmatch(modpath, pattern) for pattern in skip_notice]): - # It's an Apache License, so include only the NOTICE file - for notice_file, notice_contents in license["notice_files"].items(): - f.write("Contents of \"{}\":\n".format(notice_file)) - f.write("\n") - f.write(textwrap.indent(notice_contents.rstrip(" \n"), " ")) - f.write("\n") - - -def write_csv_file(f, modules): - def get_url(modpath): - domain = modpath.split("/", 1)[0] - if domain in ("github.com", "go.elastic.co", "go.uber.org", "golang.org", "google.golang.org", "gopkg.in"): - return "https://{}".format(modpath) - return modpath - - csvwriter = csv.writer(f) - csvwriter.writerow(["name", "url", "version", "revision", "license"]) - for modpath in sorted(modules, key=str.lower): - module = modules[modpath] - for license in module.get("licenses", []): - csvwriter.writerow([ - modpath, - 
get_url(modpath), - module.get("Version", ""), - module.get("Revision", ""), - license["license_summary"], - ]) - - -APACHE2_LICENSE_TITLES = [ - "Apache License 2.0", - "Apache License Version 2.0", - "Apache License, Version 2.0", - "licensed under the Apache 2.0 license", # github.com/zmap/zcrypto - re.sub(r"\s+", " ", """Apache License - ============== - - _Version 2.0, January 2004_"""), -] + if "Main" not in module: + modules[module['Path']] = module + return sorted(modules.values(), key=lambda module: module['Path']) + + +def go_license_detector(notice_out, deps_out, modules): + modules_json = "\n".join(map(json.dumps, modules)) + + beats_deps_template_path = os.path.join(BEATS_DIR, "dev-tools", "notice", "dependencies.csv.tmpl") + beats_notice_template_path = os.path.join(BEATS_DIR, "dev-tools", "notice", "NOTICE.txt.tmpl") + beats_overrides_path = os.path.join(BEATS_DIR, "dev-tools", "notice", "overrides.json") + beats_rules_path = os.path.join(BEATS_DIR, "dev-tools", "notice", "rules.json") + + beats_notice_template = open(beats_notice_template_path).read() + beats_overrides = open(beats_overrides_path).read() + + with tempfile.TemporaryDirectory() as tmpdir: + # Create notice overrides.json by combining the overrides from beats with apm-server specific ones. + overrides_file = open(os.path.join(tmpdir, "overrides.json"), "w") + overrides_file.write(beats_overrides) + overrides_file.write("\n") + for entry in notice_overrides: + overrides_file.write("\n") + json.dump(entry, overrides_file) + overrides_file.close() + + # Replace "Elastic Beats" with "Elastic APM Server" in the NOTICE.txt template. + notice_template_file = open(os.path.join(tmpdir, "NOTICE.txt.tmpl"), "w") + notice_template_file.write(beats_notice_template.replace("Elastic Beats", "Elastic APM Server")) + notice_template_file.close() + + args = [ + "go", "run", "-modfile=tools/go.mod", "go.elastic.co/go-licence-detector", + "-includeIndirect", + "-overrides", overrides_file.name, + "-rules", beats_rules_path, + "-noticeTemplate", notice_template_file.name, + "-depsTemplate", beats_deps_template_path, + "-noticeOut", notice_out, + "-depsOut", deps_out, + ] + subprocess.run(args, check=True, input=modules_json.encode("utf-8")) + + +def write_notice_file(notice_filename, modules): + go_license_detector(notice_filename, "", modules) + + +def write_csv_file(csv_filename, modules): + with tempfile.TemporaryDirectory() as tmpdir: + tmp_deps_path = os.path.join(tmpdir, "dependencies.csv") + go_license_detector("", tmp_deps_path, modules) + rows = [] + fieldnames = [] + with open(tmp_deps_path) as csvfile: + reader = csv.DictReader(csvfile) + fieldnames = reader.fieldnames + rows = [row for row in reader] + with open(csv_filename, "w") as csvfile: + writer = csv.DictWriter(csvfile, fieldnames) + writer.writeheader() + for row in rows: + writer.writerow(row) + for dep in additional_third_party_deps: + writer.writerow(dep) -MIT_LICENSES = [ - re.sub(r"\s+", " ", """Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the 
Software. - """), - re.sub(r"\s+", " ", """Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies."""), - re.sub(r"\s+", " ", """Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - """), - re.sub(r"\s+", " ", """Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - """), -] - -BSD_LICENSE_CONTENTS = [ - re.sub(r"\s+", " ", """Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met:"""), - re.sub(r"\s+", " ", """Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer."""), - re.sub(r"\s+", " ", """Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
-""")] - -BSD_LICENSE_3_CLAUSE = [ - re.sub(r"\s+", " ", """Neither the name of"""), - re.sub(r"\s+", " ", """nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission.""") -] - -BSD_LICENSE_4_CLAUSE = [ - re.sub(r"\s+", " ", """All advertising materials mentioning features or use of this software - must display the following acknowledgement"""), -] - -CC_SA_4_LICENSE_TITLE = [ - "Creative Commons Attribution-ShareAlike 4.0 International" -] - -LGPL_3_LICENSE_TITLE = [ - "GNU LESSER GENERAL PUBLIC LICENSE Version 3" -] - -MPL_LICENSE_TITLES = [ - "Mozilla Public License Version 2.0", - "Mozilla Public License, version 2.0" -] - -UNIVERSAL_PERMISSIVE_LICENSE_TITLES = [ - "The Universal Permissive License (UPL), Version 1.0" -] - -ISC_LICENSE_TITLE = [ - "ISC License", -] - -ELASTIC_LICENSE_TITLE = [ - "ELASTIC LICENSE AGREEMENT", -] - - -# return SPDX identifiers from https://spdx.org/licenses/ -def detect_license_summary(content): - # replace all white spaces with a single space - content = re.sub(r"\s+", ' ', content) - # replace smart quotes with less intelligent ones - content = content.replace('\xe2\x80\x9c', '"').replace('\xe2\x80\x9d', '"') - if any(sentence in content[0:1000] for sentence in APACHE2_LICENSE_TITLES): - return "Apache-2.0" - if any(sentence in content[0:1000] for sentence in MIT_LICENSES): - return "MIT" - if all(sentence in content[0:1000] for sentence in BSD_LICENSE_CONTENTS): - if all(sentence in content[0:1000] for sentence in BSD_LICENSE_3_CLAUSE): - if all(sentence in content[0:1000] for sentence in BSD_LICENSE_4_CLAUSE): - return "BSD-4-Clause" - return "BSD-3-Clause" - else: - return "BSD-2-Clause" - if any(sentence in content[0:300] for sentence in MPL_LICENSE_TITLES): - return "MPL-2.0" - if any(sentence in content[0:3000] for sentence in CC_SA_4_LICENSE_TITLE): - return "CC-BY-SA-4.0" - if any(sentence in content[0:3000] for sentence in LGPL_3_LICENSE_TITLE): - return "LGPL-3.0" - if any(sentence in content[0:1500] for sentence in UNIVERSAL_PERMISSIVE_LICENSE_TITLES): - return "UPL-1.0" - if any(sentence in content[0:1500] for sentence in ISC_LICENSE_TITLE): - return "ISC" - if any(sentence in content[0:1500] for sentence in ELASTIC_LICENSE_TITLE): - return "ELASTIC" - - return "UNKNOWN" - - -ACCEPTED_LICENSES = [ - "Apache-2.0", - "BSD-2-Clause", - "BSD-3-Clause", - "BSD-4-Clause", - "ISC", - "MIT", - "MPL-2.0", - "UPL-1.0", - "ELASTIC", -] if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Generate the NOTICE file from package dependencies") - parser.add_argument("-b", "--beat", default="Elastic Beats", - help="Beat name") - parser.add_argument("-c", "--copyright", default="Elasticsearch BV", - help="copyright owner") + parser.add_argument("main_package", nargs="*", default=["."], + help="List of main Go packages for which dependencies should be processed") parser.add_argument("--csv", dest="csvfile", help="Output to a csv file") - parser.add_argument("-s", "--skip-notice", default=[], action="append", - help="List of modules whose NOTICE file should be skipped") parser.add_argument("--build-tags", default=DEFAULT_BUILD_TAGS, help="Comma-separated list of build tags to pass to 'go list -deps'") - parser.add_argument("main_package", nargs="*", default=["."], - help="List of main Go packages for which dependencies should be processed") args = parser.parse_args() - - # Gather modules, and check that each one has an acceptable license. 
- modules = gather_modules(args.main_package, args.build_tags) - for modpath, module in modules.items(): - licenses = module.get("licenses", None) - if not licenses: - raise Exception("Missing license in module: {}".format(modpath)) - for license in licenses: - if license["license_summary"] not in ACCEPTED_LICENSES: - raise Exception("Dependency {} has invalid {} license: {}" - .format(modpath, license["license_summary"], license["license_file"])) + modules = read_go_deps(args.main_package, args.build_tags) if args.csvfile: - with open(args.csvfile, mode='w', encoding='utf-8') as f: - write_csv_file(f, modules) + write_csv_file(args.csvfile, modules) print(args.csvfile) else: notice_filename = os.path.abspath("NOTICE.txt") - with open(notice_filename, mode='w+', encoding='utf-8') as f: - write_notice_file(f, modules, args.beat, args.copyright, args.skip_notice) + write_notice_file(notice_filename, modules) print(notice_filename) diff --git a/script/goimports.sh b/script/goimports.sh new file mode 100755 index 00000000000..0074a6009ff --- /dev/null +++ b/script/goimports.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +dirs=$(find . -maxdepth 1 -type d \! \( -name '.*' -or -name build \)) +exec goimports $GOIMPORTSFLAGS -local github.com/elastic *.go $dirs diff --git a/script/inline_schemas/inline_schemas.go b/script/inline_schemas/inline_schemas.go deleted file mode 100644 index c96e18d53e5..00000000000 --- a/script/inline_schemas/inline_schemas.go +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -const basePath = "./docs/spec/" - -func main() { - schemaPaths := []struct { - path, schemaOut, varName string - }{ - {"sourcemaps/payload.json", "model/sourcemap/generated/schema/payload.go", "PayloadSchema"}, - {"metadata.json", "model/metadata/generated/schema/metadata.go", "ModelSchema"}, - {"rum_v3_metadata.json", "model/metadata/generated/schema/rum_v3_metadata.go", "RUMV3Schema"}, - {"errors/error.json", "model/error/generated/schema/error.go", "ModelSchema"}, - {"transactions/transaction.json", "model/transaction/generated/schema/transaction.go", "ModelSchema"}, - {"spans/span.json", "model/span/generated/schema/span.go", "ModelSchema"}, - {"metricsets/metricset.json", "model/metricset/generated/schema/metricset.go", "ModelSchema"}, - {"errors/rum_v3_error.json", "model/error/generated/schema/rum_v3_error.go", "RUMV3Schema"}, - {"transactions/rum_v3_transaction.json", "model/transaction/generated/schema/rum_v3_transaction.go", "RUMV3Schema"}, - {"spans/rum_v3_span.json", "model/span/generated/schema/rum_v3_span.go", "RUMV3Schema"}, - {"metricsets/rum_v3_metricset.json", "model/metricset/generated/schema/rum_v3_metricset.go", "RUMV3Schema"}, - } - for _, schemaInfo := range schemaPaths { - file := filepath.Join(filepath.Dir(basePath), schemaInfo.path) - schemaBytes, err := ioutil.ReadFile(file) - if err != nil { - panic(err) - } - - schema, err := replaceRef(filepath.Dir(file), string(schemaBytes)) - if err != nil { - panic(err) - } - - goScript := fmt.Sprintf("package schema\n\nconst %s = `%s`\n", schemaInfo.varName, schema) - err = os.MkdirAll(path.Dir(schemaInfo.schemaOut), os.ModePerm) - if err != nil { - panic(err) - } - err = ioutil.WriteFile(schemaInfo.schemaOut, []byte(goScript), 0644) - if err != nil { - panic(err) - } - } -} - -var re = regexp.MustCompile(`\"\$ref\": \"(.*?.json)\"`) -var findAndReplace = map[string]string{} - -func replaceRef(currentDir string, schema string) (string, error) { - matches := re.FindAllStringSubmatch(schema, -1) - for _, match := range matches { - pattern := escapePattern(match[0]) - if _, ok := findAndReplace[pattern]; !ok { - s, err := read(currentDir, match[1]) - if err != nil { - panic(err) - } - findAndReplace[pattern] = trimSchemaPart(s) - } - - re := regexp.MustCompile(pattern) - schema = re.ReplaceAllLiteralString(schema, findAndReplace[pattern]) - } - return schema, nil -} - -func read(currentRelativePath string, filePath string) (string, error) { - path := filepath.Join(currentRelativePath, filePath) - file, err := ioutil.ReadFile(path) - if err != nil { - return "", err - } - return replaceRef(filepath.Dir(path), string(file)) -} - -var reDollar = regexp.MustCompile(`\$`) -var reQuote = regexp.MustCompile(`\"`) - -func escapePattern(pattern string) string { - pattern = reDollar.ReplaceAllLiteralString(pattern, `\$`) - return reQuote.ReplaceAllLiteralString(pattern, `\"`) -} - -func trimSchemaPart(part string) string { - part = strings.Trim(part, "\n") - part = strings.Trim(part, "\b") - part = strings.TrimSuffix(part, "}") - part = strings.TrimPrefix(part, "{") - part = strings.Trim(part, "\n") - return strings.Trim(part, "\b") -} diff --git a/script/jenkins/ci.ps1 b/script/jenkins/ci.ps1 deleted file mode 100755 index 770d7699d6d..00000000000 --- a/script/jenkins/ci.ps1 +++ /dev/null @@ -1,67 +0,0 @@ -function Exec { - [CmdletBinding()] - param( - [Parameter(Mandatory = $true)] - [scriptblock]$cmd, - 
[string]$errorMessage = ($msgs.error_bad_command -f $cmd) - ) - - try { - $global:lastexitcode = 0 - & $cmd - if ($lastexitcode -ne 0) { - throw $errorMessage - } - } - catch [Exception] { - throw $_ - } -} - -# Setup Go. -$env:GOPATH = $env:WORKSPACE -$env:PATH = "$env:GOPATH\bin;C:\tools\mingw64\bin;$env:PATH" -& gvm --format=powershell $(Get-Content .go-version) | Invoke-Expression - -# Write cached magefile binaries to workspace to ensure -# each run starts from a clean slate. -$env:MAGEFILE_CACHE = "$env:WORKSPACE\.magefile" - -# Configure testing parameters. -$env:TEST_COVERAGE = "true" -$env:RACE_DETECTOR = "true" - -# Install mage from vendor. -exec { go install github.com/elastic/apm-server/vendor/github.com/magefile/mage } - -echo "Fetching testing dependencies" -# TODO (elastic/beats#5050): Use a vendored copy of this. -exec { go get github.com/docker/libcompose } - -if (Test-Path "build") { Remove-Item -Recurse -Force build } -New-Item -ItemType directory -Path build\coverage | Out-Null -New-Item -ItemType directory -Path build\system-tests | Out-Null -New-Item -ItemType directory -Path build\system-tests\run | Out-Null - -echo "Building fields.yml" -exec { mage fields } - -echo "Building $env:beat" -exec { mage build } "Build FAILURE" - -echo "Unit testing $env:beat" -exec { mage goTestUnit } - -echo "System testing $env:beat" -# Get a CSV list of package names. -$packages = $(go list ./... | select-string -Pattern "/vendor/" -NotMatch | select-string -Pattern "/scripts/cmd/" -NotMatch) -$packages = ($packages|group|Select -ExpandProperty Name) -join "," -exec { go test -race -c -cover -covermode=atomic -coverpkg $packages } "go test FAILURE" - -echo "Running python tests" -choco install python -y -r --no-progress --version 3.8.1.20200110 -refreshenv -$env:PATH = "C:\Python38;C:\Python38\Scripts;$env:PATH" -$env:PYTHON_ENV = "$env:TEMP\python-env" -python --version -exec { mage pythonUnitTest } "System test FAILURE" diff --git a/script/jenkins/sync.sh b/script/jenkins/sync.sh deleted file mode 100755 index 3aadb09e0fe..00000000000 --- a/script/jenkins/sync.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -set -euox pipefail - -source ./script/common.bash - -jenkins_setup - -cleanup() { - rm -rf $TEMP_PYTHON_ENV -} -trap cleanup EXIT - -make are-kibana-objects-updated diff --git a/script/jenkins/test-install-packages.sh b/script/jenkins/test-install-packages.sh deleted file mode 100755 index b21b8e9ae43..00000000000 --- a/script/jenkins/test-install-packages.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -set -xeuo pipefail - -export MAGEFILE_VERBOSE=1 -./build/linux/mage -v testPackagesInstall diff --git a/script/latest_snapshot_version.py b/script/latest_snapshot_version.py new file mode 100755 index 00000000000..a50d43f852e --- /dev/null +++ b/script/latest_snapshot_version.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# +# Find the latest snapshot version for the specified branch. 
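The script continuing below queries the public snapshots manifest and treats HTTP 404 as "no snapshot for this branch yet" (its caller, check_docker_compose.sh, relies on the empty output in that case). A rough Go equivalent of the same lookup, for illustration:

```go
import (
	"encoding/json"
	"fmt"
	"net/http"
)

// latestSnapshotVersion returns the latest snapshot version for a
// branch, or "" when the branch has no snapshot yet (HTTP 404).
func latestSnapshotVersion(branch string) (string, error) {
	resp, err := http.Get("https://snapshots.elastic.co/latest/" + branch + ".json")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return "", nil
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %s", resp.Status)
	}
	var manifest struct {
		Version string `json:"version"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&manifest); err != nil {
		return "", err
	}
	return manifest.Version, nil
}
```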
+# +# Example usage: ./latest_snapshot_version.py 7.x + +import argparse +import requests + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('branch', type=str) + args = parser.parse_args() + + r = requests.get('https://snapshots.elastic.co/latest/{}.json'.format(args.branch)) + if r.status_code != 404: + r.raise_for_status() + print(r.json()['version']) + + +if __name__ == '__main__': + main() diff --git a/script/run_agent.sh b/script/run_agent.sh new file mode 100755 index 00000000000..783c0a90ef6 --- /dev/null +++ b/script/run_agent.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Builds an apm-server Linux binary and runs an Elastic Agent image bind-mounting apm-server in the correct directory +# It expects an Elastic Agent image to exist and apm-integration-testing running (Elasticsearch and Kibana) + +set -xe + +version=$(mage version) + +apmdir=apm-server-$version-linux-x86_64 +builddir=build/distributions/$apmdir +mkdir -p $builddir +cp -f LICENSE.txt NOTICE.txt README.md apm-server.yml $builddir +GOOS=linux go build -o $builddir/apm-server ./x-pack/apm-server + +imageid=$(docker image ls docker.elastic.co/beats/elastic-agent:"$version" -q) +sha=$(docker inspect "$imageid" | grep org.label-schema.vcs-ref | awk -F ": \"" '{print $2}' | head -c 6) +dst="/usr/share/elastic-agent/data/elastic-agent-${sha}/install/${apmdir}" + +docker run --name elastic-agent-local -it \ + --env FLEET_ENROLL=1 \ + --env FLEET_ENROLL_INSECURE=1 \ + --env FLEET_SETUP=1 \ + --env KIBANA_HOST="http://admin:changeme@kibana:5601" \ + --env KIBANA_PASSWORD="changeme" \ + --env KIBANA_USERNAME="admin" \ + --network apm-integration-testing \ + -v "$(pwd)/$builddir:$dst" \ + -p 8200:8200 --rm "${imageid}" diff --git a/script/update_kibana_objects.py b/script/update_kibana_objects.py deleted file mode 100755 index fee05cea401..00000000000 --- a/script/update_kibana_objects.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import json -import tempfile -import shlex -import shutil -import subprocess -import sys -import os - - -def exec(cmd): - """ - Executes the given command and returns its result as a string - """ - try: - out = subprocess.check_output(shlex.split(cmd)) - except subprocess.CalledProcessError as e: - print(e) - sys.exit(1) - - return out.decode("utf-8") - - -def call(cmd): - """ - Executes the given command while showing progress in stdout / stderr - """ - code = subprocess.call(shlex.split(cmd)) - if code > 0: - sys.exit(code) - - -def main(branch): - """ - Updates the index pattern in kibana in the specified branch (default "master") - - NOTES: - - `make && make update` must have been run previously - """ - - apm_bin = os.path.abspath(os.path.join(os.path.dirname(__file__), "../apm-server")) - export = exec(apm_bin + " export index-pattern") - index_pattern = json.loads(export)["objects"][0] - - remote_url = exec("git config remote.origin.url") - gh_user = remote_url.split(":")[1].split("/")[0] - print("branch: " + branch) - print("github user: " + gh_user) - - path = tempfile.mkdtemp() - print("checking out kibana in temp dir " + path) - os.chdir(path) - call("git clone --depth 1 git@github.com:" + gh_user + "/kibana.git .") - call("git remote add elastic git@github.com:elastic/kibana.git") - call("git fetch elastic " + branch) - call("git checkout -b update-apm-index-pattern-" + branch + " elastic/" + branch) - call("git pull") - - kibana_file_path = "src/plugins/apm_oss/server/tutorial/index_pattern.json" - - with open(kibana_file_path, 
'r+') as kibana_file: - data = json.load(kibana_file) - old_fields = set([item["name"] for item in json.loads(data["attributes"]["fields"])]) - new_fields = set([item["name"] for item in json.loads(index_pattern["attributes"]["fields"])]) - print("added fields :" + repr(new_fields.difference(old_fields))) - print("removed fields :" + repr(old_fields.difference(new_fields))) - - del index_pattern["attributes"]["title"] - kibana_file.seek(0) - kibana_file.write(json.dumps(index_pattern, indent=2, sort_keys=True)) - kibana_file.truncate() - - call("git add " + kibana_file_path) - call('git commit -m "update apm index pattern"') - call("git push --force origin update-apm-index-pattern-" + branch) - - print("removing " + path) - shutil.rmtree(path) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('-b', default='master', dest='branch') - args = parser.parse_args() - main(args.branch) diff --git a/script/vendor_otel.sh b/script/vendor_otel.sh new file mode 100644 index 00000000000..9f2fcae3586 --- /dev/null +++ b/script/vendor_otel.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +set -xe + +go mod edit -dropreplace go.opentelemetry.io/collector +go mod download go.opentelemetry.io/collector + +REPO_ROOT=$(go list -m -f {{.Dir}} github.com/elastic/apm-server) +MIXIN_DIR=$REPO_ROOT/internal/.otel_collector_mixin +TARGET_DIR=$REPO_ROOT/internal/otel_collector +MODULE_DIR=$(go list -m -f {{.Dir}} go.opentelemetry.io/collector) + +rm -fr $TARGET_DIR +mkdir $TARGET_DIR +rsync -cr --no-perms --no-group --chmod=ugo=rwX --delete --exclude='*_test.go' $MODULE_DIR/* $TARGET_DIR +rsync -cr --no-perms --no-group --chmod=ugo=rwX $MIXIN_DIR/* $TARGET_DIR + +go mod edit -replace go.opentelemetry.io/collector=./internal/otel_collector diff --git a/sourcemap/es_store.go b/sourcemap/es_store.go index 58f921ea356..08bc356c6cf 100644 --- a/sourcemap/es_store.go +++ b/sourcemap/es_store.go @@ -25,12 +25,15 @@ import ( "io" "io/ioutil" "net/http" + "time" "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/elastic/apm-server/elasticsearch" + logs "github.com/elastic/apm-server/log" "github.com/elastic/apm-server/utility" ) @@ -40,7 +43,7 @@ const ( ) var ( - errMsgESFailure = "failure querying ES" + errMsgESFailure = errMsgFailure + " ES" errSourcemapWrongFormat = errors.New("Sourcemapping ES Result not in expected format") ) @@ -65,18 +68,32 @@ type esSourcemapResponse struct { } `json:"hits"` } +// NewElasticsearchStore returns an instance of Store for interacting with +// sourcemaps stored in Elasticsearch.
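A hypothetical wiring of the constructor that follows; the index pattern and cache expiration shown are placeholders, not defaults introduced by this change:

```go
import (
	"time"

	"github.com/elastic/apm-server/elasticsearch"
	"github.com/elastic/apm-server/sourcemap"
)

func newSourcemapStore() (*sourcemap.Store, error) {
	// Client configuration and index pattern are illustrative only.
	client, err := elasticsearch.NewClient(elasticsearch.DefaultConfig())
	if err != nil {
		return nil, err
	}
	// Cache fetched source maps for five minutes before re-querying.
	return sourcemap.NewElasticsearchStore(client, "apm-*-sourcemap*", 5*time.Minute)
}
```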
+func NewElasticsearchStore( + c elasticsearch.Client, + index string, + expiration time.Duration, +) (*Store, error) { + logger := logp.NewLogger(logs.Sourcemap) + s := &esStore{c, index, logger} + + return newStore(s, logger, expiration) +} + func (s *esStore) fetch(ctx context.Context, name, version, path string) (string, error) { - statusCode, body, err := s.runSearchQuery(ctx, name, version, path) + resp, err := s.runSearchQuery(ctx, name, version, path) if err != nil { return "", errors.Wrap(err, errMsgESFailure) } - defer body.Close() + defer resp.Body.Close() + // handle error response - if statusCode >= http.StatusMultipleChoices { - if statusCode == http.StatusNotFound { + if resp.StatusCode >= http.StatusMultipleChoices { + if resp.StatusCode == http.StatusNotFound { return "", nil } - b, err := ioutil.ReadAll(body) + b, err := ioutil.ReadAll(resp.Body) if err != nil { return "", errors.Wrap(err, errMsgParseSourcemap) } @@ -84,17 +101,20 @@ func (s *esStore) fetch(ctx context.Context, name, version, path string) (string } // parse response - return parse(body, name, version, path, s.logger) + return parse(resp.Body, name, version, path, s.logger) } -func (s *esStore) runSearchQuery(ctx context.Context, name, version, path string) (int, io.ReadCloser, error) { - // build and encode the query +func (s *esStore) runSearchQuery(ctx context.Context, name, version, path string) (*esapi.Response, error) { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(query(name, version, path)); err != nil { - return 0, nil, err + return nil, err + } + req := esapi.SearchRequest{ + Index: []string{s.index}, + Body: &buf, + TrackTotalHits: true, } - // Perform the runSearchQuery request. - return s.client.SearchQuery(ctx, s.index, &buf) + return req.Do(ctx, s.client) } func parse(body io.ReadCloser, name, version, path string, logger *logp.Logger) (string, error) { @@ -103,7 +123,7 @@ func parse(body io.ReadCloser, name, version, path string, logger *logp.Logger) return "", err } hits := esSourcemapResponse.Hits.Total.Value - if hits == 0 { + if hits == 0 || len(esSourcemapResponse.Hits.Hits) == 0 { return emptyResult, nil } diff --git a/sourcemap/es_store_test.go b/sourcemap/es_store_test.go index d59d5a29dde..e90605a483d 100644 --- a/sourcemap/es_store_test.go +++ b/sourcemap/es_store_test.go @@ -18,55 +18,60 @@ package sourcemap import ( + "bytes" "context" + "encoding/json" + "io" "net/http" + "net/http/httptest" "testing" + "time" "github.com/go-sourcemap/sourcemap" - + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/apm-server/elasticsearch" - - "github.com/elastic/apm-server/elasticsearch/estest" logs "github.com/elastic/apm-server/log" - "github.com/elastic/apm-server/sourcemap/test" ) func Test_esFetcher_fetchError(t *testing.T) { for name, tc := range map[string]struct { - statusCode int - esBody map[string]interface{} - temporary bool + statusCode int + clientError bool + responseBody io.Reader + temporary bool }{ "es not reachable": { - statusCode: -1, temporary: true, + clientError: true, + temporary: true, }, "es bad request": { statusCode: http.StatusBadRequest, }, "empty sourcemap string": { - esBody: map[string]interface{}{ - "hits": map[string]interface{}{ - "total": map[string]interface{}{"value": 1}, - "hits": []map[string]interface{}{ - {"_source": map[string]interface{}{ - "sourcemap": 
map[string]interface{}{ - "sourcemap": ""}}}}}}, + statusCode: http.StatusOK, + responseBody: sourcemapSearchResponseBody(1, []map[string]interface{}{{ + "_source": map[string]interface{}{ + "sourcemap": map[string]interface{}{ + "sourcemap": "", + }, + }, + }}), }, } { t.Run(name, func(t *testing.T) { - statusCode := tc.statusCode - if statusCode == 0 { - statusCode = http.StatusOK + var client elasticsearch.Client + if tc.clientError { + client = newUnavailableElasticsearchClient(t) + } else { + client = newMockElasticsearchClient(t, tc.statusCode, tc.responseBody) } - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, statusCode, tc.esBody)) - require.NoError(t, err) + consumer, err := testESStore(client).fetch(context.Background(), "abc", "1.0", "/tmp") - require.Error(t, err) if tc.temporary { assert.Contains(t, err.Error(), errMsgESFailure) } else { @@ -79,14 +84,27 @@ func Test_esFetcher_fetchError(t *testing.T) { func Test_esFetcher_fetch(t *testing.T) { for name, tc := range map[string]struct { - client elasticsearch.Client - filePath string + statusCode int + responseBody io.Reader + filePath string }{ - "no sourcemap found": {client: test.ESClientWithSourcemapNotFound(t)}, - "valid sourcemap found": {client: test.ESClientWithValidSourcemap(t), filePath: "bundle.js"}, + "no sourcemap found": { + statusCode: http.StatusNotFound, + responseBody: sourcemapSearchResponseBody(0, nil), + }, + "sourcemap indicated but not found": { + statusCode: http.StatusOK, + responseBody: sourcemapSearchResponseBody(1, []map[string]interface{}{}), + }, + "valid sourcemap found": { + statusCode: http.StatusOK, + responseBody: sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(validSourcemap)}), + filePath: "bundle.js", + }, } { t.Run(name, func(t *testing.T) { - sourcemapStr, err := testESStore(tc.client).fetch(context.Background(), "abc", "1.0", "/tmp") + client := newMockElasticsearchClient(t, tc.statusCode, tc.responseBody) + sourcemapStr, err := testESStore(client).fetch(context.Background(), "abc", "1.0", "/tmp") require.NoError(t, err) if tc.filePath == "" { @@ -103,3 +121,103 @@ func Test_esFetcher_fetch(t *testing.T) { func testESStore(client elasticsearch.Client) *esStore { return &esStore{client: client, index: "apm-sourcemap", logger: logp.NewLogger(logs.Sourcemap)} } + +func sourcemapSearchResponseBody(hitsTotal int, hits []map[string]interface{}) io.Reader { + resultHits := map[string]interface{}{ + "total": map[string]interface{}{ + "value": hitsTotal, + }, + } + if hits != nil { + resultHits["hits"] = hits + } + result := map[string]interface{}{"hits": resultHits} + data, err := json.Marshal(result) + if err != nil { + panic(err) + } + return bytes.NewReader(data) +} + +func sourcemapHit(sourcemap string) map[string]interface{} { + return map[string]interface{}{ + "_source": map[string]interface{}{ + "sourcemap": map[string]interface{}{ + "sourcemap": sourcemap, + }, + }, + } +} + +// newUnavailableElasticsearchClient returns an elasticsearch.Client configured +// to send requests to an invalid (unavailable) host. 
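The helper below assigns its stub transport to a roundTripperFunc type that is referenced but not defined in this diff; presumably it is the usual function-to-http.RoundTripper adapter, along these lines:

```go
import "net/http"

// roundTripperFunc adapts a plain function to http.RoundTripper so a
// test can fake transport-level failures. This definition is an
// assumption; the type is not shown in this diff.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
	return f(r)
}
```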
+func newUnavailableElasticsearchClient(t testing.TB) elasticsearch.Client {
+	var transport roundTripperFunc = func(r *http.Request) (*http.Response, error) {
+		return nil, errors.New("client error")
+	}
+	cfg := elasticsearch.DefaultConfig()
+	cfg.Hosts = []string{"testing.invalid"}
+	cfg.MaxRetries = 1
+	client, err := elasticsearch.NewClientParams(elasticsearch.ClientParams{Config: cfg, Transport: transport})
+	require.NoError(t, err)
+	return client
+}
+
+// newMockElasticsearchClient returns an elasticsearch.Client configured to send
+// requests to an httptest.Server that responds to source map search requests
+// with the given status code and response body.
+func newMockElasticsearchClient(t testing.TB, statusCode int, responseBody io.Reader) elasticsearch.Client {
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(statusCode)
+		if responseBody != nil {
+			io.Copy(w, responseBody)
+		}
+	}))
+	t.Cleanup(srv.Close)
+	config := elasticsearch.DefaultConfig()
+	config.Backoff.Init = time.Nanosecond
+	config.Hosts = []string{srv.URL}
+	client, err := elasticsearch.NewClient(config)
+	require.NoError(t, err)
+	return client
+}
+
+// validSourcemap is an example of a valid sourcemap for use in tests.
+const validSourcemap = `{
+  "version": 3,
+  "sources": [
+    "webpack:///bundle.js",
+    "",
+    "webpack:///./scripts/index.js",
+    "webpack:///./index.html",
+    "webpack:///./scripts/app.js"
+  ],
+  "names": [
+    "modules",
+    "__webpack_require__",
+    "moduleId",
+    "installedModules",
+    "exports",
+    "module",
+    "id",
+    "loaded",
+    "call",
+    "m",
+    "c",
+    "p",
+    "foo",
+    "console",
+    "log",
+    "foobar"
+  ],
+  "mappings": "CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH",
+  "file": "bundle.js",
+  "sourcesContent": [
+    "/******/ (function(modules) { // webpackBootstrap\n/******/ \t// The module cache\n/******/ \tvar installedModules = {};\n/******/\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(installedModules[moduleId])\n/******/ \t\t\treturn installedModules[moduleId].exports;\n/******/\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = installedModules[moduleId] = {\n/******/ \t\t\texports: {},\n/******/ \t\t\tid: moduleId,\n/******/ \t\t\tloaded: false\n/******/ \t\t};\n/******/\n/******/ \t\t// Execute the module function\n/******/ \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n/******/\n/******/ \t\t// Flag the module as loaded\n/******/ \t\tmodule.loaded = true;\n/******/\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/\n/******/\n/******/ \t// expose the modules object (__webpack_modules__)\n/******/ \t__webpack_require__.m = modules;\n/******/\n/******/ \t// expose the module cache\n/******/ \t__webpack_require__.c = installedModules;\n/******/\n/******/ \t// __webpack_public_path__\n/******/ \t__webpack_require__.p = \"\";\n/******/\n/******/ \t// Load entry module and return 
exports\n/******/ \treturn __webpack_require__(0);\n/******/ })\n/************************************************************************/\n/******/ ([\n/* 0 */\n/***/ function(module, exports, __webpack_require__) {\n\n\t// Webpack\n\t__webpack_require__(1)\n\t\n\t__webpack_require__(2)\n\t\n\tfoo()\n\n\n/***/ },\n/* 1 */\n/***/ function(module, exports, __webpack_require__) {\n\n\tmodule.exports = __webpack_require__.p + \"index.html\"\n\n/***/ },\n/* 2 */\n/***/ function(module, exports) {\n\n\tfunction foo() {\n\t console.log(foobar)\n\t}\n\t\n\tfoo()\n\n\n/***/ }\n/******/ ]);\n\n\n/** WEBPACK FOOTER **\n ** bundle.js\n **/", + " \t// The module cache\n \tvar installedModules = {};\n\n \t// The require function\n \tfunction __webpack_require__(moduleId) {\n\n \t\t// Check if module is in cache\n \t\tif(installedModules[moduleId])\n \t\t\treturn installedModules[moduleId].exports;\n\n \t\t// Create a new module (and put it into the cache)\n \t\tvar module = installedModules[moduleId] = {\n \t\t\texports: {},\n \t\t\tid: moduleId,\n \t\t\tloaded: false\n \t\t};\n\n \t\t// Execute the module function\n \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n \t\t// Flag the module as loaded\n \t\tmodule.loaded = true;\n\n \t\t// Return the exports of the module\n \t\treturn module.exports;\n \t}\n\n\n \t// expose the modules object (__webpack_modules__)\n \t__webpack_require__.m = modules;\n\n \t// expose the module cache\n \t__webpack_require__.c = installedModules;\n\n \t// __webpack_public_path__\n \t__webpack_require__.p = \"\";\n\n \t// Load entry module and return exports\n \treturn __webpack_require__(0);\n\n\n\n/** WEBPACK FOOTER **\n ** webpack/bootstrap 6002740481c9666b0d38\n **/", + "// Webpack\nrequire('../index.html')\n\nrequire('./app')\n\nfoo()\n\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./scripts/index.js\n ** module id = 0\n ** module chunks = 0\n **/", + "module.exports = __webpack_public_path__ + \"index.html\"\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./index.html\n ** module id = 1\n ** module chunks = 0\n **/", + "function foo() {\n console.log(foobar)\n}\n\nfoo()\n\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./scripts/app.js\n ** module id = 2\n ** module chunks = 0\n **/" + ], + "sourceRoot": "" +}` diff --git a/sourcemap/fleet_store.go b/sourcemap/fleet_store.go new file mode 100644 index 00000000000..80f855d096b --- /dev/null +++ b/sourcemap/fleet_store.go @@ -0,0 +1,190 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
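The es_store.go hunk earlier moves the search onto go-elasticsearch's esapi.SearchRequest, whose Do method accepts any esapi.Transport, including apm-server's elasticsearch.Client wrapper. A minimal standalone sketch of that request/response pattern follows; the index pattern and the match_all query are illustrative placeholders, not the real sourcemap query:

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"

	elasticsearch "github.com/elastic/go-elasticsearch/v7"
	"github.com/elastic/go-elasticsearch/v7/esapi"
)

func main() {
	// NewDefaultClient targets http://localhost:9200; apm-server hands in
	// its own wrapper client instead.
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		panic(err)
	}

	// Encode the query body, as runSearchQuery does above.
	var buf bytes.Buffer
	q := map[string]interface{}{
		"query": map[string]interface{}{"match_all": map[string]interface{}{}},
	}
	if err := json.NewEncoder(&buf).Encode(q); err != nil {
		panic(err)
	}

	req := esapi.SearchRequest{
		Index:          []string{"apm-*sourcemap*"}, // placeholder index pattern
		Body:           &buf,
		TrackTotalHits: true,
	}
	resp, err := req.Do(context.Background(), es)
	if err != nil {
		panic(err) // transport-level failure
	}
	defer resp.Body.Close()

	// Non-2xx responses still yield an *esapi.Response: the caller must
	// inspect StatusCode, which is exactly what fetch does above.
	if resp.StatusCode >= http.StatusMultipleChoices {
		b, _ := ioutil.ReadAll(resp.Body)
		panic(fmt.Sprintf("search failed: %d %s", resp.StatusCode, b))
	}
	fmt.Println("search succeeded:", resp.StatusCode)
}
```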
+ +package sourcemap + +import ( + "compress/zlib" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + + "github.com/elastic/apm-server/beater/config" + logs "github.com/elastic/apm-server/log" +) + +const defaultFleetPort = 8220 + +var errMsgFleetFailure = errMsgFailure + " fleet" + +type fleetStore struct { + apikey string + c *http.Client + sourceMapURLs map[key]string + fleetBaseURLs []string +} + +type key struct { + ServiceName string + ServiceVersion string + BundleFilepath string +} + +// NewFleetStore returns an instance of Store for interacting with sourcemaps +// stored in Fleet-Server. +func NewFleetStore( + c *http.Client, + fleetCfg *config.Fleet, + cfgs []config.SourceMapMetadata, + expiration time.Duration, +) (*Store, error) { + if len(fleetCfg.Hosts) < 1 { + return nil, errors.New("no fleet hosts present for fleet store") + } + logger := logp.NewLogger(logs.Sourcemap) + s, err := newFleetStore(c, fleetCfg, cfgs) + if err != nil { + return nil, err + } + return newStore(s, logger, expiration) +} + +func newFleetStore( + c *http.Client, + fleetCfg *config.Fleet, + cfgs []config.SourceMapMetadata, +) (fleetStore, error) { + sourceMapURLs := make(map[key]string) + fleetBaseURLs := make([]string, len(fleetCfg.Hosts)) + + for _, cfg := range cfgs { + k := key{cfg.ServiceName, cfg.ServiceVersion, cfg.BundleFilepath} + sourceMapURLs[k] = cfg.SourceMapURL + } + + for i, host := range fleetCfg.Hosts { + baseURL, err := common.MakeURL(fleetCfg.Protocol, "", host, defaultFleetPort) + if err != nil { + return fleetStore{}, err + } + fleetBaseURLs[i] = baseURL + } + + return fleetStore{ + apikey: "ApiKey " + fleetCfg.AccessAPIKey, + fleetBaseURLs: fleetBaseURLs, + sourceMapURLs: sourceMapURLs, + c: c, + }, nil +} + +func (f fleetStore) fetch(ctx context.Context, name, version, path string) (string, error) { + k := key{name, version, path} + sourceMapURL, ok := f.sourceMapURLs[k] + if !ok { + return "", fmt.Errorf("unable to find sourcemap.url for service.name=%s service.version=%s bundle.path=%s", + name, version, path, + ) + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + type result struct { + sourcemap string + err error + } + + results := make(chan result) + var wg sync.WaitGroup + for _, baseURL := range f.fleetBaseURLs { + wg.Add(1) + go func(fleetURL string) { + defer wg.Done() + sourcemap, err := sendRequest(ctx, f, fleetURL) + select { + case <-ctx.Done(): + case results <- result{sourcemap, err}: + } + }(baseURL + sourceMapURL) + } + + go func() { + wg.Wait() + close(results) + }() + + var err error + for result := range results { + err = result.err + if err == nil { + return result.sourcemap, nil + } + } + + if err != nil { + return "", err + } + // No results were received: context was cancelled. 
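+	// (err can only still be nil here when every send was skipped because
+	// the caller's context was already done.)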
+	return "", ctx.Err()
+}
+
+func sendRequest(ctx context.Context, f fleetStore, fleetURL string) (string, error) {
+	req, err := http.NewRequest(http.MethodGet, fleetURL, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Add("Authorization", f.apikey)
+
+	resp, err := f.c.Do(req.WithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// fleet-server only ever returns 200 on success; treat anything else
+	// as a failure.
+	if resp.StatusCode != http.StatusOK {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", fmt.Errorf("%s: statuscode=%d response=(failed to read body)", errMsgFleetFailure, resp.StatusCode)
+		}
+		return "", fmt.Errorf("%s: statuscode=%d response=%s", errMsgFleetFailure, resp.StatusCode, body)
+	}
+
+	// As currently stored in the Elasticsearch index, the artifact is
+	// zlib-compressed and not encrypted.
+	r, err := zlib.NewReader(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	var m map[string]json.RawMessage
+	if err := json.NewDecoder(r).Decode(&m); err != nil {
+		return "", err
+	}
+	return string(m["sourceMap"]), nil
+}
diff --git a/sourcemap/fleet_store_test.go b/sourcemap/fleet_store_test.go
new file mode 100644
index 00000000000..4ec96f331bd
--- /dev/null
+++ b/sourcemap/fleet_store_test.go
@@ -0,0 +1,182 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
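The fetch method above races one request per configured Fleet-Server host and returns the first success, cancelling the rest; the last error wins only when every host fails. The same first-success fan-out, reduced to its core, looks like this (fetchOne is a stand-in for sendRequest; hosts and payloads are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// fetchOne stands in for a per-host request such as sendRequest above.
func fetchOne(ctx context.Context, host string) (string, error) {
	if host != "good" {
		return "", errors.New("boom")
	}
	return "payload from " + host, nil
}

// firstSuccess returns the first successful result from any host, or the
// last error observed when every host fails.
func firstSuccess(ctx context.Context, hosts []string) (string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // releases the losing goroutines

	type result struct {
		payload string
		err     error
	}
	results := make(chan result)
	var wg sync.WaitGroup
	for _, host := range hosts {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			payload, err := fetchOne(ctx, host)
			select {
			case <-ctx.Done(): // a sibling already won
			case results <- result{payload, err}:
			}
		}(host)
	}
	go func() {
		wg.Wait()
		close(results)
	}()

	var lastErr error
	for r := range results {
		if r.err == nil {
			return r.payload, nil // first success wins
		}
		lastErr = r.err
	}
	if lastErr != nil {
		return "", lastErr
	}
	return "", ctx.Err() // no results at all: caller's context was done
}

func main() {
	payload, err := firstSuccess(context.Background(), []string{"bad", "good", "bad"})
	fmt.Println(payload, err)
}
```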
+ +package sourcemap + +import ( + "compress/zlib" + "context" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "github.com/elastic/apm-server/beater/config" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFleetFetch(t *testing.T) { + var ( + apikey = "supersecret" + name = "webapp" + version = "1.0.0" + path = "/my/path/to/bundle.js.map" + c = http.DefaultClient + sourceMapPath = "/api/fleet/artifact" + ) + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, sourceMapPath, r.URL.Path) + if auth := r.Header.Get("Authorization"); auth != "ApiKey "+apikey { + w.WriteHeader(http.StatusUnauthorized) + return + } + // zlib compress + wr := zlib.NewWriter(w) + defer wr.Close() + wr.Write([]byte(resp)) + }) + + ts0 := httptest.NewServer(h) + defer ts0.Close() + + ts1 := httptest.NewServer(h) + defer ts1.Close() + + fleetCfg := &config.Fleet{ + Hosts: []string{ts0.URL[7:], ts1.URL[7:]}, + Protocol: "http", + AccessAPIKey: apikey, + TLS: nil, + } + + cfgs := []config.SourceMapMetadata{ + { + ServiceName: name, + ServiceVersion: version, + BundleFilepath: path, + SourceMapURL: sourceMapPath, + }, + } + fb, err := newFleetStore(c, fleetCfg, cfgs) + assert.NoError(t, err) + + gotRes, err := fb.fetch(context.Background(), name, version, path) + require.NoError(t, err) + + assert.Contains(t, gotRes, "webpack:///bundle.js") +} + +func TestFailedAndSuccessfulFleetHostsFetch(t *testing.T) { + var ( + apikey = "supersecret" + name = "webapp" + version = "1.0.0" + path = "/my/path/to/bundle.js.map" + c = http.DefaultClient + sourceMapPath = "/api/fleet/artifact" + ) + + hError := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "err", http.StatusInternalServerError) + }) + ts0 := httptest.NewServer(hError) + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wr := zlib.NewWriter(w) + defer wr.Close() + wr.Write([]byte(resp)) + }) + ts1 := httptest.NewServer(h) + ts2 := httptest.NewServer(h) + + fleetCfg := &config.Fleet{ + Hosts: []string{ts0.URL[7:], ts1.URL[7:], ts2.URL[7:]}, + Protocol: "http", + AccessAPIKey: apikey, + TLS: nil, + } + + cfgs := []config.SourceMapMetadata{ + { + ServiceName: name, + ServiceVersion: version, + BundleFilepath: path, + SourceMapURL: sourceMapPath, + }, + } + f, err := newFleetStore(c, fleetCfg, cfgs) + assert.NoError(t, err) + + resp, err := f.fetch(context.Background(), name, version, path) + require.NoError(t, err) + assert.Contains(t, resp, "webpack:///bundle.js") +} + +func TestAllFailedFleetHostsFetch(t *testing.T) { + var ( + requestCount int32 + apikey = "supersecret" + name = "webapp" + version = "1.0.0" + path = "/my/path/to/bundle.js.map" + c = http.DefaultClient + sourceMapPath = "/api/fleet/artifact" + ) + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "err", http.StatusInternalServerError) + atomic.AddInt32(&requestCount, 1) + }) + + ts0 := httptest.NewServer(h) + defer ts0.Close() + + ts1 := httptest.NewServer(h) + defer ts1.Close() + + ts2 := httptest.NewServer(h) + defer ts2.Close() + + fleetCfg := &config.Fleet{ + Hosts: []string{ts0.URL[7:], ts1.URL[7:], ts2.URL[7:]}, + Protocol: "http", + AccessAPIKey: apikey, + TLS: nil, + } + + cfgs := []config.SourceMapMetadata{ + { + ServiceName: name, + ServiceVersion: version, + BundleFilepath: path, + SourceMapURL: sourceMapPath, + }, + } + f, err := newFleetStore(c, fleetCfg, cfgs) + 
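+	// Store construction succeeds even though every host will return 500;
+	// the failure only surfaces on fetch, below.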
assert.NoError(t, err) + + resp, err := f.fetch(context.Background(), name, version, path) + assert.EqualValues(t, len(fleetCfg.Hosts), requestCount) + require.Error(t, err) + assert.Contains(t, err.Error(), errMsgFleetFailure) + assert.Equal(t, "", resp) +} + +var resp = "{\"serviceName\":\"web-app\",\"serviceVersion\":\"1.0.0\",\"bundleFilepath\":\"/test/e2e/general-usecase/bundle.js.map\",\"sourceMap\":{\"version\":3,\"sources\":[\"webpack:///bundle.js\",\"\",\"webpack:///./scripts/index.js\",\"webpack:///./index.html\",\"webpack:///./scripts/app.js\"],\"names\":[\"modules\",\"__webpack_require__\",\"moduleId\",\"installedModules\",\"exports\",\"module\",\"id\",\"loaded\",\"call\",\"m\",\"c\",\"p\",\"foo\",\"console\",\"log\",\"foobar\"],\"mappings\":\"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\"file\":\"bundle.js\",\"sourcesContent\":[\"/******/ (function(modules) { // webpackBootstrap\\n/******/ \\t// The module cache\\n/******/ \\tvar installedModules = {};\\n/******/\\n/******/ \\t// The require function\\n/******/ \\tfunction __webpack_require__(moduleId) {\\n/******/\\n/******/ \\t\\t// Check if module is in cache\\n/******/ \\t\\tif(installedModules[moduleId])\\n/******/ \\t\\t\\treturn installedModules[moduleId].exports;\\n/******/\\n/******/ \\t\\t// Create a new module (and put it into the cache)\\n/******/ \\t\\tvar module = installedModules[moduleId] = {\\n/******/ \\t\\t\\texports: {},\\n/******/ \\t\\t\\tid: moduleId,\\n/******/ \\t\\t\\tloaded: false\\n/******/ \\t\\t};\\n/******/\\n/******/ \\t\\t// Execute the module function\\n/******/ \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n/******/\\n/******/ \\t\\t// Flag the module as loaded\\n/******/ \\t\\tmodule.loaded = true;\\n/******/\\n/******/ \\t\\t// Return the exports of the module\\n/******/ \\t\\treturn module.exports;\\n/******/ \\t}\\n/******/\\n/******/\\n/******/ \\t// expose the modules object (__webpack_modules__)\\n/******/ \\t__webpack_require__.m = modules;\\n/******/\\n/******/ \\t// expose the module cache\\n/******/ \\t__webpack_require__.c = installedModules;\\n/******/\\n/******/ \\t// __webpack_public_path__\\n/******/ \\t__webpack_require__.p = \\\"\\\";\\n/******/\\n/******/ \\t// Load entry module and return exports\\n/******/ \\treturn __webpack_require__(0);\\n/******/ })\\n/************************************************************************/\\n/******/ ([\\n/* 0 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\t// Webpack\\n\\t__webpack_require__(1)\\n\\t\\n\\t__webpack_require__(2)\\n\\t\\n\\tfoo()\\n\\n\\n/***/ },\\n/* 1 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\tmodule.exports = __webpack_require__.p + \\\"index.html\\\"\\n\\n/***/ },\\n/* 2 */\\n/***/ function(module, exports) {\\n\\n\\tfunction foo() {\\n\\t console.log(foobar)\\n\\t}\\n\\t\\n\\tfoo()\\n\\n\\n/***/ }\\n/******/ ]);\\n\\n\\n/** WEBPACK FOOTER **\\n ** bundle.js\\n **/\",\" \\t// The module cache\\n \\tvar installedModules = {};\\n\\n \\t// The require function\\n \\tfunction __webpack_require__(moduleId) {\\n\\n \\t\\t// Check 
if module is in cache\\n \\t\\tif(installedModules[moduleId])\\n \\t\\t\\treturn installedModules[moduleId].exports;\\n\\n \\t\\t// Create a new module (and put it into the cache)\\n \\t\\tvar module = installedModules[moduleId] = {\\n \\t\\t\\texports: {},\\n \\t\\t\\tid: moduleId,\\n \\t\\t\\tloaded: false\\n \\t\\t};\\n\\n \\t\\t// Execute the module function\\n \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n\\n \\t\\t// Flag the module as loaded\\n \\t\\tmodule.loaded = true;\\n\\n \\t\\t// Return the exports of the module\\n \\t\\treturn module.exports;\\n \\t}\\n\\n\\n \\t// expose the modules object (__webpack_modules__)\\n \\t__webpack_require__.m = modules;\\n\\n \\t// expose the module cache\\n \\t__webpack_require__.c = installedModules;\\n\\n \\t// __webpack_public_path__\\n \\t__webpack_require__.p = \\\"\\\";\\n\\n \\t// Load entry module and return exports\\n \\treturn __webpack_require__(0);\\n\\n\\n\\n/** WEBPACK FOOTER **\\n ** webpack/bootstrap 6002740481c9666b0d38\\n **/\",\"// Webpack\\nrequire('../index.html')\\n\\nrequire('./app')\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/index.js\\n ** module id = 0\\n ** module chunks = 0\\n **/\",\"module.exports = __webpack_public_path__ + \\\"index.html\\\"\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./index.html\\n ** module id = 1\\n ** module chunks = 0\\n **/\",\"function foo() {\\n console.log(foobar)\\n}\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/app.js\\n ** module id = 2\\n ** module chunks = 0\\n **/\"],\"sourceRoot\":\"\"}}" diff --git a/sourcemap/mapper.go b/sourcemap/mapper.go index 8c3ed655974..9b40e875400 100644 --- a/sourcemap/mapper.go +++ b/sourcemap/mapper.go @@ -18,6 +18,7 @@ package sourcemap import ( + "bufio" "strings" "github.com/go-sourcemap/sourcemap" @@ -34,28 +35,34 @@ func Map(mapper *sourcemap.Consumer, lineno, colno int) ( return } file, function, line, col, ok = mapper.Source(lineno, colno) - src := strings.Split(mapper.SourceContent(file), "\n") - contextLine = strings.Join(subSlice(line-1, line, src), "") - preContext = subSlice(line-1-sourcemapContentSnippetSize, line-1, src) - postContext = subSlice(line, line+sourcemapContentSnippetSize, src) - return -} + scanner := bufio.NewScanner(strings.NewReader(mapper.SourceContent(file))) -func subSlice(from, to int, content []string) []string { - if len(content) == 0 { - return content - } - if from < 0 { - from = 0 - } - if to < 0 { - to = 0 + var currentLine int + for scanner.Scan() { + currentLine++ + if currentLine == line { + contextLine = scanner.Text() + } else if abs(line-currentLine) <= sourcemapContentSnippetSize { + if currentLine < line { + preContext = append(preContext, scanner.Text()) + } else { + postContext = append(postContext, scanner.Text()) + } + } else if currentLine > line { + // More than sourcemapContentSnippetSize lines past, we're done. 
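+			// For example, if line is 10 and the snippet size is 5, lines
+			// 5-9 become preContext, line 10 the contextLine, lines 11-15
+			// postContext, and scanning stops at line 16.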
+ break + } } - if from > len(content) { - from = len(content) + if scanner.Err() != nil { + ok = false + return } - if to > len(content) { - to = len(content) + return +} + +func abs(n int) int { + if n < 0 { + return -n } - return content[from:to] + return n } diff --git a/sourcemap/mapper_test.go b/sourcemap/mapper_test.go index 20785728e8b..1a9f56f24ee 100644 --- a/sourcemap/mapper_test.go +++ b/sourcemap/mapper_test.go @@ -18,37 +18,54 @@ package sourcemap import ( + "encoding/json" + "strings" "testing" "github.com/stretchr/testify/require" - "github.com/elastic/apm-server/sourcemap/test" - "github.com/go-sourcemap/sourcemap" "github.com/stretchr/testify/assert" ) -func TestApply(t *testing.T) { +func TestMapNilConsumer(t *testing.T) { // no sourcemapConsumer _, _, _, _, _, _, _, ok := Map(nil, 0, 0) assert.False(t, ok) +} - m, err := sourcemap.Parse("", []byte(test.ValidSourcemap)) +func TestMapNoMatch(t *testing.T) { + m, err := sourcemap.Parse("", []byte(validSourcemap)) require.NoError(t, err) - t.Run("notOK", func(t *testing.T) { - // nothing found for lineno and colno - file, fc, line, col, ctxLine, _, _, ok := Map(m, 0, 0) - require.False(t, ok) - assert.Zero(t, file) - assert.Zero(t, fc) - assert.Zero(t, line) - assert.Zero(t, col) - assert.Zero(t, ctxLine) - }) + // nothing found for lineno and colno + file, fc, line, col, ctxLine, _, _, ok := Map(m, 0, 0) + require.False(t, ok) + assert.Zero(t, file) + assert.Zero(t, fc) + assert.Zero(t, line) + assert.Zero(t, col) + assert.Zero(t, ctxLine) +} + +func TestMapMatch(t *testing.T) { + // Re-encode the sourcemap, adding carriage returns to the + // line endings in the source content. + decoded := make(map[string]interface{}) + require.NoError(t, json.Unmarshal([]byte(validSourcemap), &decoded)) + sourceContent := decoded["sourcesContent"].([]interface{}) + for i := range sourceContent { + sourceContentFile := sourceContent[i].(string) + sourceContentFile = strings.Replace(sourceContentFile, "\n", "\r\n", -1) + sourceContent[i] = sourceContentFile + } + crlfSourcemap, err := json.Marshal(decoded) + require.NoError(t, err) - t.Run("OK", func(t *testing.T) { - // mapping found in minified sourcemap + // mapping found in minified sourcemap + test := func(t *testing.T, source []byte) { + m, err := sourcemap.Parse("", source) + require.NoError(t, err) file, fc, line, col, ctxLine, preCtx, postCtx, ok := Map(m, 1, 7) require.True(t, ok) assert.Equal(t, "webpack:///bundle.js", file) @@ -56,34 +73,9 @@ func TestApply(t *testing.T) { assert.Equal(t, 1, line) assert.Equal(t, 9, col) assert.Equal(t, "/******/ (function(modules) { // webpackBootstrap", ctxLine) - assert.Equal(t, []string{}, preCtx) + assert.Empty(t, preCtx) assert.NotZero(t, postCtx) - }) -} - -func TestSubSlice(t *testing.T) { - src := []string{"a", "b", "c", "d", "e", "f"} - for _, test := range []struct { - start, end int - rs []string - }{ - {2, 4, []string{"c", "d"}}, - {-1, 1, []string{"a"}}, - {4, 10, []string{"e", "f"}}, - // relevant test cases because we don't control the input and can not panic - {-5, -3, []string{}}, - {8, 10, []string{}}, - } { - assert.Equal(t, test.rs, subSlice(test.start, test.end, src)) - } - - for _, test := range []struct { - start, end int - }{ - {0, 1}, - {0, 0}, - {-1, 0}, - } { - assert.Equal(t, []string{}, subSlice(test.start, test.end, []string{})) } + t.Run("unix_endings", func(t *testing.T) { test(t, []byte(validSourcemap)) }) + t.Run("windows_endings", func(t *testing.T) { test(t, crlfSourcemap) }) 
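+	// bufio.Scanner's default ScanLines split drops a trailing \r, which is
+	// why the CRLF variant maps identically to the Unix variant.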
} diff --git a/sourcemap/processor.go b/sourcemap/processor.go new file mode 100644 index 00000000000..5f001410037 --- /dev/null +++ b/sourcemap/processor.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sourcemap + +import ( + "context" + "sync" + "time" + + "github.com/elastic/beats/v7/libbeat/logp" + + logs "github.com/elastic/apm-server/log" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/utility" +) + +// BatchProcessor is a model.BatchProcessor that performs source mapping for +// span and error events. Any errors fetching source maps, including the +// timeout expiring, will result in the StacktraceFrame.SourcemapError field +// being set; the error will not be returned. +type BatchProcessor struct { + // Store is the store to use for fetching source maps. + Store *Store + + // Timeout holds a timeout for each ProcessBatch call, to limit how + // much time is spent fetching source maps. + // + // If Timeout is <= 0, it will be ignored. + Timeout time.Duration +} + +// ProcessBatch processes spans and errors, applying source maps +// to their stack traces. +func (p BatchProcessor) ProcessBatch(ctx context.Context, batch *model.Batch) error { + if p.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, p.Timeout) + defer cancel() + } + for _, event := range *batch { + if event.Service.Name == "" || event.Service.Version == "" { + continue + } + switch { + case event.Span != nil: + p.processStacktraceFrames(ctx, &event.Service, event.Span.Stacktrace...) + case event.Error != nil: + if event.Error.Log != nil { + p.processStacktraceFrames(ctx, &event.Service, event.Error.Log.Stacktrace...) + } + if event.Error.Exception != nil { + p.processException(ctx, &event.Service, event.Error.Exception) + } + } + } + return nil +} + +func (p BatchProcessor) processException(ctx context.Context, service *model.Service, exception *model.Exception) { + p.processStacktraceFrames(ctx, service, exception.Stacktrace...) 
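+	// Recurse into chained causes so stacktraces of nested exceptions are mapped too.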
+	for _, cause := range exception.Cause {
+		p.processException(ctx, service, &cause)
+	}
+}
+
+// source map algorithm:
+//
+// apply source mapping frame by frame
+// if no source map could be found, set updated to false and set sourcemap error
+// otherwise use source map library for mapping and update
+// - filename: only if it was found
+// - function:
+//   * should be moved down one stack trace frame,
+//   * the function name of the first frame is set to "<anonymous>"
+//   * if one frame is not found in the source map, this frame is left out and
+//     the function name from the previous frame is used
+//   * if a mapping could be applied but no function name is found, the
+//     function name for the next frame is set to "<anonymous>"
+// - colno
+// - lineno
+// - abs_path is set to the cleaned abs_path
+// - sourcemap.updated is set to true
+func (p BatchProcessor) processStacktraceFrames(ctx context.Context, service *model.Service, frames ...*model.StacktraceFrame) error {
+	prevFunction := "<anonymous>"
+	for i := len(frames) - 1; i >= 0; i-- {
+		frame := frames[i]
+		if mapped, function := p.processStacktraceFrame(ctx, service, frame, prevFunction); mapped {
+			prevFunction = function
+		}
+	}
+	return nil
+}
+
+func (p BatchProcessor) processStacktraceFrame(
+	ctx context.Context,
+	service *model.Service,
+	frame *model.StacktraceFrame,
+	prevFunction string,
+) (bool, string) {
+	if frame.Colno == nil || frame.Lineno == nil || frame.AbsPath == "" {
+		return false, ""
+	}
+
+	path := utility.CleanUrlPath(frame.AbsPath)
+	mapper, err := p.Store.Fetch(ctx, service.Name, service.Version, path)
+	if err != nil {
+		frame.SourcemapError = err.Error()
+		getProcessorLogger().Debugf("failed to fetch source map: %s", frame.SourcemapError)
+		return false, ""
+	}
+	if mapper == nil {
+		return false, ""
+	}
+	file, function, lineno, colno, ctxLine, preCtx, postCtx, ok := Map(mapper, *frame.Lineno, *frame.Colno)
+	if !ok {
+		return false, ""
+	}
+
+	// Store original source information.
+	frame.Original.Colno = frame.Colno
+	frame.Original.AbsPath = frame.AbsPath
+	frame.Original.Function = frame.Function
+	frame.Original.Lineno = frame.Lineno
+	frame.Original.Filename = frame.Filename
+	frame.Original.Classname = frame.Classname
+
+	if file != "" {
+		frame.Filename = file
+	}
+	frame.Colno = &colno
+	frame.Lineno = &lineno
+	frame.AbsPath = path
+	frame.SourcemapUpdated = true
+	frame.Function = prevFunction
+	frame.ContextLine = ctxLine
+	frame.PreContext = preCtx
+	frame.PostContext = postCtx
+	if function == "" {
+		function = "<anonymous>"
+	}
+	return true, function
+}
+
+func getProcessorLogger() *logp.Logger {
+	processorLoggerOnce.Do(func() {
+		// We use a rate limited logger to avoid spamming the logs
+		// due to issues communicating with Elasticsearch, for example.
+		processorLogger = logp.NewLogger(
+			logs.Stacktrace,
+			logs.WithRateLimit(time.Minute),
+		)
+	})
+	return processorLogger
+}
+
+var (
+	processorLoggerOnce sync.Once
+	processorLogger     *logp.Logger
+)
diff --git a/sourcemap/processor_test.go b/sourcemap/processor_test.go
new file mode 100644
index 00000000000..0011da76655
--- /dev/null
+++ b/sourcemap/processor_test.go
@@ -0,0 +1,312 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package sourcemap
+
+import (
+	"context"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/v7/libbeat/logp"
+
+	"github.com/elastic/apm-server/elasticsearch"
+	"github.com/elastic/apm-server/model"
+)
+
+func TestBatchProcessor(t *testing.T) {
+	client := newMockElasticsearchClient(t, http.StatusOK,
+		sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(string(validSourcemap))}),
+	)
+	store, err := NewElasticsearchStore(client, "index", time.Minute)
+	require.NoError(t, err)
+
+	originalLinenoWithFilename := 1
+	originalColnoWithFilename := 7
+	originalLinenoWithoutFilename := 1
+	originalColnoWithoutFilename := 23
+	originalLinenoWithFunction := 1
+	originalColnoWithFunction := 67
+
+	nonMatchingFrame := model.StacktraceFrame{
+		AbsPath:  "bundle.js",
+		Lineno:   newInt(0),
+		Colno:    newInt(0),
+		Function: "original function",
+	}
+	mappedFrameWithFilename := model.StacktraceFrame{
+		AbsPath:     "bundle.js",
+		Function:    "<anonymous>",
+		Filename:    "webpack:///bundle.js",
+		Lineno:      newInt(1),
+		Colno:       newInt(9),
+		ContextLine: "/******/ (function(modules) { // webpackBootstrap",
+		PostContext: []string{
+			"/******/ \t// The module cache",
+			"/******/ \tvar installedModules = {};",
+			"/******/",
+			"/******/ \t// The require function",
+			"/******/ \tfunction __webpack_require__(moduleId) {",
+		},
+		Original: model.Original{
+			AbsPath:  "bundle.js",
+			Lineno:   &originalLinenoWithFilename,
+			Colno:    &originalColnoWithFilename,
+			Function: "original function",
+		},
+		SourcemapUpdated: true,
+	}
+
+	mappedFrameWithoutFilename := mappedFrameWithFilename
+	mappedFrameWithoutFilename.Original.Lineno = &originalLinenoWithoutFilename
+	mappedFrameWithoutFilename.Original.Colno = &originalColnoWithoutFilename
+	mappedFrameWithoutFilename.Lineno = newInt(5)
+	mappedFrameWithoutFilename.Colno = newInt(0)
+	mappedFrameWithoutFilename.Filename = ""
+	mappedFrameWithoutFilename.ContextLine = " \tfunction __webpack_require__(moduleId) {"
+	mappedFrameWithoutFilename.PreContext = []string{
+		" \t// The module cache",
+		" \tvar installedModules = {};",
+		"",
+		" \t// The require function",
+	}
+	mappedFrameWithoutFilename.PostContext = []string{
+		"",
+		" \t\t// Check if module is in cache",
+		" \t\tif(installedModules[moduleId])",
+		" \t\t\treturn installedModules[moduleId].exports;",
+		"",
+	}
+
+	mappedFrameWithFunction := mappedFrameWithoutFilename
+	mappedFrameWithFunction.Original.Lineno = &originalLinenoWithFunction
+	mappedFrameWithFunction.Original.Colno = &originalColnoWithFunction
+	mappedFrameWithFunction.Lineno = newInt(13)
+	mappedFrameWithFunction.Colno = newInt(0)
+	mappedFrameWithFunction.ContextLine = " \t\t\texports: {},"
+	mappedFrameWithFunction.PreContext = []string{
+		" \t\tif(installedModules[moduleId])",
+		" \t\t\treturn installedModules[moduleId].exports;",
+		"",
+		" \t\t// Create a new module (and put it into the cache)",
+		" \t\tvar module = installedModules[moduleId] = {",
+	}
+	mappedFrameWithFunction.PostContext = []string{
" \t\t\tid: moduleId,", + " \t\t\tloaded: false", + " \t\t};", + "", + " \t\t// Execute the module function", + } + mappedFrameWithFunction2 := mappedFrameWithFunction + mappedFrameWithFunction2.Function = "exports" + + service := model.Service{ + Name: "service_name", + Version: "service_version", + } + + // Service intentionally left blank + transaction := model.APMEvent{Transaction: &model.Transaction{}} + span1 := model.APMEvent{Span: &model.Span{}} + + error1 := model.APMEvent{ + Service: service, + Error: &model.Error{}, + } + span2 := model.APMEvent{ + Service: service, + Span: &model.Span{ + Stacktrace: model.Stacktrace{cloneFrame(nonMatchingFrame), { + AbsPath: "bundle.js", + Lineno: newInt(originalLinenoWithFilename), + Colno: newInt(originalColnoWithFilename), + Function: "original function", + }}, + }, + } + error2 := model.APMEvent{ + Service: service, + Error: &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{{ + AbsPath: "bundle.js", + Lineno: newInt(originalLinenoWithoutFilename), + Colno: newInt(originalColnoWithoutFilename), + Function: "original function", + }}, + }, + }, + } + error3 := model.APMEvent{ + Service: service, + Error: &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{{ + AbsPath: "bundle.js", + Lineno: newInt(originalLinenoWithFunction), + Colno: newInt(originalColnoWithFunction), + Function: "original function", + }}, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{{ + AbsPath: "bundle.js", + Lineno: newInt(originalLinenoWithFunction), + Colno: newInt(originalColnoWithFunction), + Function: "original function", + }, { + AbsPath: "bundle.js", + Lineno: newInt(originalLinenoWithFunction), + Colno: newInt(originalColnoWithFunction), + Function: "original function", + }}, + }}, + }, + }, + } + + processor := BatchProcessor{Store: store} + err = processor.ProcessBatch(context.Background(), &model.Batch{transaction, span1, span2, error1, error2, error3}) + assert.NoError(t, err) + + assert.Equal(t, &model.Span{}, span1.Span) + assert.Equal(t, &model.Error{}, error1.Error) + assert.Equal(t, &model.Span{ + Stacktrace: model.Stacktrace{ + cloneFrame(nonMatchingFrame), + cloneFrame(mappedFrameWithFilename), + }, + }, span2.Span) + assert.Equal(t, &model.Error{ + Log: &model.Log{ + Stacktrace: model.Stacktrace{ + cloneFrame(mappedFrameWithoutFilename), + }, + }, + }, error2.Error) + assert.Equal(t, &model.Error{ + Exception: &model.Exception{ + Stacktrace: model.Stacktrace{ + cloneFrame(mappedFrameWithFunction), + }, + Cause: []model.Exception{{ + Stacktrace: model.Stacktrace{ + cloneFrame(mappedFrameWithFunction2), + cloneFrame(mappedFrameWithFunction), + }, + }}, + }, + }, error3.Error) +} + +func TestBatchProcessorElasticsearchUnavailable(t *testing.T) { + client := newUnavailableElasticsearchClient(t) + store, err := NewElasticsearchStore(client, "index", time.Minute) + require.NoError(t, err) + + nonMatchingFrame := model.StacktraceFrame{ + AbsPath: "bundle.js", + Lineno: newInt(0), + Colno: newInt(0), + Function: "original function", + } + + span := model.APMEvent{ + Service: model.Service{ + Name: "service_name", + Version: "service_version", + }, + Span: &model.Span{ + Stacktrace: model.Stacktrace{cloneFrame(nonMatchingFrame), cloneFrame(nonMatchingFrame)}, + }, + } + + logp.DevelopmentSetup(logp.ToObserverOutput()) + for i := 0; i < 2; i++ { + processor := BatchProcessor{Store: store} + err = processor.ProcessBatch(context.Background(), &model.Batch{span, span}) + assert.NoError(t, err) + } + + // 
SourcemapError should have been set, but the frames should otherwise be unmodified. + expectedFrame := nonMatchingFrame + expectedFrame.SourcemapError = "failure querying ES: client error" + assert.Equal(t, model.Stacktrace{&expectedFrame, &expectedFrame}, span.Span.Stacktrace) + + // We should have a single log message, due to rate limiting. + entries := logp.ObserverLogs().TakeAll() + require.Len(t, entries, 1) + assert.Equal(t, "failed to fetch source map: failure querying ES: client error", entries[0].Message) +} + +func TestBatchProcessorTimeout(t *testing.T) { + var transport roundTripperFunc = func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + } + + cfg := elasticsearch.DefaultConfig() + cfg.Hosts = []string{""} + client, err := elasticsearch.NewClientParams(elasticsearch.ClientParams{ + Config: cfg, + Transport: transport, + }) + require.NoError(t, err) + store, err := NewElasticsearchStore(client, "index", time.Minute) + require.NoError(t, err) + + frame := model.StacktraceFrame{ + AbsPath: "bundle.js", + Lineno: newInt(0), + Colno: newInt(0), + Function: "original function", + } + span := model.APMEvent{ + Service: model.Service{ + Name: "service_name", + Version: "service_version", + }, + Span: &model.Span{ + Stacktrace: model.Stacktrace{cloneFrame(frame)}, + }, + } + + before := time.Now() + processor := BatchProcessor{Store: store, Timeout: 100 * time.Millisecond} + err = processor.ProcessBatch(context.Background(), &model.Batch{span}) + assert.NoError(t, err) + taken := time.Since(before) + assert.Less(t, taken, time.Second) +} + +func cloneFrame(frame model.StacktraceFrame) *model.StacktraceFrame { + return &frame +} + +func newInt(v int) *int { + return &v +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} diff --git a/sourcemap/store.go b/sourcemap/store.go index 462ffedd3d4..d351723b769 100644 --- a/sourcemap/store.go +++ b/sourcemap/store.go @@ -21,17 +21,14 @@ import ( "context" "math" "strings" + "sync" "time" - "github.com/elastic/apm-server/elasticsearch" - "github.com/go-sourcemap/sourcemap" gocache "github.com/patrickmn/go-cache" "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/logp" - - logs "github.com/elastic/apm-server/log" ) const ( @@ -39,33 +36,45 @@ const ( ) var ( - errInit = errors.New("Cache cannot be initialized. Expiration and CleanupInterval need to be >= 0") + errMsgFailure = "failure querying" + errInit = errors.New("Cache cannot be initialized. Expiration and CleanupInterval need to be >= 0") ) -// Store holds information necessary to fetch a sourcemap, either from an Elasticsearch instance or an internal cache. +// Store holds information necessary to fetch a sourcemap, either from an +// Elasticsearch instance or an internal cache. type Store struct { cache *gocache.Cache - esStore *esStore + backend backend logger *logp.Logger + + mu sync.Mutex + inflight map[string]chan struct{} } -// NewStore creates a new instance for fetching sourcemaps. The client and index parameters are needed to be able to -// fetch sourcemaps from Elasticsearch. The expiration time is used for the internal cache. 
-func NewStore(client elasticsearch.Client, index string, expiration time.Duration) (*Store, error) {
-	if expiration < 0 {
+type backend interface {
+	fetch(ctx context.Context, name, version, path string) (string, error)
+}
+
+func newStore(
+	b backend,
+	logger *logp.Logger,
+	cacheExpiration time.Duration,
+) (*Store, error) {
+	if cacheExpiration < 0 {
 		return nil, errInit
 	}
-	logger := logp.NewLogger(logs.Sourcemap)
+
 	return &Store{
-		cache:   gocache.New(expiration, cleanupInterval(expiration)),
-		esStore: &esStore{client: client, index: index, logger: logger},
-		logger:  logger,
+		cache:    gocache.New(cacheExpiration, cleanupInterval(cacheExpiration)),
+		backend:  b,
+		logger:   logger,
+		inflight: make(map[string]chan struct{}),
 	}, nil
 }
 
 // Fetch a sourcemap from the store.
-func (s *Store) Fetch(ctx context.Context, name string, version string, path string) (*sourcemap.Consumer, error) {
-	key := key([]string{name, version, path})
+func (s *Store) Fetch(ctx context.Context, name, version, path string) (*sourcemap.Consumer, error) {
+	key := cacheKey([]string{name, version, path})
 
 	// fetch from cache
 	if val, found := s.cache.Get(key); found {
@@ -73,10 +82,43 @@ func (s *Store) Fetch(ctx context.Context, name string, version string, path str
 		return consumer, nil
 	}
 
-	// fetch from Elasticsearch and ensure caching for all non-temporary results
-	sourcemapStr, err := s.esStore.fetch(ctx, name, version, path)
+	// if the value hasn't been found, check to see if there's an inflight
+	// request to update the value.
+	s.mu.Lock()
+	wait, ok := s.inflight[key]
+	if ok {
+		// found an inflight request, wait for it to complete.
+		s.mu.Unlock()
+
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+		// Try to read the value again
+		return s.Fetch(ctx, name, version, path)
+	}
+
+	// no inflight request found, add a channel to the map and then
+	// make the fetch request.
+	wait = make(chan struct{})
+	s.inflight[key] = wait
+
+	s.mu.Unlock()
+
+	// Once the fetch request is complete, close and remove the channel
+	// from the synchronization map.
+	defer func() {
+		s.mu.Lock()
+		delete(s.inflight, key)
+		close(wait)
+		s.mu.Unlock()
+	}()
+
+	// fetch from the store and ensure caching for all non-temporary results
+	sourcemapStr, err := s.backend.fetch(ctx, name, version, path)
 	if err != nil {
-		if !strings.Contains(err.Error(), errMsgESFailure) {
+		if !strings.Contains(err.Error(), errMsgFailure) {
 			s.add(key, nil)
 		}
 		return nil, err
@@ -93,16 +135,18 @@ func (s *Store) Fetch(ctx context.Context, name string, version string, path str
 		return nil, errors.Wrap(err, errMsgParseSourcemap)
 	}
 	s.add(key, consumer)
+
 	return consumer, nil
 }
 
-// Added ensures the internal cache is cleared for the given parameters. This should be called when a sourcemap is uploaded.
-func (s *Store) Added(ctx context.Context, name string, version string, path string) {
+// NotifyAdded ensures the internal cache is cleared for the given parameters.
+// This should be called when a sourcemap is uploaded.
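+// NotifyAdded fetches first so that replacing an existing sourcemap can be
+// logged as a warning, then evicts the cache entry so the next Fetch reads
+// the newly uploaded document.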
+func (s *Store) NotifyAdded(ctx context.Context, name string, version string, path string) { if sourcemap, err := s.Fetch(ctx, name, version, path); err == nil && sourcemap != nil { s.logger.Warnf("Overriding sourcemap for service %s version %s and file %s", name, version, path) } - key := key([]string{name, version, path}) + key := cacheKey([]string{name, version, path}) s.cache.Delete(key) if !s.logger.IsDebug() { return @@ -118,7 +162,7 @@ func (s *Store) add(key string, consumer *sourcemap.Consumer) { s.logger.Debugf("Added id %v. Cache now has %v entries.", key, s.cache.ItemCount()) } -func key(s []string) string { +func cacheKey(s []string) string { return strings.Join(s, "_") } diff --git a/sourcemap/store_test.go b/sourcemap/store_test.go index 6c18722166e..7cd6cd1e752 100644 --- a/sourcemap/store_test.go +++ b/sourcemap/store_test.go @@ -18,8 +18,14 @@ package sourcemap import ( + "compress/zlib" "context" + "errors" "fmt" + "net/http" + "net/http/httptest" + "sync" + "sync/atomic" "testing" "time" @@ -28,16 +34,29 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/elastic/apm-server/beater/config" "github.com/elastic/apm-server/elasticsearch" - - "github.com/elastic/apm-server/sourcemap/test" + logs "github.com/elastic/apm-server/log" + "github.com/elastic/beats/v7/libbeat/logp" ) -func Test_NewStore(t *testing.T) { - _, err := NewStore(nil, "", -1) +var unsupportedVersionSourcemap = `{ + "version": 1, + "sources": ["webpack:///bundle.js"], + "names": [], + "mappings": "CAAS", + "file": "bundle.js", + "sourcesContent": [], + "sourceRoot": "" +}` + +func Test_newStore(t *testing.T) { + logger := logp.NewLogger(logs.Sourcemap) + + _, err := newStore(nil, logger, -1) require.Error(t, err) - f, err := NewStore(nil, "", 100) + f, err := newStore(nil, logger, 100) require.NoError(t, err) assert.NotNil(t, f.cache) } @@ -49,7 +68,9 @@ func TestStore_Fetch(t *testing.T) { t.Run("cache", func(t *testing.T) { t.Run("nil", func(t *testing.T) { var nilConsumer *sourcemap.Consumer - store := testStore(t, test.ESClientWithValidSourcemap(t)) //if ES was queried, it would return a valid sourcemap + store := testStore(t, newMockElasticsearchClient(t, http.StatusOK, + sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(validSourcemap)}), + )) store.add(key, nilConsumer) mapper, err := store.Fetch(context.Background(), serviceName, serviceVersion, path) @@ -59,7 +80,7 @@ func TestStore_Fetch(t *testing.T) { t.Run("sourcemapConsumer", func(t *testing.T) { consumer := &sourcemap.Consumer{} - store := testStore(t, test.ESClientUnavailable(t)) //if ES was queried, it would return a server error + store := testStore(t, newUnavailableElasticsearchClient(t)) store.add(key, consumer) mapper, err := store.Fetch(context.Background(), serviceName, serviceVersion, path) @@ -70,7 +91,9 @@ func TestStore_Fetch(t *testing.T) { }) t.Run("validFromES", func(t *testing.T) { - store := testStore(t, test.ESClientWithValidSourcemap(t)) + store := testStore(t, newMockElasticsearchClient(t, http.StatusOK, + sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(validSourcemap)}), + )) mapper, err := store.Fetch(context.Background(), serviceName, serviceVersion, path) require.NoError(t, err) require.NotNil(t, mapper) @@ -82,8 +105,7 @@ func TestStore_Fetch(t *testing.T) { }) t.Run("notFoundInES", func(t *testing.T) { - - store := testStore(t, test.ESClientWithSourcemapNotFound(t)) + 
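+		// A 404 from Elasticsearch is mapped to "no sourcemap found"
+		// rather than a temporary failure (see esStore.fetch above).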
store := testStore(t, newMockElasticsearchClient(t, http.StatusNotFound, sourcemapSearchResponseBody(0, nil))) //not cached cached, found := store.cache.Get(key) require.False(t, found) @@ -102,8 +124,12 @@ func TestStore_Fetch(t *testing.T) { t.Run("invalidFromES", func(t *testing.T) { for name, client := range map[string]elasticsearch.Client{ - "invalid": test.ESClientWithInvalidSourcemap(t), - "unsupportedVersion": test.ESClientWithUnsupportedSourcemap(t), + "invalid": newMockElasticsearchClient(t, http.StatusOK, + sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit("foo")}), + ), + "unsupportedVersion": newMockElasticsearchClient(t, http.StatusOK, + sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(unsupportedVersionSourcemap)}), + ), } { t.Run(name, func(t *testing.T) { store := testStore(t, client) @@ -126,7 +152,7 @@ func TestStore_Fetch(t *testing.T) { }) t.Run("noConnectionToES", func(t *testing.T) { - store := testStore(t, test.ESClientUnavailable(t)) + store := testStore(t, newUnavailableElasticsearchClient(t)) //not cached _, found := store.cache.Get(key) require.False(t, found) @@ -142,13 +168,156 @@ func TestStore_Fetch(t *testing.T) { }) } +func TestFetchContext(t *testing.T) { + var ( + apikey = "supersecret" + name = "webapp" + version = "1.0.0" + path = "/my/path/to/bundle.js.map" + c = http.DefaultClient + ) + + requestReceived := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + select { + case requestReceived <- struct{}{}: + case <-r.Context().Done(): + return + } + // block until the client cancels the request + <-r.Context().Done() + })) + defer ts.Close() + + fleetCfg := &config.Fleet{ + Hosts: []string{ts.URL}, + Protocol: "https", + AccessAPIKey: apikey, + TLS: nil, + } + cfgs := []config.SourceMapMetadata{ + { + ServiceName: name, + ServiceVersion: version, + BundleFilepath: path, + SourceMapURL: "", + }, + } + b, err := newFleetStore(c, fleetCfg, cfgs) + assert.NoError(t, err) + logger := logp.NewLogger(logs.Sourcemap) + store, err := newStore(b, logger, time.Minute) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fetchReturned := make(chan error, 1) + go func() { + defer close(fetchReturned) + _, err := store.Fetch(ctx, name, version, path) + fetchReturned <- err + }() + select { + case <-requestReceived: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for server to receive request") + } + + // Check that cancelling the context unblocks the request. + cancel() + select { + case err := <-fetchReturned: + assert.True(t, errors.Is(err, context.Canceled)) + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for Fetch to return") + } +} + +func TestConcurrentFetch(t *testing.T) { + for _, tc := range []struct { + calledWant, errWant, succsWant int64 + }{ + {calledWant: 1, errWant: 0, succsWant: 10}, + {calledWant: 2, errWant: 1, succsWant: 9}, + {calledWant: 4, errWant: 3, succsWant: 7}, + } { + var ( + called, errs, succs int64 + + apikey = "supersecret" + name = "webapp" + version = "1.0.0" + path = "/my/path/to/bundle.js.map" + c = http.DefaultClient + res = fmt.Sprintf(`{"sourceMap":%s}`, validSourcemap) + + errsLeft = tc.errWant + ) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddInt64(&called, 1) + // Simulate the wait for a network request. 
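+			// This forces the goroutines below to overlap, so the in-flight
+			// de-duplication in Store.Fetch collapses them into one request.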
+ time.Sleep(50 * time.Millisecond) + if errsLeft > 0 { + errsLeft-- + http.Error(w, "err", http.StatusInternalServerError) + return + } + wr := zlib.NewWriter(w) + defer wr.Close() + wr.Write([]byte(res)) + })) + defer ts.Close() + + fleetCfg := &config.Fleet{ + Hosts: []string{ts.URL}, + Protocol: "https", + AccessAPIKey: apikey, + TLS: nil, + } + cfgs := []config.SourceMapMetadata{ + { + ServiceName: name, + ServiceVersion: version, + BundleFilepath: path, + SourceMapURL: "", + }, + } + store, err := NewFleetStore(c, fleetCfg, cfgs, time.Minute) + assert.NoError(t, err) + + var wg sync.WaitGroup + for i := 0; i < int(tc.succsWant+tc.errWant); i++ { + wg.Add(1) + go func() { + consumer, err := store.Fetch(context.Background(), name, version, path) + if err != nil { + atomic.AddInt64(&errs, 1) + } else { + assert.NotNil(t, consumer) + atomic.AddInt64(&succs, 1) + } + + wg.Done() + }() + } + + wg.Wait() + assert.Equal(t, tc.errWant, errs) + assert.Equal(t, tc.calledWant, called) + assert.Equal(t, tc.succsWant, succs) + } +} + func TestStore_Added(t *testing.T) { name, version, path := "foo", "1.0.1", "/tmp" key := "foo_1.0.1_/tmp" // setup // remove empty sourcemap from cache, and valid one with File() == "bundle.js" from Elasticsearch - store := testStore(t, test.ESClientWithValidSourcemap(t)) + store := testStore(t, newMockElasticsearchClient(t, http.StatusOK, + sourcemapSearchResponseBody(1, []map[string]interface{}{sourcemapHit(validSourcemap)}), + )) store.add(key, &sourcemap.Consumer{}) mapper, err := store.Fetch(context.Background(), name, version, path) @@ -157,7 +326,7 @@ func TestStore_Added(t *testing.T) { assert.Equal(t, "", mapper.File()) // remove from cache, afterwards sourcemap should be fetched from ES - store.Added(context.Background(), name, version, path) + store.NotifyAdded(context.Background(), name, version, path) mapper, err = store.Fetch(context.Background(), name, version, path) require.NoError(t, err) assert.NotNil(t, &sourcemap.Consumer{}, mapper) @@ -165,7 +334,7 @@ func TestStore_Added(t *testing.T) { } func TestExpiration(t *testing.T) { - store := testStore(t, test.ESClientUnavailable(t)) //if ES was queried it would return an error + store := testStore(t, newUnavailableElasticsearchClient(t)) //if ES was queried it would return an error store.cache = gocache.New(25*time.Millisecond, 100) store.add("foo_1.0.1_/tmp", &sourcemap.Consumer{}) name, version, path := "foo", "1.0.1", "/tmp" @@ -202,7 +371,7 @@ func TestCleanupInterval(t *testing.T) { } func testStore(t *testing.T, client elasticsearch.Client) *Store { - store, err := NewStore(client, "apm-*sourcemap*", time.Minute) + store, err := NewElasticsearchStore(client, "apm-*sourcemap*", time.Minute) require.NoError(t, err) return store } diff --git a/sourcemap/test/es_client.go b/sourcemap/test/es_client.go deleted file mode 100644 index 1381372815e..00000000000 --- a/sourcemap/test/es_client.go +++ /dev/null @@ -1,152 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package test - -import ( - "net/http" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/elastic/apm-server/elasticsearch" - - "github.com/elastic/apm-server/elasticsearch/estest" -) - -//ValidSourcemap represents an example for a valid sourcemap string -var ValidSourcemap = `{ - "version": 3, - "sources": [ - "webpack:///bundle.js", - "", - "webpack:///./scripts/index.js", - "webpack:///./index.html", - "webpack:///./scripts/app.js" - ], - "names": [ - "modules", - "__webpack_require__", - "moduleId", - "installedModules", - "exports", - "module", - "id", - "loaded", - "call", - "m", - "c", - "p", - "foo", - "console", - "log", - "foobar" - ], - "mappings": "CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH", - "file": "bundle.js", - "sourcesContent": [ - "/******/ (function(modules) { // webpackBootstrap\n/******/ \t// The module cache\n/******/ \tvar installedModules = {};\n/******/\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(installedModules[moduleId])\n/******/ \t\t\treturn installedModules[moduleId].exports;\n/******/\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = installedModules[moduleId] = {\n/******/ \t\t\texports: {},\n/******/ \t\t\tid: moduleId,\n/******/ \t\t\tloaded: false\n/******/ \t\t};\n/******/\n/******/ \t\t// Execute the module function\n/******/ \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n/******/\n/******/ \t\t// Flag the module as loaded\n/******/ \t\tmodule.loaded = true;\n/******/\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/\n/******/\n/******/ \t// expose the modules object (__webpack_modules__)\n/******/ \t__webpack_require__.m = modules;\n/******/\n/******/ \t// expose the module cache\n/******/ \t__webpack_require__.c = installedModules;\n/******/\n/******/ \t// __webpack_public_path__\n/******/ \t__webpack_require__.p = \"\";\n/******/\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(0);\n/******/ })\n/************************************************************************/\n/******/ ([\n/* 0 */\n/***/ function(module, exports, __webpack_require__) {\n\n\t// Webpack\n\t__webpack_require__(1)\n\t\n\t__webpack_require__(2)\n\t\n\tfoo()\n\n\n/***/ },\n/* 1 */\n/***/ function(module, exports, __webpack_require__) {\n\n\tmodule.exports = __webpack_require__.p + \"index.html\"\n\n/***/ },\n/* 2 */\n/***/ function(module, exports) {\n\n\tfunction foo() {\n\t 
console.log(foobar)\n\t}\n\t\n\tfoo()\n\n\n/***/ }\n/******/ ]);\n\n\n/** WEBPACK FOOTER **\n ** bundle.js\n **/", - " \t// The module cache\n \tvar installedModules = {};\n\n \t// The require function\n \tfunction __webpack_require__(moduleId) {\n\n \t\t// Check if module is in cache\n \t\tif(installedModules[moduleId])\n \t\t\treturn installedModules[moduleId].exports;\n\n \t\t// Create a new module (and put it into the cache)\n \t\tvar module = installedModules[moduleId] = {\n \t\t\texports: {},\n \t\t\tid: moduleId,\n \t\t\tloaded: false\n \t\t};\n\n \t\t// Execute the module function\n \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n \t\t// Flag the module as loaded\n \t\tmodule.loaded = true;\n\n \t\t// Return the exports of the module\n \t\treturn module.exports;\n \t}\n\n\n \t// expose the modules object (__webpack_modules__)\n \t__webpack_require__.m = modules;\n\n \t// expose the module cache\n \t__webpack_require__.c = installedModules;\n\n \t// __webpack_public_path__\n \t__webpack_require__.p = \"\";\n\n \t// Load entry module and return exports\n \treturn __webpack_require__(0);\n\n\n\n/** WEBPACK FOOTER **\n ** webpack/bootstrap 6002740481c9666b0d38\n **/", - "// Webpack\nrequire('../index.html')\n\nrequire('./app')\n\nfoo()\n\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./scripts/index.js\n ** module id = 0\n ** module chunks = 0\n **/", - "module.exports = __webpack_public_path__ + \"index.html\"\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./index.html\n ** module id = 1\n ** module chunks = 0\n **/", - "function foo() {\n console.log(foobar)\n}\n\nfoo()\n\n\n\n/*****************\n ** WEBPACK FOOTER\n ** ./scripts/app.js\n ** module id = 2\n ** module chunks = 0\n **/" - ], - "sourceRoot": "" -}` - -// ESClientWithValidSourcemap returns an elasticsearch client that will always return a document containing -// a valid sourcemap. -func ESClientWithValidSourcemap(t *testing.T) elasticsearch.Client { - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, http.StatusOK, validSourcemapFromES())) - require.NoError(t, err) - return client -} - -// ESClientUnavailable returns an elasticsearch client that will always return a client error, mimicking an -// unavailable Elasticsearch server. -func ESClientUnavailable(t *testing.T) elasticsearch.Client { - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, -1, nil)) - require.NoError(t, err) - return client -} - -// ESClientWithInvalidSourcemap returns an elasticsearch client that will always return a document containing -// an invalid sourcemap. -func ESClientWithInvalidSourcemap(t *testing.T) elasticsearch.Client { - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, http.StatusOK, invalidSourcemapFromES())) - require.NoError(t, err) - return client -} - -// ESClientWithUnsupportedSourcemap returns an elasticsearch client that will always return a document containing -// a sourcemap with an unsupported version. 
-func ESClientWithUnsupportedSourcemap(t *testing.T) elasticsearch.Client { - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, http.StatusOK, sourcemapUnsupportedVersionFromES())) - require.NoError(t, err) - return client -} - -// ESClientWithSourcemapNotFound returns an elasticsearch client that will always return a not found error -func ESClientWithSourcemapNotFound(t *testing.T) elasticsearch.Client { - client, err := estest.NewElasticsearchClient(estest.NewTransport(t, http.StatusNotFound, sourcemapNotFoundFromES())) - require.NoError(t, err) - return client -} - -func validSourcemapFromES() map[string]interface{} { - return map[string]interface{}{ - "hits": map[string]interface{}{ - "total": map[string]interface{}{"value": 1}, - "hits": []map[string]interface{}{ - {"_source": map[string]interface{}{ - "sourcemap": map[string]interface{}{ - "sourcemap": ValidSourcemap}}}}}} -} - -func sourcemapNotFoundFromES() map[string]interface{} { - return map[string]interface{}{ - "hits": map[string]interface{}{ - "total": map[string]interface{}{"value": 0}}} -} - -func invalidSourcemapFromES() map[string]interface{} { - return map[string]interface{}{ - "hits": map[string]interface{}{ - "total": map[string]interface{}{"value": 1}, - "hits": []map[string]interface{}{ - {"_source": map[string]interface{}{ - "sourcemap": map[string]interface{}{ - "sourcemap": "foo"}}}}}} -} - -func sourcemapUnsupportedVersionFromES() map[string]interface{} { - return map[string]interface{}{ - "hits": map[string]interface{}{ - "total": map[string]interface{}{"value": 1}, - "hits": []map[string]interface{}{ - {"_source": map[string]interface{}{ - "sourcemap": map[string]interface{}{ - "sourcemap": `{ - "version": 1, - "sources": ["webpack:///bundle.js"], - "names": [], - "mappings": "CAAS", - "file": "bundle.js", - "sourcesContent": [], - "sourceRoot": "" - }`}}}}}} -} diff --git a/systemtest/agentconfig.go b/systemtest/agentconfig.go new file mode 100644 index 00000000000..e2e0a4dd009 --- /dev/null +++ b/systemtest/agentconfig.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +// CreateAgentConfig creates or updates agent central config via Kibana. 
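+// Settings are flat key/value strings; for example, a sampling change is
+// expressed as map[string]string{"transaction_sample_rate": "0.1"}.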
+func CreateAgentConfig(t testing.TB, serviceName, serviceEnvironment, agentName string, settings map[string]string) {
+ kibanaConfig := apmservertest.DefaultConfig().Kibana
+ kibanaURL, err := url.Parse(kibanaConfig.Host)
+ if err != nil {
+ t.Fatal(err)
+ }
+ kibanaURL.User = url.UserPassword(kibanaConfig.Username, kibanaConfig.Password)
+ kibanaURL.Path = "/api/apm/settings/agent-configuration"
+ kibanaURL.RawQuery = "overwrite=true"
+
+ var params struct {
+ AgentName string `json:"agent_name,omitempty"`
+ Service struct {
+ Name string `json:"name"`
+ Environment string `json:"environment,omitempty"`
+ } `json:"service"`
+ Settings map[string]string `json:"settings"`
+ }
+ params.Service.Name = serviceName
+ params.Service.Environment = serviceEnvironment
+ params.AgentName = agentName
+ params.Settings = settings
+
+ var body bytes.Buffer
+ if err := json.NewEncoder(&body).Encode(params); err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("PUT", kibanaURL.String(), &body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set("kbn-xsrf", "1")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ body, _ := ioutil.ReadAll(resp.Body)
+ t.Fatalf("failed to create agent config: %s (%s)", resp.Status, strings.TrimSpace(string(body)))
+ }
+}
+
+// DeleteAgentConfig deletes agent central config via Kibana.
+func DeleteAgentConfig(t testing.TB, serviceName, serviceEnvironment string) {
+ kibanaConfig := apmservertest.DefaultConfig().Kibana
+ kibanaURL, err := url.Parse(kibanaConfig.Host)
+ if err != nil {
+ t.Fatal(err)
+ }
+ kibanaURL.User = url.UserPassword(kibanaConfig.Username, kibanaConfig.Password)
+ kibanaURL.Path = "/api/apm/settings/agent-configuration"
+
+ var params struct {
+ Service struct {
+ Name string `json:"name"`
+ Environment string `json:"environment,omitempty"`
+ } `json:"service"`
+ }
+ params.Service.Name = serviceName
+ params.Service.Environment = serviceEnvironment
+
+ var body bytes.Buffer
+ if err := json.NewEncoder(&body).Encode(params); err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("DELETE", kibanaURL.String(), &body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set("kbn-xsrf", "1")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
+ body, _ := ioutil.ReadAll(resp.Body)
+ t.Fatalf("failed to delete agent config: %s (%s)", resp.Status, strings.TrimSpace(string(body)))
+ }
+}
diff --git a/systemtest/agentconfig_test.go b/systemtest/agentconfig_test.go
new file mode 100644
index 00000000000..b10b063c04c
--- /dev/null
+++ b/systemtest/agentconfig_test.go
@@ -0,0 +1,131 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package systemtest_test
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/elastic/apm-server/systemtest"
+ "github.com/elastic/apm-server/systemtest/apmservertest"
+)
+
+func TestAgentConfig(t *testing.T) {
+ serviceName := "systemtest_service"
+ serviceEnvironment := "testing"
+ systemtest.DeleteAgentConfig(t, serviceName, "")
+ systemtest.DeleteAgentConfig(t, serviceName, serviceEnvironment)
+
+ // Run apm-server standalone, exercising the Kibana agent config implementation.
+ srv := apmservertest.NewUnstartedServer(t)
+ srv.Config.KibanaAgentConfig = &apmservertest.KibanaAgentConfig{CacheExpiration: time.Second}
+ err := srv.Start()
+ require.NoError(t, err)
+
+ // Run apm-server under Fleet, exercising the Fleet agent config implementation.
+ apmIntegration := initAPMIntegration(t, map[string]interface{}{})
+ serverURLs := []string{srv.URL, apmIntegration.URL}
+
+ expectChange := func(serverURL string, etag string) (map[string]string, *http.Response) {
+ t.Helper()
+ timer := time.NewTimer(time.Minute)
+ defer timer.Stop()
+ interval := 100 * time.Millisecond
+ for {
+ settings, resp := queryAgentConfig(t, serverURL, serviceName, serviceEnvironment, etag)
+ if resp.StatusCode == http.StatusOK {
+ return settings, resp
+ }
+ select {
+ case <-timer.C:
+ t.Fatal("timed out waiting for agent config change")
+ case <-time.After(interval):
+ }
+ }
+ }
+
+ // No agent config matching service name/environment initially.
+ etags := make(map[string]string)
+ for _, url := range serverURLs {
+ settings, resp := expectChange(url, "")
+ assert.Empty(t, settings)
+ etag := resp.Header.Get("Etag")
+ assert.Equal(t, `"-"`, etag)
+ etags[url] = etag
+ _, resp = queryAgentConfig(t, url, serviceName, serviceEnvironment, etag)
+ assert.Equal(t, http.StatusNotModified, resp.StatusCode)
+ }
+
+ // Create an agent config entry matching the service name, and any environment.
+ configured := map[string]string{"transaction_sample_rate": "0.1", "sanitize_field_names": "foo,bar,baz"}
+ systemtest.CreateAgentConfig(t, "systemtest_service", "", "", configured)
+ for _, url := range serverURLs {
+ settings, resp := expectChange(url, etags[url])
+ assert.Equal(t, configured, settings)
+ etag := resp.Header.Get("Etag")
+ assert.NotEqual(t, `"-"`, etag)
+ etags[url] = etag
+ _, resp = queryAgentConfig(t, url, serviceName, serviceEnvironment, etag)
+ assert.Equal(t, http.StatusNotModified, resp.StatusCode)
+ }
+
+ // Create a more specific agent config entry with both service name and environment
+ // matching the query. This should now take precedence.
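+ // (Kibana resolves the most specific configuration: a match on both
+ // service name and environment wins over a name-only match.)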
+ configured2 := map[string]string{"transaction_sample_rate": "0.2"}
+ systemtest.CreateAgentConfig(t, "systemtest_service", "testing", "", configured2)
+ for _, url := range serverURLs {
+ settings, resp := expectChange(url, etags[url])
+ assert.Equal(t, configured2, settings)
+ assert.NotEqual(t, etags[url], resp.Header.Get("Etag"))
+ }
+}
+
+func queryAgentConfig(t testing.TB, serverURL, serviceName, serviceEnvironment, etag string) (map[string]string, *http.Response) {
+ query := make(url.Values)
+ query.Set("service.name", serviceName)
+ if serviceEnvironment != "" {
+ query.Set("service.environment", serviceEnvironment)
+ }
+ url, _ := url.Parse(serverURL + "/config/v1/agents")
+ url.RawQuery = query.Encode()
+
+ req, _ := http.NewRequest("GET", url.String(), nil)
+ if etag != "" {
+ req.Header.Set("If-None-Match", etag)
+ }
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ attrs := make(map[string]string)
+ switch resp.StatusCode {
+ case http.StatusOK:
+ err = json.NewDecoder(resp.Body).Decode(&attrs)
+ require.NoError(t, err)
+ case http.StatusNotModified:
+ default:
+ t.Fatalf("unexpected status %q", resp.Status)
+ }
+ return attrs, resp
+}
diff --git a/systemtest/aggregation_test.go b/systemtest/aggregation_test.go
index b1dfe59cc6c..c9c38fd3410 100644
--- a/systemtest/aggregation_test.go
+++ b/systemtest/aggregation_test.go
@@ -19,22 +19,31 @@ package systemtest_test

 import (
 "context"
+ "encoding/json"
 "net/http"
+ "strings"
 "testing"
 "time"

 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
 "go.elastic.co/apm"

 "github.com/elastic/apm-server/systemtest"
 "github.com/elastic/apm-server/systemtest/apmservertest"
 "github.com/elastic/apm-server/systemtest/estest"
+ "github.com/elastic/go-elasticsearch/v7/esapi"
)

func TestTransactionAggregation(t *testing.T) {
 systemtest.CleanupElasticsearch(t)
 srv := apmservertest.NewUnstartedServer(t)
+ srv.Config.Monitoring = &apmservertest.MonitoringConfig{
+ Enabled: true,
+ MetricsPeriod: 100 * time.Millisecond,
+ StatePeriod: 100 * time.Millisecond,
+ }
 srv.Config.Aggregation = &apmservertest.AggregationConfig{
 Transactions: &apmservertest.TransactionAggregationConfig{
 Enabled: true,
@@ -50,32 +59,59 @@ func TestTransactionAggregation(t *testing.T) {
 require.NoError(t, err)

 // Send some transactions to the server to be aggregated.
- //
- // Mimic a RUM transaction by using the "page-load" transaction type,
- // which causes user-agent to be parsed and included in the aggregation
- // and added to the document fields.
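+ // Send 5 "abc" and 10 "def" transactions so that the aggregated
+ // transaction metrics have distinct, per-name document counts.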
 tracer := srv.Tracer()
- const chromeUserAgent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36"
- for _, transactionType := range []string{"backend", "page-load"} {
- tx := tracer.StartTransaction("name", transactionType)
- req, _ := http.NewRequest("GET", "/", nil)
- req.Header.Set("User-Agent", chromeUserAgent)
- tx.Context.SetHTTPRequest(req)
- tx.Duration = time.Second
- tx.End()
+ for i, name := range []string{"abc", "def"} {
+ for j := 0; j < (i+1)*5; j++ {
+ tx := tracer.StartTransaction(name, "backend")
+ req, _ := http.NewRequest("GET", "/", nil)
+ tx.Context.SetHTTPRequest(req)
+ tx.Duration = time.Second
+ tx.End()
+ }
 }
 tracer.Flush(nil)

- var result estest.SearchResult
- _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{
- Filter: []interface{}{
- estest.ExistsQuery{Field: "transaction.duration.histogram"},
- },
- }).Do(context.Background(), &result,
- estest.WithCondition(result.Hits.NonEmptyCondition()),
+ result := systemtest.Elasticsearch.ExpectMinDocs(t, 2, "apm-*",
+ estest.ExistsQuery{Field: "transaction.duration.histogram"},
 )
- require.NoError(t, err)
 systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits, "@timestamp")
+
+ // Make sure apm-server.aggregation.txmetrics metrics are published. Metric values are unit tested.
+ doc := getBeatsMonitoringStats(t, srv, nil)
+ assert.True(t, gjson.GetBytes(doc.RawSource, "beats_stats.metrics.apm-server.aggregation.txmetrics").Exists())
+
+ // Make sure the _doc_count field is added such that aggregations return
+ // the appropriate per-bucket doc_count values.
+ result = estest.SearchResult{}
+ _, err = systemtest.Elasticsearch.Do(context.Background(), &esapi.SearchRequest{
+ Index: []string{"apm-*"},
+ Body: strings.NewReader(`{
+ "size": 0,
+ "query": {"exists":{"field":"transaction.duration.histogram"}},
+ "aggs": {
+ "transaction_names": {
+ "terms": {"field": "transaction.name"}
+ }
+ }
+}
+`),
+ }, &result)
+ require.NoError(t, err)
+ require.Contains(t, result.Aggregations, "transaction_names")
+
+ type aggregationBucket struct {
+ Key string `json:"key"`
+ DocCount int `json:"doc_count"`
+ }
+ var aggregationResult struct {
+ Buckets []aggregationBucket `json:"buckets"`
+ }
+ err = json.Unmarshal(result.Aggregations["transaction_names"], &aggregationResult)
+ require.NoError(t, err)
+ assert.Equal(t, []aggregationBucket{
+ {Key: "def", DocCount: 10},
+ {Key: "abc", DocCount: 5},
+ }, aggregationResult.Buckets)
 }

 func TestTransactionAggregationShutdown(t *testing.T) {
@@ -88,7 +124,7 @@ func TestTransactionAggregationShutdown(t *testing.T) {
 // a timeout if we were to wait that long. The server
 // should flush metrics on shutdown without waiting for
 // the configured interval.
- Interval: time.Minute,
+ Interval: 30 * time.Minute,
 },
 }
 err := srv.Start()
@@ -101,18 +137,19 @@ func TestTransactionAggregationShutdown(t *testing.T) {
 tx.End()
 tracer.Flush(nil)

+ // Wait for the transaction to be indexed, indicating that Elasticsearch
+ // indices have been set up and we should not risk triggering the shutdown
+ // timeout while waiting for the aggregated metrics to be indexed.
+ systemtest.Elasticsearch.ExpectDocs(t, "apm-*",
+ estest.TermQuery{Field: "processor.event", Value: "transaction"},
+ )
+
 // Stop server to ensure metrics are flushed on shutdown.
 assert.NoError(t, srv.Close())

- var result estest.SearchResult
- _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{
- Filter: []interface{}{
- estest.ExistsQuery{Field: "transaction.duration.histogram"},
- },
- }).Do(context.Background(), &result,
- estest.WithCondition(result.Hits.NonEmptyCondition()),
+ result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*",
+ estest.ExistsQuery{Field: "transaction.duration.histogram"},
 )
- require.NoError(t, err)
 systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits, "@timestamp")
 }
@@ -143,14 +180,8 @@ func TestServiceDestinationAggregation(t *testing.T) {
 tx.End()
 tracer.Flush(nil)

- var result estest.SearchResult
- _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{
- Filter: []interface{}{
- estest.ExistsQuery{Field: "span.destination.service.response_time.count"},
- },
- }).Do(context.Background(), &result,
- estest.WithCondition(result.Hits.NonEmptyCondition()),
+ result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*",
+ estest.ExistsQuery{Field: "span.destination.service.response_time.count"},
 )
- require.NoError(t, err)
 systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits, "@timestamp")
 }
diff --git a/systemtest/apikeycmd_test.go b/systemtest/apikeycmd_test.go
index 4c59762ee33..adf0a12ccfa 100644
--- a/systemtest/apikeycmd_test.go
+++ b/systemtest/apikeycmd_test.go
@@ -19,6 +19,7 @@ package systemtest_test

 import (
 "bytes"
+ "context"
 "encoding/json"
 "io"
 "net/http"
@@ -37,6 +38,10 @@ import (

 func apiKeyCommand(subcommand string, args ...string) *apmservertest.ServerCmd {
 cfg := apmservertest.DefaultConfig()
+ return apiKeyCommandConfig(cfg, subcommand, args...)
+}
+
+func apiKeyCommandConfig(cfg apmservertest.Config, subcommand string, args ...string) *apmservertest.ServerCmd {
 cfgargs, err := cfg.Args()
 if err != nil {
 panic(err)
@@ -72,6 +77,21 @@ func TestAPIKeyCreate(t *testing.T) {

 es := systemtest.NewElasticsearchClientWithAPIKey(attrs["credentials"].(string))
 assertAuthenticateSucceeds(t, es)
+
+ // Check that the API Key has expected metadata.
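+ // (The "create" subcommand attaches {"application": "apm"} metadata to
+ // the key, which is asserted on below.)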
+ type apiKey struct {
+ ID string `json:"id"`
+ Metadata map[string]interface{} `json:"metadata"`
+ }
+ var resp struct {
+ APIKeys []apiKey `json:"api_keys"`
+ }
+ _, err = systemtest.Elasticsearch.Do(context.Background(), &esapi.SecurityGetAPIKeyRequest{
+ ID: attrs["id"].(string),
+ }, &resp)
+ require.NoError(t, err)
+ require.Len(t, resp.APIKeys, 1)
+ assert.Equal(t, map[string]interface{}{"application": "apm"}, resp.APIKeys[0].Metadata)
 }

 func TestAPIKeyCreateExpiration(t *testing.T) {
@@ -86,6 +106,22 @@ func TestAPIKeyCreateExpiration(t *testing.T) {
 assert.Contains(t, attrs, "expiration")
 }

+func TestAPIKeyCreateInvalidUser(t *testing.T) {
+ // heartbeat_user lacks cluster privileges, and cannot create keys;
+ // beats_user has cluster privileges, but not APM application privileges.
+ for _, username := range []string{"heartbeat_user", "beats_user"} {
+ cfg := apmservertest.DefaultConfig()
+ cfg.Output.Elasticsearch.Username = username
+ cfg.Output.Elasticsearch.Password = "changeme"
+
+ cmd := apiKeyCommandConfig(cfg, "create", "--name", t.Name(), "--json")
+ out, err := cmd.CombinedOutput()
+ require.Error(t, err)
+ attrs := decodeJSONMap(t, bytes.NewReader(out))
+ assert.Regexp(t, username+` is missing the following requested privilege\(s\): .*`, attrs["error"])
+ }
+}
+
 func TestAPIKeyInvalidateName(t *testing.T) {
 systemtest.InvalidateAPIKeys(t)
 defer systemtest.InvalidateAPIKeys(t)
@@ -139,6 +175,75 @@ func TestAPIKeyInvalidateID(t *testing.T) {
 assertAuthenticateFails(t, es)
 }

+func TestAPIKeyVerify(t *testing.T) {
+ systemtest.InvalidateAPIKeys(t)
+ defer systemtest.InvalidateAPIKeys(t)
+
+ cmd := apiKeyCommand("create", "--name", t.Name(), "--json", "--ingest", "--agent-config")
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err)
+ attrs := decodeJSONMap(t, bytes.NewReader(out))
+ credentials := attrs["credentials"].(string)
+
+ cmd = apiKeyCommand("verify", "--json", "--credentials="+credentials)
+ out, err = cmd.CombinedOutput()
+ require.NoError(t, err)
+ attrs = decodeJSONMap(t, bytes.NewReader(out))
+ assert.Equal(t, map[string]interface{}{
+ "event:write": true,
+ "config_agent:read": true,
+ "sourcemap:write": false,
+ }, attrs)
+
+ cmd = apiKeyCommand("verify", "--json", "--credentials="+credentials, "--ingest")
+ out, err = cmd.CombinedOutput()
+ require.NoError(t, err)
+ attrs = decodeJSONMap(t, bytes.NewReader(out))
+ assert.Equal(t, map[string]interface{}{"event:write": true}, attrs)
+}
+
+func TestAPIKeyInfo(t *testing.T) {
+ systemtest.InvalidateAPIKeys(t)
+ defer systemtest.InvalidateAPIKeys(t)
+
+ var ids []string
+ for i := 0; i < 2; i++ {
+ cmd := apiKeyCommand("create", "--name", t.Name(), "--json", "--ingest", "--agent-config")
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err)
+ attrs := decodeJSONMap(t, bytes.NewReader(out))
+ ids = append(ids, attrs["id"].(string))
+ }
+
+ type apiKey struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ }
+ var result struct {
+ APIKeys []apiKey `json:"api_keys"`
+ }
+
+ cmd := apiKeyCommand("info", "--json", "--id="+ids[0])
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err)
+ err = json.Unmarshal(out, &result)
+ require.NoError(t, err)
+ assert.Equal(t, []apiKey{{
+ ID: ids[0],
+ Name: t.Name(),
+ }}, result.APIKeys)
+
+ result.APIKeys = nil
+ cmd = apiKeyCommand("info", "--json", "--name="+t.Name())
+ out, err = cmd.CombinedOutput()
+ require.NoError(t, err)
+ err = json.Unmarshal(out, &result)
+ require.NoError(t, err)
+ // Should be at least 2, possibly more; Elasticsearch
may + // hold invalidated keys from previous test runs. + assert.GreaterOrEqual(t, len(result.APIKeys), 2) +} + func assertAuthenticateSucceeds(t testing.TB, es *estest.Client) *esapi.Response { t.Helper() resp, err := es.Security.Authenticate() diff --git a/systemtest/apmservertest/cert.pem b/systemtest/apmservertest/cert.pem new file mode 100644 index 00000000000..07457e03e40 --- /dev/null +++ b/systemtest/apmservertest/cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw +MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9SjY1bIw4 +iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZBl2+XsDul +rKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQABo2gwZjAO +BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw +AwEB/zAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAA +AAAAATANBgkqhkiG9w0BAQsFAAOBgQCEcetwO59EWk7WiJsG4x8SY+UIAA+flUI9 +tyC4lNhbcF2Idq9greZwbYCqTTTr2XiRNSMLCOjKyI7ukPoPjo16ocHj+P3vZGfs +h1fIw3cSS2OolhloGw/XM6RWPWtPAlGykKLciQrBru5NAPvCMsb/I1DAceTiotQM +fblo6RBxUQ== +-----END CERTIFICATE----- diff --git a/systemtest/apmservertest/client_cert.pem b/systemtest/apmservertest/client_cert.pem new file mode 100644 index 00000000000..384731767a9 --- /dev/null +++ b/systemtest/apmservertest/client_cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4zCCAcugAwIBAgIQdU3dyZoPmjTgIWgqymIYRjANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMB4XDTIxMDkwOTAzMzIwMVoXDTMxMDkwNzAzMzIw +MVowEjEQMA4GA1UEChMHQWNtZSBDbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOeRwMdUZfLeMO+4sULM6a/hjiGhaGdESOueZfkxGZjXQa2905bNQiE/ +3MbtTGTI7gvX6BfsAKXs4UQZVwsdyuaCB4FnVL6j+nZ+QxEAMosOmbU0aH42PEIW +GrHV1GcKmATBrszqVkPwe860sM4Lc3TQpbk4l67Ku61vriyQUVrlQkM1SO1FRl+e +NyodLyoTGHRtP59EvpyQibhW/jaSbWyzjG3fF1PCjSgVRpRABNgNvYMRPA8EZcXg +YO8vguvbYHdUmBlCK62GM/hRFMO6RokGY+H63FoqLQC6Z+GRSbOGwpn1jFwy9Mcr +pfEjHzMbH4B5Ocq/Q8v0jYijfAdeaMkCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgWg +MBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL +BQADggEBANTk9UEPHpJxUH6+2BUM+ppUBoB+buJamr1WqsEhp8Y83DYeDVOTGGVT +pkqSwhqw6gj1HTMefWgTRcenBwE2wgRnjR9ap1cI1Z6ILZJGFU8secfk+PEx4p+O +uyis7bgKUkyVbacd0hNV5C3xBXdzj8k/1sqc1EoSd53ywkY/zPGPkNAn1Al46GMo +V5Lwf+OoWjjBa3UPTdX3C9W7w3xv5KUJmjNEeA6yDj9XO4GPVUMV+PFffQgZIClg +DGBPUZQSCRcaFAagOOIp08zP9NowAHLG9AS/eHAl3s9NruHOkXWJUJiyQsNaSYna +uDvR7s54/pnBX+VU2y9Bqwsz8yq0rWo= +-----END CERTIFICATE----- diff --git a/systemtest/apmservertest/client_key.pem b/systemtest/apmservertest/client_key.pem new file mode 100644 index 00000000000..2da6e97b900 --- /dev/null +++ b/systemtest/apmservertest/client_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDnkcDHVGXy3jDv +uLFCzOmv4Y4hoWhnREjrnmX5MRmY10GtvdOWzUIhP9zG7UxkyO4L1+gX7ACl7OFE +GVcLHcrmggeBZ1S+o/p2fkMRADKLDpm1NGh+NjxCFhqx1dRnCpgEwa7M6lZD8HvO +tLDOC3N00KW5OJeuyrutb64skFFa5UJDNUjtRUZfnjcqHS8qExh0bT+fRL6ckIm4 +Vv42km1ss4xt3xdTwo0oFUaUQATYDb2DETwPBGXF4GDvL4Lr22B3VJgZQiuthjP4 +URTDukaJBmPh+txaKi0AumfhkUmzhsKZ9YxcMvTHK6XxIx8zGx+AeTnKv0PL9I2I +o3wHXmjJAgMBAAECggEBAItDL3Fww0L09u8j3o6y1xLsDeMRw8ze1w/EGcVsDt3J +DnmjQH1fgZanZmnHyfqOkAJVXQIN7zHaLHsc1ikcSKqppkPfVaq0yOq78i9mM/9G +z4+XLgRBL6n6yyEJ3Iv/Zf7uT+ZyQ2DmXj6oke/IGS42UIP8kdQPniCmmlO4kwSa +2IagT38c8StxC4n+Ub2j1SPacN9A55Y6wehczukjy8niTa8BL6Jps94hvsL6sC9f +6kBke9hABV+VZMQrIz12lkJqFyrpVsTmyNQc4u4Cp14fVq2jj0ndapIgQWUiDGKS 
+5sVYLBgL1SfntSsNoIdQtJOoYgWrEyx5IEpHZxsOpBECgYEA8EBXa8bIcDK/FXHS ++UIavBXULH9h+sjF3SerqnISIJ+wNkk+yNtqa3AwVW9UNJz67yNPjgs4OJKhCgv8 +JRDt0ZZid43DYnCLxyYuj4+pCuO61R5yF8hZ6Tm4FRTZCJjBHsfwZBP1iFT+RAw2 +IoHW+WeXSksutMoPYmmThvjMjRUCgYEA9r+4F8E+BMAvHX29889eYLKL0c2ZGkJ4 +PbjCnHUAGIqkrTD7LACF3rNh8EaQYLMAm9NSr6qXe4RN1AQjl+S5Wb2QQ87XiKCw +1bo25VddlumxWNv7KEoZw1DhPUjf+EZn3WFb9TvrNgepU1pPenLdOYW36AqVVxeW +bV73HDOCoeUCgYAetnr2gjD5duNKoIRx7OtyA/Bdx+MEpkn1Ha8qWO7MUN278t0d +3D7cgTkMkvDW0QKbM45mADUttkD0Adg3/NQKKQlpcyv5F54m1VC6dYkMpVQrDntT +Yv9rez93InUkTjLR9olaodqPlR30IQOCocqTk5SZoMbVwcb5s7lyVbgAiQKBgQCt +mhOOewIEW6aKbDZhvAsdo2Dl+m32YL+yRTR+2X54xcq4o1aEDdYh6jlE0JCxyB7s +g1CQkUhEub/8I/Fa46xYbBt86aEQ2mWv6XTIBKCvFboV16esH/5iQZxG/ue9COP4 +iJAnSMHi4J8MKuTyNLBa0S6jzhZmEQ1f48meCMum1QKBgF4ffOu8SrRFR2bs9729 +luu/f9c1Y4suq4KWofGx4qBT0QFdbhhfoBO7tVZNpi6z4OhsipgC79vWkGhPq2Vl +XcFXX+NGsss32vmGAHN6Amxjh8X5XncHS6L3Zw2vFlSAe9QN1/uEDJjl65vLxDFt +ReWMJTaS5Pr6ZrC5/Tsa7S/t +-----END PRIVATE KEY----- diff --git a/systemtest/apmservertest/command.go b/systemtest/apmservertest/command.go index 4ade8c42dcd..aa042486bb2 100644 --- a/systemtest/apmservertest/command.go +++ b/systemtest/apmservertest/command.go @@ -33,7 +33,7 @@ import ( // ServerCommand returns a ServerCmd (wrapping os/exec) for running // apm-server with args. func ServerCommand(subcommand string, args ...string) *ServerCmd { - binary, buildErr := buildServer() + binary, buildErr := BuildServerBinary(runtime.GOOS) if buildErr != nil { // Dummy command; Start etc. will return the build error. binary = "/usr/bin/false" @@ -149,27 +149,37 @@ func (c *ServerCmd) cleanup() { } } -// buildServer builds the apm-server binary, returning its absolute path. -func buildServer() (string, error) { +// BuildServerBinary builds the apm-server binary for the given GOOS, +// returning its absolute path. +func BuildServerBinary(goos string) (string, error) { + // Build apm-server binary in the repo root, unless + // we're building for another GOOS, in which case we + // suffix the binary with that GOOS and place it in + // the build directory. + var reldir, suffix string + if goos != runtime.GOOS { + reldir = "build/" + suffix = "-" + goos + if runtime.GOOS == "windows" { + suffix += ".exe" + } + } + apmServerBinaryMu.Lock() defer apmServerBinaryMu.Unlock() - if apmServerBinary != "" { - return apmServerBinary, nil + if binary := apmServerBinary[goos]; binary != "" { + return binary, nil } - // Build apm-server binary in the repo root. - output, err := exec.Command("go", "list", "-m", "-f={{.Dir}}/..").Output() + repoRoot, err := getRepoRoot() if err != nil { return "", err } - repoRoot := filepath.Clean(strings.TrimSpace(string(output))) - abspath := filepath.Join(repoRoot, "apm-server") - if runtime.GOOS == "windows" { - abspath += ".exe" - } + abspath := filepath.Join(repoRoot, reldir, "apm-server"+suffix) log.Println("Building apm-server...") cmd := exec.Command("go", "build", "-o", abspath, "./x-pack/apm-server") + cmd.Env = append(os.Environ(), "GOOS="+goos) cmd.Dir = repoRoot cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -177,11 +187,30 @@ func buildServer() (string, error) { return "", err } log.Println("Built", abspath) - apmServerBinary = abspath - return apmServerBinary, nil + apmServerBinary[goos] = abspath + return abspath, nil +} + +func getRepoRoot() (string, error) { + repoRootMu.Lock() + defer repoRootMu.Unlock() + if repoRoot != "" { + return repoRoot, nil + } + + // Build apm-server binary in the repo root. 
+ output, err := exec.Command("go", "list", "-m", "-f={{.Dir}}/..").Output()
+ if err != nil {
+ return "", err
+ }
+ repoRoot = filepath.Clean(strings.TrimSpace(string(output)))
+ return repoRoot, nil
+}

 var (
 apmServerBinaryMu sync.Mutex
- apmServerBinary string
+ apmServerBinary = make(map[string]string)
+
+ repoRootMu sync.Mutex
+ repoRoot string
 )
diff --git a/systemtest/apmservertest/config.go b/systemtest/apmservertest/config.go
index bb067200b2d..26ace9512ec 100644
--- a/systemtest/apmservertest/config.go
+++ b/systemtest/apmservertest/config.go
@@ -21,6 +21,7 @@ import (
 "encoding/json"
 "fmt"
 "net"
+ "net/http"
 "net/url"
 "os"
 "sort"
@@ -41,11 +42,21 @@ const (
 // Config holds APM Server configuration.
 type Config struct {
- SecretToken string `json:"apm-server.secret_token,omitempty"`
- Jaeger *JaegerConfig `json:"apm-server.jaeger,omitempty"`
- Kibana *KibanaConfig `json:"apm-server.kibana,omitempty"`
- Aggregation *AggregationConfig `json:"apm-server.aggregation,omitempty"`
- Sampling *SamplingConfig `json:"apm-server.sampling,omitempty"`
+ Jaeger *JaegerConfig `json:"apm-server.jaeger,omitempty"`
+ Kibana *KibanaConfig `json:"apm-server.kibana,omitempty"`
+ Aggregation *AggregationConfig `json:"apm-server.aggregation,omitempty"`
+ Sampling *SamplingConfig `json:"apm-server.sampling,omitempty"`
+ RUM *RUMConfig `json:"apm-server.rum,omitempty"`
+ DataStreams *DataStreamsConfig `json:"apm-server.data_streams,omitempty"`
+ DefaultServiceEnvironment string `json:"apm-server.default_service_environment,omitempty"`
+ KibanaAgentConfig *KibanaAgentConfig `json:"apm-server.agent.config,omitempty"`
+ TLS *TLSConfig `json:"apm-server.ssl,omitempty"`
+
+ // AgentAuth holds configuration for APM agent authorization.
+ AgentAuth AgentAuthConfig `json:"apm-server.auth"`
+
+ // ResponseHeaders holds headers to add to all APM Server HTTP responses.
+ ResponseHeaders http.Header `json:"apm-server.response_headers,omitempty"`

 // Instrumentation holds configuration for libbeat and apm-server instrumentation.
 Instrumentation *InstrumentationConfig `json:"instrumentation,omitempty"`
@@ -62,7 +73,7 @@ type Config struct {
 Output OutputConfig `json:"output"`

 // Setup holds configuration for libbeat setup.
- Setup SetupConfig `json:"setup"`
+ Setup *SetupConfig `json:"setup,omitempty"`

 // Queue holds configuration for the libbeat event queue.
 Queue QueueConfig `json:"queue"`
@@ -74,6 +85,35 @@ func (cfg Config) Args() ([]string, error) {
 return configArgs(cfg, nil)
 }

+// TLSConfig holds configuration for TLS encryption of agent/server communication.
+type TLSConfig struct {
+ // ClientAuthentication controls whether TLS client authentication is
+ // enabled, and optional or required. If this is non-empty, then
+ // `apm-server.ssl.certificate_authorities` will be set to the server's
+ // self-signed certificate path.
+ ClientAuthentication string `json:"client_authentication,omitempty"`
+
+ CipherSuites []string `json:"cipher_suites,omitempty"`
+ SupportedProtocols []string `json:"supported_protocols,omitempty"`
+}
+
+// KibanaAgentConfig holds configuration related to the Kibana-based
+// implementation of agent configuration.
+type KibanaAgentConfig struct {
+ CacheExpiration time.Duration
+}
+
+func (c *KibanaAgentConfig) MarshalJSON() ([]byte, error) {
+ // time.Duration is encoded as int64.
+ // Convert time.Durations to durations, to encode as duration strings.
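+ // (For example, 30*time.Second marshals as "30s" rather than as the
+ // integer 30000000000.)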
+ type config struct {
+ CacheExpiration string `json:"cache.expiration,omitempty"`
+ }
+ return json.Marshal(config{
+ CacheExpiration: durationString(c.CacheExpiration),
+ })
+}
+
 // LoggingConfig holds APM Server logging configuration.
 type LoggingConfig struct {
 Files FileLoggingConfig `json:"files"`
@@ -103,24 +143,179 @@ type JaegerConfig struct {

 // SamplingConfig holds APM Server trace sampling configuration.
 type SamplingConfig struct {
- KeepUnsampled bool `json:"keep_unsampled"`
+ KeepUnsampled bool `json:"keep_unsampled"`
+ Tail *TailSamplingConfig `json:"tail,omitempty"`
+}
+
+// TailSamplingConfig holds APM Server tail-based sampling configuration.
+type TailSamplingConfig struct {
+ Enabled bool
+ Interval time.Duration
+ Policies []TailSamplingPolicy
+}
+
+func (t *TailSamplingConfig) MarshalJSON() ([]byte, error) {
+ // time.Duration is encoded as int64.
+ // Convert time.Durations to durations, to encode as duration strings.
+ type config struct {
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval"`
+ Policies []TailSamplingPolicy `json:"policies,omitempty"`
+ }
+ return json.Marshal(config{
+ Enabled: t.Enabled,
+ Interval: durationString(t.Interval),
+ Policies: t.Policies,
+ })
+}
+
+// TailSamplingPolicy holds an APM Server tail-based sampling policy.
+type TailSamplingPolicy struct {
+ ServiceName string `json:"service.name,omitempty"`
+ ServiceEnvironment string `json:"service.environment,omitempty"`
+ TraceName string `json:"trace.name,omitempty"`
+ TraceOutcome string `json:"trace.outcome,omitempty"`
+ SampleRate float64 `json:"sample_rate"`
+}
+
+// RUMConfig holds APM Server RUM configuration.
+type RUMConfig struct {
+ Enabled bool `json:"enabled"`
+
+ // AllowOrigins holds a list of allowed origins for RUM.
+ AllowOrigins []string `json:"allow_origins,omitempty"`
+
+ // AllowHeaders holds a list of Access-Control-Allow-Headers for RUM.
+ AllowHeaders []string `json:"allow_headers,omitempty"`
+
+ // AllowServiceNames holds a list of exclusively allowed service names for
+ // RUM events.
+ AllowServiceNames []string `json:"allow_service_names,omitempty"`
+
+ // ResponseHeaders holds headers to add to all APM Server RUM HTTP responses.
+ ResponseHeaders http.Header `json:"response_headers,omitempty"`
+
+ Sourcemap *RUMSourcemapConfig `json:"source_mapping,omitempty"`
+}
+
+// RUMSourcemapConfig holds APM Server RUM sourcemap configuration.
+type RUMSourcemapConfig struct {
+ Enabled bool `json:"enabled,omitempty"`
+ Cache *RUMSourcemapCacheConfig `json:"cache,omitempty"`
+}
+
+// RUMSourcemapCacheConfig holds sourcemap cache expiration.
+type RUMSourcemapCacheConfig struct {
+ Expiration time.Duration `json:"expiration,omitempty"`
+}
+
+// DataStreamsConfig holds APM Server data streams configuration.
+type DataStreamsConfig struct {
+ Enabled bool `json:"enabled"`
+ WaitForIntegration *bool `json:"wait_for_integration,omitempty"`
+}
+
+// AgentAuthConfig holds agent auth configuration.
+type AgentAuthConfig struct {
+ SecretToken string `json:"secret_token,omitempty"`
+ APIKey *APIKeyAuthConfig `json:"api_key,omitempty"`
+ Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
+}
+
+// APIKeyAuthConfig holds API Key agent auth configuration.
+type APIKeyAuthConfig struct {
+ Enabled bool `json:"enabled"`
+}
+
+// AnonymousAuthConfig holds anonymous agent auth configuration.
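+// Anonymous access is typically enabled for RUM agents, which run in the
+// browser and cannot hold credentials; RateLimit bounds what an anonymous
+// client may send.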
+type AnonymousAuthConfig struct {
+ Enabled bool `json:"enabled"`
+ AllowAgent []string `json:"allow_agent,omitempty"`
+ AllowService []string `json:"allow_service,omitempty"`
+ RateLimit *RateLimitConfig `json:"rate_limit,omitempty"`
+}
+
+// RateLimitConfig holds event rate limit configuration.
+type RateLimitConfig struct {
+ IPLimit int `json:"ip_limit,omitempty"`
+ EventLimit int `json:"event_limit,omitempty"`
 }

 // InstrumentationConfig holds APM Server instrumentation configuration.
 type InstrumentationConfig struct {
- Enabled bool `json:"enabled"`
+ Enabled bool `json:"enabled"`
+ Profiling *ProfilingConfig `json:"profiling,omitempty"`
+
+ Hosts []string `json:"hosts,omitempty"`
+ APIKey string `json:"api_key,omitempty"`
+ SecretToken string `json:"secret_token,omitempty"`
+}
+
+// ProfilingConfig holds APM Server profiling configuration.
+type ProfilingConfig struct {
+ CPU *CPUProfilingConfig `json:"cpu,omitempty"`
+ Heap *HeapProfilingConfig `json:"heap,omitempty"`
+}
+
+// CPUProfilingConfig holds APM Server CPU profiling configuration.
+type CPUProfilingConfig struct {
+ Enabled bool `json:"enabled"`
+ Interval time.Duration `json:"interval,omitempty"`
+ Duration time.Duration `json:"duration,omitempty"`
+}
+
+func (c *CPUProfilingConfig) MarshalJSON() ([]byte, error) {
+ // time.Duration is encoded as int64.
+ // Convert time.Durations to durations, to encode as duration strings.
+ type config struct {
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval,omitempty"`
+ Duration string `json:"duration,omitempty"`
+ }
+ return json.Marshal(config{
+ Enabled: c.Enabled,
+ Interval: durationString(c.Interval),
+ Duration: durationString(c.Duration),
+ })
+}
+
+// HeapProfilingConfig holds APM Server heap profiling configuration.
+type HeapProfilingConfig struct {
+ Enabled bool `json:"enabled"`
+ Interval time.Duration `json:"interval,omitempty"`
+}
+
+func (c *HeapProfilingConfig) MarshalJSON() ([]byte, error) {
+ // time.Duration is encoded as int64.
+ // Convert time.Durations to durations, to encode as duration strings.
+ type config struct {
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval,omitempty"`
+ }
+ return json.Marshal(config{
+ Enabled: c.Enabled,
+ Interval: durationString(c.Interval),
+ })
 }

 // OutputConfig holds APM Server libbeat output configuration.
 type OutputConfig struct {
- Elasticsearch *ElasticsearchOutputConfig `json:"elasticsearch"`
+ Console *ConsoleOutputConfig `json:"console,omitempty"`
+ Elasticsearch *ElasticsearchOutputConfig `json:"elasticsearch,omitempty"`
+}
+
+// ConsoleOutputConfig holds APM Server libbeat console output configuration.
+type ConsoleOutputConfig struct {
+ Enabled bool `json:"enabled"`
 }

 // ElasticsearchOutputConfig holds APM Server libbeat Elasticsearch output configuration.
 type ElasticsearchOutputConfig struct {
+ Enabled bool `json:"enabled"`
 Hosts []string `json:"hosts,omitempty"`
 Username string `json:"username,omitempty"`
 Password string `json:"password,omitempty"`
+ APIKey string `json:"api_key,omitempty"`
 }

 // SetupConfig holds APM Server libbeat setup configuration.
@@ -151,14 +346,14 @@ func (m *MemoryQueueConfig) MarshalJSON() ([]byte, error) {
 // time.Duration is encoded as int64.
 // Convert time.Durations to durations, to encode as duration strings.
 type config struct {
- Events int `json:"events"`
- FlushMinEvents int `json:"flush.min_events"`
- FlushTimeout duration `json:"flush.timeout"`
+ Events int `json:"events"`
+ FlushMinEvents int `json:"flush.min_events"`
+ FlushTimeout string `json:"flush.timeout,omitempty"`
 }
 return json.Marshal(config{
 Events: m.Events,
 FlushMinEvents: m.FlushMinEvents,
- FlushTimeout: duration(m.FlushTimeout),
+ FlushTimeout: durationString(m.FlushTimeout),
 })
 }

@@ -176,14 +371,14 @@ func (m *MonitoringConfig) MarshalJSON() ([]byte, error) {
 type config struct {
 Enabled bool `json:"enabled"`
 Elasticsearch *ElasticsearchOutputConfig `json:"elasticsearch,omitempty"`
- MetricsPeriod duration `json:"elasticsearch.metrics.period,omitempty"`
- StatePeriod duration `json:"elasticsearch.state.period,omitempty"`
+ MetricsPeriod string `json:"elasticsearch.metrics.period,omitempty"`
+ StatePeriod string `json:"elasticsearch.state.period,omitempty"`
 }
 return json.Marshal(config{
 Enabled: m.Enabled,
 Elasticsearch: m.Elasticsearch,
- MetricsPeriod: duration(m.MetricsPeriod),
- StatePeriod: duration(m.StatePeriod),
+ MetricsPeriod: durationString(m.MetricsPeriod),
+ StatePeriod: durationString(m.StatePeriod),
 })
 }

@@ -203,12 +398,12 @@ func (m *TransactionAggregationConfig) MarshalJSON() ([]byte, error) {
 // time.Duration is encoded as int64.
 // Convert time.Durations to durations, to encode as duration strings.
 type config struct {
- Enabled bool `json:"enabled"`
- Interval duration `json:"interval,omitempty"`
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval,omitempty"`
 }
 return json.Marshal(config{
 Enabled: m.Enabled,
- Interval: duration(m.Interval),
+ Interval: durationString(m.Interval),
 })
 }

@@ -222,19 +417,20 @@ func (s *ServiceDestinationAggregationConfig) MarshalJSON() ([]byte, error) {
 // time.Duration is encoded as int64.
 // Convert time.Durations to durations, to encode as duration strings.
 type config struct {
- Enabled bool `json:"enabled"`
- Interval duration `json:"interval,omitempty"`
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval,omitempty"`
 }
 return json.Marshal(config{
 Enabled: s.Enabled,
- Interval: duration(s.Interval),
+ Interval: durationString(s.Interval),
 })
 }

-type duration time.Duration
-
-func (d duration) MarshalText() (text []byte, err error) {
- return []byte(time.Duration(d).String()), nil
+func durationString(d time.Duration) string {
+ if d == 0 {
+ return ""
+ }
+ return d.String()
 }

 func configArgs(cfg Config, extra map[string]interface{}) ([]string, error) {
@@ -294,23 +490,14 @@ func DefaultConfig() Config {
 Scheme: "http",
 Host: net.JoinHostPort(
 getenvDefault("KIBANA_HOST", defaultKibanaHost),
- getenvDefault("KIBANA_PORT", defaultKibanaPort),
+ KibanaPort(),
 ),
 }).String(),
 Username: getenvDefault("KIBANA_USER", defaultKibanaUser),
 Password: getenvDefault("KIBANA_PASS", defaultKibanaPass),
 },
- Output: OutputConfig{
- Elasticsearch: &ElasticsearchOutputConfig{
- Hosts: []string{net.JoinHostPort(
- getenvDefault("ES_HOST", defaultElasticsearchHost),
- getenvDefault("ES_PORT", defaultElasticsearchPort),
- )},
- Username: getenvDefault("ES_USER", defaultElasticsearchUser),
- Password: getenvDefault("ES_PASS", defaultElasticsearchPass),
- },
- },
- Setup: SetupConfig{
+ Output: defaultOutputConfig(),
+ Setup: &SetupConfig{
 IndexTemplate: IndexTemplateConfig{
 Shards: 1,
 RefreshInterval: "250ms",
@@ -325,6 +512,44 @@ func DefaultConfig() Config {
 }
 }

+// defaultOutputConfig enables overriding the default output, and is used to
+// default to console output in tests for apmservertest itself. This is needed
+// to avoid interacting with Elasticsearch, which would cause systemtest tests
+// to fail as they assume sole access to Elasticsearch.
+func defaultOutputConfig() OutputConfig {
+ var outputConfig OutputConfig
+ switch v := os.Getenv("APMSERVERTEST_DEFAULT_OUTPUT"); v {
+ case "console":
+ outputConfig.Console = &ConsoleOutputConfig{Enabled: true}
+ case "":
+ outputConfig.Elasticsearch = &ElasticsearchOutputConfig{
+ Enabled: true,
+ Hosts: []string{net.JoinHostPort(
+ getenvDefault("ES_HOST", defaultElasticsearchHost),
+ ElasticsearchPort(),
+ )},
+ Username: getenvDefault("ES_USER", defaultElasticsearchUser),
+ Password: getenvDefault("ES_PASS", defaultElasticsearchPass),
+ }
+ default:
+ panic("APMSERVERTEST_DEFAULT_OUTPUT has unexpected value: " + v)
+ }
+ return outputConfig
+}
+
+// KibanaPort returns the Kibana port, configured using
+// KIBANA_PORT, or otherwise returning the default of 5601.
+func KibanaPort() string {
+ return getenvDefault("KIBANA_PORT", defaultKibanaPort)
+}
+
+// ElasticsearchPort returns the Elasticsearch REST API port,
+// configured using ES_PORT, or otherwise returning the default
+// of 9200.
+func ElasticsearchPort() string {
+ return getenvDefault("ES_PORT", defaultElasticsearchPort)
+}
+
 func getenvDefault(k, defaultv string) string {
 v := os.Getenv(k)
 if v == "" {
diff --git a/systemtest/apmservertest/filter.go b/systemtest/apmservertest/filter.go
index 079976cf627..d41926a9394 100644
--- a/systemtest/apmservertest/filter.go
+++ b/systemtest/apmservertest/filter.go
@@ -29,7 +29,7 @@ import (
 "go.elastic.co/fastjson"
 )

-// TODO(axw) move EventMetadata and filteringTransport to go.elastic.co/apmtest,
+// TODO(axw) move EventMetadata and FilteringTransport to go.elastic.co/apmtest,
 // generalising filteringTransport to work with arbitrary base transports. To do
 // that we would need to dynamically check for optional interfaces supported by
 // the base transport, and create passthrough methods.
@@ -47,14 +47,22 @@ type EventMetadataFilter interface {
 FilterEventMetadata(*EventMetadata)
 }

-type filteringTransport struct {
+// FilteringTransport is a transport for the APM Go agent which modifies events
+// prior to sending them to the underlying transport.
+type FilteringTransport struct {
 *transport.HTTPTransport
 filter EventMetadataFilter
 }

+// NewFilteringTransport returns a new FilteringTransport that filters events
+// using f, and sends them on to h.
+func NewFilteringTransport(h *transport.HTTPTransport, f EventMetadataFilter) *FilteringTransport {
+ return &FilteringTransport{h, f}
+}
+
 // SendStream decodes metadata from reader, passes it through the filters,
 // and then sends the modified stream to the underlying transport.
-func (t *filteringTransport) SendStream(ctx context.Context, stream io.Reader) error {
+func (t *FilteringTransport) SendStream(ctx context.Context, stream io.Reader) error {
 zr, err := zlib.NewReader(stream)
 if err != nil {
 return err
@@ -98,16 +106,21 @@ func (t *filteringTransport) SendStream(ctx context.Context, stream io.Reader) e
 return t.HTTPTransport.SendStream(ctx, &buf)
 }

-type defaultMetadataFilter struct{}
+// DefaultMetadataFilter implements EventMetadataFilter, setting some default values
+// for fields that would otherwise be dynamically discovered.
+type DefaultMetadataFilter struct{}

-func (defaultMetadataFilter) FilterEventMetadata(m *EventMetadata) {
+// FilterEventMetadata updates m with default values for dynamically discovered fields.
+func (DefaultMetadataFilter) FilterEventMetadata(m *EventMetadata) {
 m.System.Platform = "minix"
 m.System.Architecture = "i386"
 m.System.Container = nil
 m.System.Kubernetes = nil
 m.System.Hostname = "beowulf"
+ m.Process.Argv = nil
 m.Process.Pid = 1
 m.Process.Ppid = nil
+ m.Process.Title = "systemtest.test"
 m.Service.Agent.Version = "0.0.0"
 m.Service.Language.Version = "2.0"
 m.Service.Runtime.Version = "2.0"
diff --git a/systemtest/apmservertest/key.pem b/systemtest/apmservertest/key.pem
new file mode 100644
index 00000000000..d2a8e48416f
--- /dev/null
+++ b/systemtest/apmservertest/key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA PRIVATE KEY-----
diff --git a/systemtest/apmservertest/logs.go b/systemtest/apmservertest/logs.go
index ba327610642..6c4392079c9 100644
--- a/systemtest/apmservertest/logs.go
+++ b/systemtest/apmservertest/logs.go
@@ -160,7 +160,8 @@ type LogEntry struct {
 Timestamp time.Time
 Level zapcore.Level
 Logger string
- Caller string
+ File string
+ Line int
 Message string
 Fields map[string]interface{}
 }
diff --git a/systemtest/apmservertest/server.go b/systemtest/apmservertest/server.go
index bfbc3dfc4b4..848dd7dc480 100644
--- a/systemtest/apmservertest/server.go
+++ b/systemtest/apmservertest/server.go
@@ -19,6 +19,8 @@ package apmservertest

 import (
 "bytes"
+ "crypto/tls"
+ "crypto/x509"
 "encoding/json"
 "errors"
 "fmt"
@@ -29,6 +31,7 @@ import (
 "net/url"
 "os"
 "os/exec"
+ "path/filepath"
 "strings"
 "testing"
 "time"
@@ -56,11 +59,15 @@ type Server struct {
 // The temporary directory will be removed when the server is closed.
 Dir string

- // BeatUUID will be populated with the server's Beat UUID after the
- // Start returns successfully. This can be used to search for documents
+ // BeatUUID will be populated with the server's Beat UUID after Start
+ // returns successfully. This can be used to search for documents
 // corresponding to this test server instance.
 BeatUUID string

+ // Version will be populated with the server's version number after
+ // Start returns successfully.
+ Version string
+
 // Logs provides access to the apm-server log entries.
 Logs LogEntries

@@ -68,7 +75,7 @@ type Server struct {
 Stderr io.ReadCloser

 // URL holds the base URL for Elastic APM agents, in the form
- // http://ipaddr:port with no trailing slash.
+ // http[s]://ipaddr:port with no trailing slash.
 URL string

 // JaegerGRPCAddr holds the address for the Jaeger gRPC server, if enabled.
@@ -77,6 +84,10 @@ type Server struct {
 // JaegerHTTPURL holds the base URL for Jaeger HTTP, if enabled.
 JaegerHTTPURL string

+ // TLS is optional TLS client configuration, populated with a new config
+ // after the server is started with StartTLS.
+ TLS *tls.Config
+
 // EventMetadataFilter holds an optional EventMetadataFilter, which
 // can modify event metadata before it is sent to the server.
 //
@@ -106,7 +117,7 @@ func NewServer(tb testing.TB, args ...string) *Server {
 func NewUnstartedServer(tb testing.TB, args ...string) *Server {
 return &Server{
 Config: DefaultConfig(),
- EventMetadataFilter: defaultMetadataFilter{},
+ EventMetadataFilter: DefaultMetadataFilter{},
 tb: tb,
 args: args,
 }
@@ -117,19 +128,41 @@ func NewUnstartedServer(tb testing.TB, args ...string) *Server {
 //
 // Start will have set s.URL upon a successful return.
 func (s *Server) Start() error {
+ return s.start(false)
+}
+
+func (s *Server) StartTLS() error {
+ return s.start(true)
+}
+
+func (s *Server) start(tls bool) error {
 if s.URL != "" {
 panic("Server already started")
 }
 s.Logs.init()

- cfgargs, err := configArgs(s.Config, map[string]interface{}{
+ extra := map[string]interface{}{
 // These are config attributes that we always specify,
 // as the testing framework relies on them being set.
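+ // ECS-formatted JSON logging to stderr is what consumeStderr parses
+ // to populate s.Logs, and from which the Beat UUID and version are read.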
+ "logging.ecs": true, "logging.json": true, + "logging.level": "debug", "logging.to_stderr": true, "apm-server.expvar.enabled": true, "apm-server.host": "127.0.0.1:0", - }) + } + if tls { + certPath, keyPath, caCertPath, err := s.initTLS() + if err != nil { + panic(err) + } + extra["apm-server.ssl.certificate"] = certPath + extra["apm-server.ssl.key"] = keyPath + if s.Config.TLS != nil && s.Config.TLS.ClientAuthentication != "" { + extra["apm-server.ssl.certificate_authorities"] = []string{caCertPath} + } + } + cfgargs, err := configArgs(s.Config, extra) if err != nil { return err } @@ -183,12 +216,53 @@ func (s *Server) Start() error { logs := s.Logs.Iterator() defer logs.Close() - if err := s.waitUntilListening(logs); err != nil { + if err := s.waitUntilListening(tls, logs); err != nil { return err } return nil } +func (s *Server) initTLS() (serverCertPath, serverKeyPath, caCertPath string, _ error) { + repoRoot, err := getRepoRoot() + if err != nil { + panic(err) + } + + // Load a self-signed server certificate for testing TLS encryption. + serverCertPath = filepath.Join(repoRoot, "systemtest", "apmservertest", "cert.pem") + serverKeyPath = filepath.Join(repoRoot, "systemtest", "apmservertest", "key.pem") + serverCertBytes, err := ioutil.ReadFile(serverCertPath) + if err != nil { + return "", "", "", err + } + certpool := x509.NewCertPool() + if !certpool.AppendCertsFromPEM(serverCertBytes) { + panic("failed to add CA certificate to cert pool") + } + + // Load a self-signed client certificate for testing TLS client certificate auth. + clientCertPath := filepath.Join(repoRoot, "systemtest", "apmservertest", "client_cert.pem") + clientKeyPath := filepath.Join(repoRoot, "systemtest", "apmservertest", "client_key.pem") + clientCertBytes, err := ioutil.ReadFile(clientCertPath) + if err != nil { + return "", "", "", err + } + clientKeyBytes, err := ioutil.ReadFile(clientKeyPath) + if err != nil { + return "", "", "", err + } + clientCert, err := tls.X509KeyPair(clientCertBytes, clientKeyBytes) + if err != nil { + return "", "", "", err + } + + s.TLS = &tls.Config{ + Certificates: []tls.Certificate{clientCert}, + RootCAs: certpool, + } + return serverCertPath, serverKeyPath, clientCertPath, nil +} + func (s *Server) printCmdline(w io.Writer, args []string) { var buf bytes.Buffer fmt.Fprint(&buf, "# Running apm-server\n") @@ -206,7 +280,7 @@ func (s *Server) printCmdline(w io.Writer, args []string) { } } -func (s *Server) waitUntilListening(logs *LogEntryIterator) error { +func (s *Server) waitUntilListening(tls bool, logs *LogEntryIterator) error { var ( elasticHTTPListeningAddr string jaegerGRPCListeningAddr string @@ -223,14 +297,28 @@ func (s *Server) waitUntilListening(logs *LogEntryIterator) error { } } - // First wait for the Beat UUID to be logged. + // First wait for the Beat UUID and server version to be logged. 
 for entry := range logs.C() {
- const prefix = "Beat ID: "
- if entry.Level != zapcore.InfoLevel || !strings.HasPrefix(entry.Message, prefix) {
+ if entry.Level != zapcore.InfoLevel || (entry.Message != "Beat info" && entry.Message != "Build info") {
 continue
 }
- s.BeatUUID = entry.Message[len(prefix):]
- break
+ systemInfo, ok := entry.Fields["system_info"].(map[string]interface{})
+ if !ok {
+ continue
+ }
+ for k, info := range systemInfo {
+ switch k {
+ case "beat":
+ beatInfo := info.(map[string]interface{})
+ s.BeatUUID = beatInfo["uuid"].(string)
+ case "build":
+ buildInfo := info.(map[string]interface{})
+ s.Version = buildInfo["version"].(string)
+ }
+ }
+ if s.BeatUUID != "" && s.Version != "" {
+ break
+ }
 }

 for entry := range logs.C() {
@@ -257,11 +345,15 @@ func (s *Server) waitUntilListening(logs *LogEntryIterator) error {
 }

 if len(prefixes) == 0 {
- s.URL = makeHTTPURLString(elasticHTTPListeningAddr)
+ urlScheme := "http"
+ if tls {
+ urlScheme = "https"
+ }
+ s.URL = makeURLString(urlScheme, elasticHTTPListeningAddr)
 if s.Config.Jaeger != nil {
 s.JaegerGRPCAddr = jaegerGRPCListeningAddr
 if s.Config.Jaeger.HTTPEnabled {
- s.JaegerHTTPURL = makeHTTPURLString(jaegerHTTPListeningAddr)
+ s.JaegerHTTPURL = makeURLString(urlScheme, jaegerHTTPListeningAddr)
 }
 }
 return nil
@@ -286,11 +378,14 @@ func (s *Server) consumeStderr(procStderr io.Reader) {
 s.Stderr = stderrPipeReader

 type logEntry struct {
- Timestamp logpTimestamp
- Level zapcore.Level
- Logger string
- Caller string
- Message string
+ Timestamp logpTimestamp `json:"@timestamp"`
+ Message string `json:"message"`
+ Level zapcore.Level `json:"log.level"`
+ Logger string `json:"log.logger"`
+ Origin struct {
+ File string `json:"file.name"`
+ Line int `json:"file.line"`
+ } `json:"log.origin"`
 }

 decoder := json.NewDecoder(procStderr)
@@ -307,16 +402,17 @@ func (s *Server) consumeStderr(procStderr io.Reader) {
 if err := json.Unmarshal(raw, &fields); err != nil {
 break
 }
- delete(fields, "timestamp")
- delete(fields, "level")
- delete(fields, "logger")
- delete(fields, "caller")
+ delete(fields, "@timestamp")
+ delete(fields, "log.level")
+ delete(fields, "log.logger")
+ delete(fields, "log.origin")
 delete(fields, "message")
 s.Logs.add(LogEntry{
 Timestamp: time.Time(entry.Timestamp),
 Logger: entry.Logger,
 Level: entry.Level,
- Caller: entry.Caller,
+ File: entry.Origin.File,
+ Line: entry.Origin.Line,
 Message: entry.Message,
 Fields: fields,
 })
@@ -342,6 +438,14 @@ func (s *Server) Close() error {
 return s.Wait()
 }

+// Kill forcefully shuts down the server.
+func (s *Server) Kill() error {
+ if s.cmd != nil {
+ s.cmd.Process.Kill()
+ }
+ return s.Wait()
+}
+
 // Wait waits for the server to exit.
// // Wait waits up to 10 seconds for the process's stderr to be closed, @@ -380,14 +484,12 @@ func (s *Server) Tracer() *apm.Tracer { s.tb.Fatal(err) } httpTransport.SetServerURL(serverURL) - httpTransport.SetSecretToken(s.Config.SecretToken) + httpTransport.SetSecretToken(s.Config.AgentAuth.SecretToken) + httpTransport.Client.Transport.(*http.Transport).TLSClientConfig = s.TLS var transport transport.Transport = httpTransport if s.EventMetadataFilter != nil { - transport = &filteringTransport{ - HTTPTransport: httpTransport, - filter: s.EventMetadataFilter, - } + transport = NewFilteringTransport(httpTransport, s.EventMetadataFilter) } tracer, err := apm.NewTracerOptions(apm.TracerOptions{Transport: transport}) if err != nil { @@ -412,7 +514,7 @@ func (s *Server) GetExpvar() *Expvar { return expvar } -func makeHTTPURLString(host string) string { - u := url.URL{Scheme: "http", Host: host} +func makeURLString(scheme, host string) string { + u := url.URL{Scheme: scheme, Host: host} return u.String() } diff --git a/systemtest/apmservertest/server_test.go b/systemtest/apmservertest/server_test.go index e771deb30fc..6d776a2c907 100644 --- a/systemtest/apmservertest/server_test.go +++ b/systemtest/apmservertest/server_test.go @@ -18,6 +18,8 @@ package apmservertest_test import ( + "net/url" + "os" "testing" "github.com/stretchr/testify/assert" @@ -26,6 +28,14 @@ import ( "github.com/elastic/apm-server/systemtest/apmservertest" ) +func TestMain(m *testing.M) { + // Ensure events are sent to stdout by default in apmservertest tests, + // so we don't pollute Elasticsearch in parallel with systemtest tests + // running. + os.Setenv("APMSERVERTEST_DEFAULT_OUTPUT", "console") + os.Exit(m.Run()) +} + func TestAPMServer(t *testing.T) { srv := apmservertest.NewServer(t) require.NotNil(t, srv) @@ -40,6 +50,27 @@ func TestUnstartedAPMServer(t *testing.T) { assert.EqualError(t, err, "apm-server not started") } +func TestAPMServerStartTLS(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + require.NotNil(t, srv) + err := srv.StartTLS() + assert.NoError(t, err) + + serverURL, err := url.Parse(srv.URL) + require.NoError(t, err) + assert.Equal(t, "https", serverURL.Scheme) + + // Make sure the Tracer is configured with the + // appropriate CA certificate. + tracer := srv.Tracer() + tracer.StartTransaction("name", "type").End() + tracer.Flush(nil) + assert.Zero(t, tracer.Stats().Errors) + + err = srv.Close() + assert.NoError(t, err) +} + func TestExpvar(t *testing.T) { srv := apmservertest.NewServer(t) expvar := srv.GetExpvar() diff --git a/systemtest/approvals.go b/systemtest/approvals.go index 7a7a5c74453..60347831529 100644 --- a/systemtest/approvals.go +++ b/systemtest/approvals.go @@ -37,6 +37,8 @@ import ( // // If the events differ, then the test will fail. func ApproveEvents(t testing.TB, name string, hits []estest.SearchHit, dynamic ...string) { + t.Helper() + // Fields generated by the server (e.g. observer.*) // agent which may change between tests. 
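Note on the `*.approved.json` fixtures that follow: `ApproveEvents` compares indexed documents against them, with volatile fields replaced by the literal string "dynamic". A minimal sketch of how such masking can be implemented before diffing; the package name, the `maskDynamic` helper, and the dotted-path convention are illustrative assumptions, not the repo's actual implementation:

```go
package approvaltest

import "strings"

// maskDynamic overwrites the values at the given dotted key paths with
// the placeholder "dynamic", so that volatile fields (UUIDs, ephemeral
// ports, ingest timestamps) compare equal against approved fixtures.
func maskDynamic(doc map[string]interface{}, paths ...string) {
	for _, path := range paths {
		m := doc
		keys := strings.Split(path, ".")
		for i, k := range keys {
			if i == len(keys)-1 {
				if _, ok := m[k]; ok {
					m[k] = "dynamic" // value present: mask it
				}
				break
			}
			next, ok := m[k].(map[string]interface{})
			if !ok {
				break // path does not exist in this document
			}
			m = next
		}
	}
}
```

Masking of this kind means only the stable shape of each event is asserted, which is why the generated fixtures below can be committed verbatim.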
// diff --git a/systemtest/approvals/TestApprovedMetrics/data_streams_disabled.approved.json b/systemtest/approvals/TestApprovedMetrics/data_streams_disabled.approved.json new file mode 100644 index 00000000000..70cdae05aa1 --- /dev/null +++ b/systemtest/approvals/TestApprovedMetrics/data_streams_disabled.approved.json @@ -0,0 +1,297 @@ +{ + "events": [ + { + "@timestamp": "2017-05-30T18:53:41.364Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "go.memstats.heap.sys.bytes": 6520832, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "system.process.cgroup.memory.mem.limit.bytes": 2048, + "system.process.cgroup.memory.mem.usage.bytes": 1024, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "system.process.cgroup.cpu.cfs.period.us": 1024, + "system.process.cgroup.cpu.cfs.quota.us": 2048, + "system.process.cgroup.cpu.id": 2048, + "system.process.cgroup.cpu.stats.periods": 2048, + "system.process.cgroup.cpu.stats.throttled.ns": 2048, + "system.process.cgroup.cpu.stats.throttled.periods": 2048, + "system.process.cgroup.cpuacct.id": 2048, + "system.process.cgroup.cpuacct.total.ns": 2048, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + 
"labels": { + "tag1": "one", + "tag2": 2 + }, + "latency_distribution": { + "counts": [ + 1, + 2, + 3 + ], + "values": [ + 1.1, + 2.2, + 3.3 + ] + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:42.281Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "code": 200, + "some": "abc", + "success": true, + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "span_breakdown", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "span": { + "self_time": { + "count": 1, + "sum.us": 633 + }, + "subtype": "mysql", + "type": "db" + }, + "transaction": { + "breakdown.count": 12, + "duration": { + "count": 2, + "sum.us": 12 + }, + "name": "GET /", + "type": "request" + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + } + ] +} diff --git a/systemtest/approvals/TestApprovedMetrics/data_streams_enabled.approved.json b/systemtest/approvals/TestApprovedMetrics/data_streams_enabled.approved.json new file mode 100644 index 00000000000..94d49477047 --- /dev/null +++ b/systemtest/approvals/TestApprovedMetrics/data_streams_enabled.approved.json @@ -0,0 +1,317 @@ +{ + "events": [ + { + "@timestamp": "2017-05-30T18:53:41.364Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "data_stream.dataset": "apm.app.1234_service_12a3", + "data_stream.namespace": "default", + "data_stream.type": "metrics", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic" + }, + "go.memstats.heap.sys.bytes": 6520832, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "data_stream.dataset": "apm.app.1234_service_12a3", + "data_stream.namespace": "default", + "data_stream.type": "metrics", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + 
"tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "system.process.cgroup.memory.mem.limit.bytes": 2048, + "system.process.cgroup.memory.mem.usage.bytes": 1024, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "data_stream.dataset": "apm.app.1234_service_12a3", + "data_stream.namespace": "default", + "data_stream.type": "metrics", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "system.process.cgroup.cpu.cfs.period.us": 1024, + "system.process.cgroup.cpu.cfs.quota.us": 2048, + "system.process.cgroup.cpu.id": 2048, + "system.process.cgroup.cpu.stats.periods": 2048, + "system.process.cgroup.cpu.stats.throttled.ns": 2048, + "system.process.cgroup.cpu.stats.throttled.periods": 2048, + "system.process.cgroup.cpuacct.id": 2048, + "system.process.cgroup.cpuacct.total.ns": 2048, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:41.366Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "data_stream.dataset": "apm.app.1234_service_12a3", + "data_stream.namespace": "default", + "data_stream.type": "metrics", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "tag1": "one", + "tag2": 2 + }, + "latency_distribution": { + "counts": [ + 1, + 2, + 3 + ], + "values": [ + 1.1, + 2.2, + 3.3 + ] + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + }, + { + "@timestamp": "2017-05-30T18:53:42.281Z", + "agent": { + "name": "elastic-node", + "version": "3.14.0" + }, + "data_stream.dataset": "apm.internal", + "data_stream.namespace": "default", + "data_stream.type": "metrics", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic" + }, + "host": { + "ip": "127.0.0.1" + }, + "labels": { + "code": 200, + "some": "abc", + "success": true, + "tag1": "one", + "tag2": 2 + }, + 
"metricset.name": "span_breakdown", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1234 + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "ecmascript" + }, + "name": "1234_service-12a3", + "node": { + "name": "node-1" + } + }, + "span": { + "self_time": { + "count": 1, + "sum.us": 633 + }, + "subtype": "mysql", + "type": "db" + }, + "transaction": { + "breakdown.count": 12, + "duration": { + "count": 2, + "sum.us": 12 + }, + "name": "GET /", + "type": "request" + }, + "user": { + "email": "user@mail.com", + "id": "axb123hg", + "name": "logged-in-user" + } + } + ] +} diff --git a/systemtest/approvals/TestFleetIntegration.approved.json b/systemtest/approvals/TestFleetIntegration.approved.json new file mode 100644 index 00000000000..217fef8c3a5 --- /dev/null +++ b/systemtest/approvals/TestFleetIntegration.approved.json @@ -0,0 +1,80 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "go", + "version": "0.0.0" + }, + "data_stream.dataset": "apm", + "data_stream.namespace": "default", + "data_stream.type": "traces", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic", + "outcome": "unknown" + }, + "host": { + "architecture": "i386", + "hostname": "beowulf", + "ip": "10.11.12.13", + "name": "beowulf", + "os": { + "platform": "minix" + } + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "process": { + "pid": 1, + "title": "systemtest.test" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "language": { + "name": "go", + "version": "2.0" + }, + "name": "systemtest", + "node": { + "name": "beowulf" + }, + "runtime": { + "name": "gc", + "version": "2.0" + } + }, + "timestamp": { + "us": "dynamic" + }, + "trace": { + "id": "dynamic" + }, + "transaction": { + "duration": { + "us": 1000000 + }, + "id": "dynamic", + "name": "name", + "sampled": true, + "span_count": { + "dropped": 0, + "started": 0 + }, + "type": "type" + } + } + ] +} diff --git a/systemtest/approvals/TestNoMatchingSourcemap.approved.json b/systemtest/approvals/TestNoMatchingSourcemap.approved.json new file mode 100644 index 00000000000..ff802017a04 --- /dev/null +++ b/systemtest/approvals/TestNoMatchingSourcemap.approved.json @@ -0,0 +1,89 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "rum-js", + "version": "0.0.0" + }, + "client": { + "ip": "127.0.0.1" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "outcome": "unknown" + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "parent": { + "id": "611f4fa950f04631" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "name": "apm-agent-js" + }, + "source": { + "ip": "127.0.0.1", + "port": "dynamic" + }, + "span": { + "duration": { + "us": 643000 + }, + "http.url.original": "http://localhost:8000/test/e2e/general-usecase/span", + "id": "aaaaaaaaaaaaaaaa", + "name": "transaction", + "stacktrace": [ + { + "abs_path": "http://subdomain1.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "exclude_from_grouping": false, + "filename": 
"test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "line": { + "column": 18, + "number": 1 + } + }, + { + "abs_path": "http://subdomain2.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "exclude_from_grouping": false, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "library_frame": true, + "line": { + "column": 18, + "number": 1 + } + } + ], + "start": { + "us": 0 + }, + "type": "transaction" + }, + "timestamp": { + "us": "dynamic" + }, + "trace": { + "id": "611f4fa950f04631aaaaaaaaaaaaaaaa" + }, + "transaction": { + "id": "611f4fa950f04631" + }, + "url": { + "original": "http://localhost:8000/test/e2e/general-usecase/span" + } + } + ] +} diff --git a/systemtest/approvals/TestOTLPGRPCMetrics.approved.json b/systemtest/approvals/TestOTLPGRPCMetrics.approved.json new file mode 100644 index 00000000000..8195485fced --- /dev/null +++ b/systemtest/approvals/TestOTLPGRPCMetrics.approved.json @@ -0,0 +1,51 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "opentelemetry/go", + "version": "0.19.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic" + }, + "float64_counter": 1, + "int64_recorder": { + "counts": [ + 1, + 1, + 1, + 1 + ], + "values": [ + 50.5, + 550, + 5500, + 10000 + ] + }, + "metricset.name": "app", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "metric", + "name": "metric" + }, + "service": { + "language": { + "name": "go" + }, + "name": "unknown_service_systemtest_test" + } + } + ] +} diff --git a/systemtest/approvals/TestOTLPGRPCTraces/data_streams_disabled.approved.json b/systemtest/approvals/TestOTLPGRPCTraces/data_streams_disabled.approved.json new file mode 100644 index 00000000000..60428878a88 --- /dev/null +++ b/systemtest/approvals/TestOTLPGRPCTraces/data_streams_disabled.approved.json @@ -0,0 +1,66 @@ +{ + "events": [ + { + "@timestamp": "1970-01-01T00:02:03.000Z", + "agent": { + "name": "opentelemetry/go", + "version": "0.19.0" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic", + "outcome": "success" + }, + "labels": { + "resource_attribute_array": [ + "a", + "b" + ], + "span_attribute_array": [ + "a", + "b", + "c" + ] + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "framework": { + "name": "systemtest" + }, + "language": { + "name": "go" + }, + "name": "unknown_service_systemtest_test" + }, + "timestamp": { + "us": 123000000 + }, + "trace": { + "id": "d2acbef8b37655e48548fd9d61ad6114" + }, + "transaction": { + "duration": { + "us": 1000000 + }, + "id": "b3ee9be3b687a611", + "name": "operation_name", + "result": "Success", + "sampled": true, + "type": "custom" + } + } + ] +} diff --git a/systemtest/approvals/TestOTLPGRPCTraces/data_streams_enabled.approved.json b/systemtest/approvals/TestOTLPGRPCTraces/data_streams_enabled.approved.json new file mode 100644 index 00000000000..ae27a4cfd3f --- /dev/null +++ b/systemtest/approvals/TestOTLPGRPCTraces/data_streams_enabled.approved.json @@ -0,0 +1,114 @@ +{ + "events": [ + { + "@timestamp": "1970-01-01T00:02:03.000Z", + "agent": { + "name": "opentelemetry/go", + "version": "0.19.0" + }, + 
"data_stream.dataset": "apm", + "data_stream.namespace": "default", + "data_stream.type": "traces", + "ecs": { + "version": "dynamic" + }, + "event": { + "agent_id_status": "missing", + "ingested": "dynamic", + "outcome": "success" + }, + "labels": { + "resource_attribute_array": [ + "a", + "b" + ], + "span_attribute_array": [ + "a", + "b", + "c" + ] + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "framework": { + "name": "systemtest" + }, + "language": { + "name": "go" + }, + "name": "unknown_service_systemtest_test" + }, + "timestamp": { + "us": 123000000 + }, + "trace": { + "id": "d2acbef8b37655e48548fd9d61ad6114" + }, + "transaction": { + "duration": { + "us": 1000000 + }, + "id": "b3ee9be3b687a611", + "name": "operation_name", + "result": "Success", + "sampled": true, + "type": "custom" + } + }, + { + "@timestamp": "1970-01-01T00:02:03.001Z", + "agent": { + "name": "opentelemetry/go", + "version": "0.19.0" + }, + "data_stream.dataset": "apm.app", + "data_stream.namespace": "default", + "data_stream.type": "logs", + "ecs": { + "version": "dynamic" + }, + "labels": { + "resource_attribute_array": [ + "a", + "b" + ] + }, + "message": "a_span_event", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "framework": { + "name": "systemtest" + }, + "language": { + "name": "go" + }, + "name": "unknown_service_systemtest_test" + }, + "trace": { + "id": "d2acbef8b37655e48548fd9d61ad6114" + } + } + ] +} diff --git a/systemtest/approvals/TestRUMErrorSourcemapping.approved.json b/systemtest/approvals/TestRUMErrorSourcemapping.approved.json new file mode 100644 index 00000000000..2928f4b2dcb --- /dev/null +++ b/systemtest/approvals/TestRUMErrorSourcemapping.approved.json @@ -0,0 +1,304 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "rum-js", + "version": "0.0.0" + }, + "client": { + "ip": "127.0.0.1" + }, + "ecs": { + "version": "dynamic" + }, + "error": { + "culprit": "webpack:///webpack/bootstrap 6002740481c9666b0d38 in \u003canonymous\u003e", + "exception": [ + { + "message": "Uncaught Error: timeout test error", + "stacktrace": [ + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "__webpack_require__", + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/../test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": "test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "library_frame": true, + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t// 
__webpack_public_path__", + " \t__webpack_require__.p = \"\";", + "", + " \t// Load entry module and return exports" + ], + "pre": [ + "", + " \t// expose the modules object (__webpack_modules__)", + " \t__webpack_require__.m = modules;", + "", + " \t// expose the module cache" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003cunknown\u003e", + "line": { + "column": 0, + "context": " \t__webpack_require__.c = installedModules;", + "number": 33 + }, + "original": { + "abs_path": "http://localhost:8000/test/./e2e/general-usecase/bundle.js.map", + "colno": 181, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "invokeTask", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003cunknown\u003e", + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 15, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "runTask", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + "", + "", + "/** WEBPACK FOOTER **", + " ** webpack/bootstrap 6002740481c9666b0d38" + ], + "pre": [ + "", + " \t// __webpack_public_path__", + " \t__webpack_require__.p = \"\";", + "", + " \t// Load entry module and return exports" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "moduleId", + "line": { + "column": 0, + "context": " \treturn __webpack_require__(0);", + "number": 39 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 199, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "invoke", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + " \t\t\treturn installedModules[moduleId].exports;", + "", + " \t\t// Create a new module (and put it into the cache)", + " \t\tvar module = installedModules[moduleId] = {", + " \t\t\texports: {}," + ], + "pre": [ + "", + " \t// The require function", + " \tfunction __webpack_require__(moduleId) {", + "", + " \t\t// Check if module is in cache" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003canonymous\u003e", + "line": { + "column": 0, + "context": " \t\tif(installedModules[moduleId])", + "number": 8 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 33, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "timer", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + } + ], + "type": "Error" + } + ], + "grouping_key": 
"89e23da755c2dd759d2d529e37c92b8f", + "grouping_name": "Uncaught Error: log timeout test error", + "id": "aba2688e033848ce9c4e4005f1caa534", + "log": { + "message": "Uncaught Error: log timeout test error", + "stacktrace": [ + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003canonymous\u003e", + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + } + ] + } + }, + "event": { + "ingested": "dynamic" + }, + "http": { + "request": { + "referrer": "http://localhost:8000/test/e2e/" + } + }, + "message": "Uncaught Error: log timeout test error", + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "name": "apm-agent-js", + "version": "1.0.1" + }, + "source": { + "ip": "127.0.0.1", + "port": "dynamic" + }, + "timestamp": { + "us": "dynamic" + }, + "url": { + "domain": "localhost", + "full": "http://localhost:8000/test/e2e/general-usecase/", + "original": "http://localhost:8000/test/e2e/general-usecase/", + "path": "/test/e2e/general-usecase/", + "port": 8000, + "scheme": "http" + }, + "user_agent": { + "device": { + "name": "Other" + }, + "name": "Go-http-client", + "original": "Go-http-client/1.1", + "version": "1.1" + } + } + ] +} diff --git a/systemtest/approvals/TestRUMSpanSourcemapping.approved.json b/systemtest/approvals/TestRUMSpanSourcemapping.approved.json new file mode 100644 index 00000000000..090352b6b9c --- /dev/null +++ b/systemtest/approvals/TestRUMSpanSourcemapping.approved.json @@ -0,0 +1,140 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "rum-js", + "version": "0.0.0" + }, + "client": { + "ip": "127.0.0.1" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "outcome": "unknown" + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "parent": { + "id": "611f4fa950f04631" + }, + "processor": { + "event": "span", + "name": "transaction" + }, + "service": { + "name": "apm-agent-js" + }, + "source": { + "ip": "127.0.0.1", + "port": "dynamic" + }, + "span": { + "duration": { + "us": 643000 + }, + "http.url.original": "http://localhost:8000/test/e2e/general-usecase/span", + "id": "aaaaaaaaaaaaaaaa", + "name": "transaction", + "stacktrace": [ + { + "abs_path": "http://subdomain1.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + 
"", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003cunknown\u003e", + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://subdomain1.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": "test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://subdomain2.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003canonymous\u003e", + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://subdomain2.localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + } + ], + "start": { + "us": 0 + }, + "type": "transaction" + }, + "timestamp": { + "us": "dynamic" + }, + "trace": { + "id": "611f4fa950f04631aaaaaaaaaaaaaaaa" + }, + "transaction": { + "id": "611f4fa950f04631" + }, + "url": { + "original": "http://localhost:8000/test/e2e/general-usecase/span" + } + } + ] +} diff --git a/systemtest/approvals/TestRUMXForwardedFor.approved.json b/systemtest/approvals/TestRUMXForwardedFor.approved.json new file mode 100644 index 00000000000..d9e72b3f705 --- /dev/null +++ b/systemtest/approvals/TestRUMXForwardedFor.approved.json @@ -0,0 +1,77 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "rum-js", + "version": "5.5.0" + }, + "client": { + "geo": { + "city_name": "Perth", + "continent_name": "Oceania", + "country_iso_code": "AU", + "country_name": "Australia", + "location": { + "lat": -31.9474, + "lon": 115.8648 + }, + "region_iso_code": "AU-WA", + "region_name": "Western Australia" + }, + "ip": "220.244.41.16" + }, + "ecs": { + "version": "dynamic" + }, + "event": { + "ingested": "dynamic", + "outcome": "unknown" + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "transaction", + "name": "transaction" + }, + "service": { + "name": "rum-js-test" + }, + "source": { + "ip": "127.0.0.1", + "port": "dynamic" + }, + "timestamp": { + "us": "dynamic" + }, + "trace": { + "id": "611f4fa950f04631aaaaaaaaaaaaaaaa" + }, + "transaction": { + "duration": { + "us": 643000 + }, + "id": "611f4fa950f04631", + "sampled": true, + "span_count": { + "started": 0 + }, + "type": "page-load" + }, + "user_agent": { + "device": { + "name": "Other" + }, + "name": "Go-http-client", + "original": "Go-http-client/1.1", + "version": "1.1" + } + } + ] +} diff --git a/systemtest/approvals/TestServiceDestinationAggregation.approved.json b/systemtest/approvals/TestServiceDestinationAggregation.approved.json index 9d18c8708fb..934d377abb8 100644 --- 
a/systemtest/approvals/TestServiceDestinationAggregation.approved.json +++ b/systemtest/approvals/TestServiceDestinationAggregation.approved.json @@ -2,6 +2,9 @@ "events": [ { "@timestamp": "dynamic", + "agent": { + "name": "go" + }, "ecs": { "version": "dynamic" }, @@ -9,9 +12,7 @@ "ingested": "dynamic", "outcome": "unknown" }, - "metricset": { - "period": 1000 - }, + "metricset.name": "service_destination", "observer": { "ephemeral_id": "dynamic", "hostname": "dynamic", @@ -33,9 +34,7 @@ "resource": "resource", "response_time": { "count": 5, - "sum": { - "us": 5000000 - } + "sum.us": 5000000 } } } diff --git a/systemtest/approvals/TestSourcemapUploadInvalidElasticsearchConfig.approved.json b/systemtest/approvals/TestSourcemapUploadInvalidElasticsearchConfig.approved.json new file mode 100644 index 00000000000..305389a8c89 --- /dev/null +++ b/systemtest/approvals/TestSourcemapUploadInvalidElasticsearchConfig.approved.json @@ -0,0 +1,309 @@ +{ + "events": [ + { + "@timestamp": "dynamic", + "agent": { + "name": "rum-js", + "version": "0.0.0" + }, + "client": { + "ip": "127.0.0.1" + }, + "ecs": { + "version": "dynamic" + }, + "error": { + "culprit": "webpack:///webpack/bootstrap 6002740481c9666b0d38 in \u003canonymous\u003e", + "exception": [ + { + "message": "Uncaught Error: timeout test error", + "stacktrace": [ + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "__webpack_require__", + "library_frame": false, + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/../test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": "test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "library_frame": true, + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t// __webpack_public_path__", + " \t__webpack_require__.p = \"\";", + "", + " \t// Load entry module and return exports" + ], + "pre": [ + "", + " \t// expose the modules object (__webpack_modules__)", + " \t__webpack_require__.m = modules;", + "", + " \t// expose the module cache" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003cunknown\u003e", + "library_frame": false, + "line": { + "column": 0, + "context": " \t__webpack_require__.c = installedModules;", + "number": 33 + }, + "original": { + "abs_path": "http://localhost:8000/test/./e2e/general-usecase/bundle.js.map", + "colno": 181, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "invokeTask", + "library_frame": false, + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module 
cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003cunknown\u003e", + "library_frame": false, + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 15, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "runTask", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + "", + "", + "/** WEBPACK FOOTER **", + " ** webpack/bootstrap 6002740481c9666b0d38" + ], + "pre": [ + "", + " \t// __webpack_public_path__", + " \t__webpack_require__.p = \"\";", + "", + " \t// Load entry module and return exports" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "moduleId", + "library_frame": false, + "line": { + "column": 0, + "context": " \treturn __webpack_require__(0);", + "number": 39 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 199, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "invoke", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + }, + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + " \t\t\treturn installedModules[moduleId].exports;", + "", + " \t\t// Create a new module (and put it into the cache)", + " \t\tvar module = installedModules[moduleId] = {", + " \t\t\texports: {}," + ], + "pre": [ + "", + " \t// The require function", + " \tfunction __webpack_require__(moduleId) {", + "", + " \t\t// Check if module is in cache" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003canonymous\u003e", + "library_frame": false, + "line": { + "column": 0, + "context": " \t\tif(installedModules[moduleId])", + "number": 8 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 33, + "filename": "~/test/e2e/general-usecase/bundle.js.map", + "function": "timer", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + } + ], + "type": "Error" + } + ], + "grouping_key": "89e23da755c2dd759d2d529e37c92b8f", + "grouping_name": "Uncaught Error: log timeout test error", + "id": "aba2688e033848ce9c4e4005f1caa534", + "log": { + "message": "Uncaught Error: log timeout test error", + "stacktrace": [ + { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "context": { + "post": [ + "", + " \t\t// Check if module is in cache", + " \t\tif(installedModules[moduleId])", + " \t\t\treturn installedModules[moduleId].exports;", + "" + ], + "pre": [ + " \t// The module cache", + " \tvar installedModules = {};", + "", + " \t// The require function" + ] + }, + "exclude_from_grouping": false, + "filename": "webpack:///webpack/bootstrap 6002740481c9666b0d38", + "function": "\u003canonymous\u003e", + "library_frame": false, + "line": { + "column": 0, + "context": " \tfunction __webpack_require__(moduleId) {", + "number": 5 + }, + "original": { + "abs_path": "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "colno": 18, + "filename": 
"~/test/e2e/general-usecase/bundle.js.map", + "function": "\u003canonymous\u003e", + "lineno": 1 + }, + "sourcemap": { + "updated": true + } + } + ] + } + }, + "event": { + "ingested": "dynamic" + }, + "http": { + "request": { + "referrer": "http://localhost:8000/test/e2e/" + } + }, + "observer": { + "ephemeral_id": "dynamic", + "hostname": "dynamic", + "id": "dynamic", + "type": "apm-server", + "version": "dynamic", + "version_major": "dynamic" + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "name": "apm-agent-js", + "version": "1.0.1" + }, + "source": { + "ip": "127.0.0.1" + }, + "timestamp": { + "us": "dynamic" + }, + "url": { + "domain": "localhost", + "full": "http://localhost:8000/test/e2e/general-usecase/", + "original": "http://localhost:8000/test/e2e/general-usecase/", + "path": "/test/e2e/general-usecase/", + "port": 8000, + "scheme": "http" + }, + "user_agent": { + "device": { + "name": "Other" + }, + "name": "Go-http-client", + "original": "Go-http-client/1.1", + "version": "1.1" + } + } + ] +} diff --git a/systemtest/approvals/TestTransactionAggregation.approved.json b/systemtest/approvals/TestTransactionAggregation.approved.json index 1a051d6c8fd..20891e21d9e 100644 --- a/systemtest/approvals/TestTransactionAggregation.approved.json +++ b/systemtest/approvals/TestTransactionAggregation.approved.json @@ -2,6 +2,7 @@ "events": [ { "@timestamp": "dynamic", + "_doc_count": 5, "agent": { "name": "go" }, @@ -16,6 +17,7 @@ "hostname": "beowulf", "name": "beowulf" }, + "metricset.name": "transaction", "observer": { "ephemeral_id": "dynamic", "hostname": "dynamic", @@ -35,26 +37,25 @@ } }, "timeseries": { - "instance": "systemtest:name:865d6816622184cd" + "instance": "systemtest:abc:d8f2bb8faa13bba6" }, "transaction": { - "duration": { - "histogram": { - "counts": [ - 1 - ], - "values": [ - 1003519 - ] - } + "duration.histogram": { + "counts": [ + 5 + ], + "values": [ + 1003519 + ] }, - "name": "name", + "name": "abc", "root": true, "type": "backend" } }, { "@timestamp": "dynamic", + "_doc_count": 10, "agent": { "name": "go" }, @@ -69,6 +70,7 @@ "hostname": "beowulf", "name": "beowulf" }, + "metricset.name": "transaction", "observer": { "ephemeral_id": "dynamic", "hostname": "dynamic", @@ -88,25 +90,20 @@ } }, "timeseries": { - "instance": "systemtest:name:875650e21029d5b" + "instance": "systemtest:def:8445550d2ba82fde" }, "transaction": { - "duration": { - "histogram": { - "counts": [ - 1 - ], - "values": [ - 1003519 - ] - } + "duration.histogram": { + "counts": [ + 10 + ], + "values": [ + 1003519 + ] }, - "name": "name", + "name": "def", "root": true, - "type": "page-load" - }, - "user_agent": { - "name": "Chrome" + "type": "backend" } } ] diff --git a/systemtest/approvals/TestTransactionAggregationShutdown.approved.json b/systemtest/approvals/TestTransactionAggregationShutdown.approved.json index fc4a64cfdb2..7e564eda094 100644 --- a/systemtest/approvals/TestTransactionAggregationShutdown.approved.json +++ b/systemtest/approvals/TestTransactionAggregationShutdown.approved.json @@ -2,6 +2,7 @@ "events": [ { "@timestamp": "dynamic", + "_doc_count": 1, "agent": { "name": "go" }, @@ -16,6 +17,7 @@ "hostname": "beowulf", "name": "beowulf" }, + "metricset.name": "transaction", "observer": { "ephemeral_id": "dynamic", "hostname": "dynamic", @@ -35,18 +37,16 @@ } }, "timeseries": { - "instance": "systemtest:name:56ef3b5d147616b4" + "instance": "systemtest:name:28a46e05f123d23f" }, "transaction": { - "duration": { - "histogram": { - "counts": [ - 1 - ], - 
"values": [ - 1003519 - ] - } + "duration.histogram": { + "counts": [ + 1 + ], + "values": [ + 1003519 + ] }, "name": "name", "root": true, diff --git a/systemtest/auth_test.go b/systemtest/auth_test.go new file mode 100644 index 00000000000..e52425d7b0e --- /dev/null +++ b/systemtest/auth_test.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "bytes" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +func TestAuth(t *testing.T) { + systemtest.InvalidateAPIKeys(t) + defer systemtest.InvalidateAPIKeys(t) + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + secretToken := strconv.Itoa(rng.Int()) + + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = secretToken + srv.Config.AgentAuth.APIKey = &apmservertest.APIKeyAuthConfig{Enabled: true} + srv.Config.AgentAuth.Anonymous = &apmservertest.AnonymousAuthConfig{ + Enabled: true, + AllowAgent: []string{"apm-agent-js", "rum-js"}, + } + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + apiKey := createAPIKey(t, t.Name()+":all") + apiKeySourcemap := createAPIKey(t, t.Name()+":sourcemap", "--sourcemap") + apiKeyIngest := createAPIKey(t, t.Name()+":ingest", "--ingest") + apiKeyAgentConfig := createAPIKey(t, t.Name()+":agentconfig", "--agent-config") + + runWithMethods := func(t *testing.T, name string, f func(t *testing.T, apiKey string, headers http.Header)) { + t.Run(name, func(t *testing.T) { + t.Run("anonymous", func(t *testing.T) { f(t, "", nil) }) + t.Run("secret_token", func(t *testing.T) { + f(t, "", http.Header{"Authorization": []string{"Bearer " + secretToken}}) + }) + t.Run("api_key", func(t *testing.T) { + f(t, "all", http.Header{"Authorization": []string{"ApiKey " + apiKey}}) + f(t, "sourcemap", http.Header{"Authorization": []string{"ApiKey " + apiKeySourcemap}}) + f(t, "ingest", http.Header{"Authorization": []string{"ApiKey " + apiKeyIngest}}) + f(t, "agentconfig", http.Header{"Authorization": []string{"ApiKey " + apiKeyAgentConfig}}) + }) + }) + } + + runWithMethods(t, "root", func(t *testing.T, apiKey string, headers http.Header) { + req, _ := http.NewRequest("GET", srv.URL, nil) + copyHeaders(req.Header, headers) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + if len(headers) == 0 { + assert.Empty(t, body) + } 
else { + assert.NotEmpty(t, body) + } + }) + + backendEventsPayload, err := ioutil.ReadFile("../testdata/intake-v2/transactions.ndjson") + require.NoError(t, err) + runWithMethods(t, "ingest", func(t *testing.T, apiKey string, headers http.Header) { + req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/events", bytes.NewReader(backendEventsPayload)) + req.Header.Set("Content-Type", "application/x-ndjson") + copyHeaders(req.Header, headers) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + body, _ := ioutil.ReadAll(resp.Body) + if len(headers) == 0 { + assert.Equal(t, http.StatusForbidden, resp.StatusCode, string(body)) + } else if apiKey == "sourcemap" || apiKey == "agentconfig" { + assert.Equal(t, http.StatusForbidden, resp.StatusCode, string(body)) + } else { + assert.Equal(t, http.StatusAccepted, resp.StatusCode, string(body)) + } + }) + + runWithMethods(t, "sourcemap", func(t *testing.T, apiKey string, headers http.Header) { + req := newUploadSourcemapRequest(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map", // bundle filepath + "apm-agent-js", // service name + "1.0.1", // service version + ) + copyHeaders(req.Header, headers) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + if len(headers) == 0 { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else if apiKey == "ingest" || apiKey == "agentconfig" { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusAccepted, resp.StatusCode) + } + }) + + // Create agent config to test the anonymous and authenticated responses. + settings := map[string]string{"transaction_sample_rate": "0.1", "sanitize_field_names": "foo,bar,baz"} + systemtest.CreateAgentConfig(t, "systemtest_service", "", "", settings) + completeSettings := `{"sanitize_field_names":"foo,bar,baz","transaction_sample_rate":"0.1"}` + anonymousSettings := `{"transaction_sample_rate":"0.1"}` + runWithMethods(t, "agentconfig", func(t *testing.T, apiKey string, headers http.Header) { + req, _ := http.NewRequest("GET", srv.URL+"/config/v1/agents", nil) + copyHeaders(req.Header, headers) + req.Header.Add("Content-Type", "application/json") + req.URL.RawQuery = url.Values{"service.name": []string{"systemtest_service"}}.Encode() + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + if apiKey == "ingest" || apiKey == "sourcemap" { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusOK, resp.StatusCode) + body, _ := ioutil.ReadAll(resp.Body) + if len(headers) == 0 { + // Anonymous auth succeeds because RUM is enabled, which + // auto enables anonymous auth. However, only a subset of + // the config is returned. + assert.Equal(t, anonymousSettings, strings.TrimSpace(string(body))) + } else { + assert.Equal(t, completeSettings, strings.TrimSpace(string(body))) + } + } + }) + + // RUM endpoints do not require auth, but if credentials are provided they will still be checked. 
+ runWithMethods(t, "rum_agentconfig", func(t *testing.T, apiKey string, headers http.Header) { + req, _ := http.NewRequest("GET", srv.URL+"/config/v1/rum/agents", nil) + copyHeaders(req.Header, headers) + req.Header.Add("Content-Type", "application/json") + req.URL.RawQuery = url.Values{"service.name": []string{"systemtest_service"}}.Encode() + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + if apiKey == "ingest" || apiKey == "sourcemap" { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusOK, resp.StatusCode) + body, _ := ioutil.ReadAll(resp.Body) + if len(headers) == 0 { + // Anonymous auth succeeds because RUM is enabled, which + // auto enables anonymous auth. However, only a subset of + // the config is returned. + assert.Equal(t, anonymousSettings, strings.TrimSpace(string(body))) + } else { + assert.Equal(t, completeSettings, strings.TrimSpace(string(body))) + } + } + }) + rumEventsPayload, err := ioutil.ReadFile("../testdata/intake-v2/transactions_spans_rum.ndjson") + require.NoError(t, err) + runWithMethods(t, "rum_ingest", func(t *testing.T, apiKey string, headers http.Header) { + req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/rum/events", bytes.NewReader(rumEventsPayload)) + req.Header.Set("Content-Type", "application/x-ndjson") + copyHeaders(req.Header, headers) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + body, _ := ioutil.ReadAll(resp.Body) + if len(headers) == 0 { + assert.Equal(t, http.StatusAccepted, resp.StatusCode, string(body)) + } else if apiKey == "sourcemap" || apiKey == "agentconfig" { + assert.Equal(t, http.StatusForbidden, resp.StatusCode, string(body)) + } else { + assert.Equal(t, http.StatusAccepted, resp.StatusCode, string(body)) + } + }) +} + +func copyHeaders(to, from http.Header) { + for k, values := range from { + for _, v := range values { + to.Add(k, v) + } + } +} + +func createAPIKey(t *testing.T, name string, args ...string) string { + args = append([]string{"--name", name, "--json"}, args...) + cmd := apiKeyCommand("create", args...) + out, err := cmd.CombinedOutput() + require.NoError(t, err) + attrs := decodeJSONMap(t, bytes.NewReader(out)) + return attrs["credentials"].(string) +} diff --git a/systemtest/benchtest/clients.go b/systemtest/benchtest/clients.go new file mode 100644 index 00000000000..a1cb1392bff --- /dev/null +++ b/systemtest/benchtest/clients.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package benchtest
+
+import (
+ "context"
+ "crypto/tls"
+ "os"
+ "testing"
+
+ "go.opentelemetry.io/otel/exporters/otlp"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.elastic.co/apm"
+ "go.elastic.co/apm/transport"
+)
+
+func init() {
+ // Close default tracer, we'll create new ones.
+ apm.DefaultTracer.Close()
+
+ // Disable TLS certificate verification; not important for benchmarking.
+ os.Setenv("ELASTIC_APM_VERIFY_SERVER_CERT", "false")
+}
+
+// NewTracer returns a new Elastic APM tracer, configured
+// to send to the target APM Server.
+func NewTracer(tb testing.TB) *apm.Tracer {
+ httpTransport, err := transport.NewHTTPTransport()
+ if err != nil {
+ tb.Fatal(err)
+ }
+ httpTransport.SetServerURL(serverURL)
+ httpTransport.SetSecretToken(*secretToken)
+ tracer, err := apm.NewTracerOptions(apm.TracerOptions{
+ Transport: httpTransport,
+ })
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(tracer.Close)
+ return tracer
+}
+
+// NewOTLPExporter returns a new OpenTelemetry Go exporter, configured
+// to export to the target APM Server.
+func NewOTLPExporter(tb testing.TB) *otlp.Exporter {
+ endpoint := serverURL.Host
+ if serverURL.Port() == "" {
+ switch serverURL.Scheme {
+ case "http":
+ endpoint += ":80"
+ case "https":
+ endpoint += ":443"
+ }
+ }
+ opts := []otlpgrpc.Option{
+ otlpgrpc.WithEndpoint(endpoint),
+ otlpgrpc.WithDialOption(grpc.WithBlock()),
+ }
+ if *secretToken != "" {
+ opts = append(opts, otlpgrpc.WithHeaders(map[string]string{
+ "Authorization": "Bearer " + *secretToken,
+ }))
+ }
+ if serverURL.Scheme == "http" {
+ opts = append(opts, otlpgrpc.WithInsecure())
+ } else {
+ tlsCredentials := credentials.NewTLS(&tls.Config{
+ InsecureSkipVerify: true,
+ })
+ opts = append(opts, otlpgrpc.WithTLSCredentials(tlsCredentials))
+ }
+ exporter, err := otlp.NewExporter(context.Background(), otlpgrpc.NewDriver(opts...))
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(func() { exporter.Shutdown(context.Background()) })
+ return exporter
+}
diff --git a/systemtest/benchtest/expvar.go b/systemtest/benchtest/expvar.go
new file mode 100644
index 00000000000..0df5c49c33f
--- /dev/null
+++ b/systemtest/benchtest/expvar.go
@@ -0,0 +1,76 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package benchtest
+
+import (
+ "encoding/json"
+ "net/http"
+ "runtime"
+)
+
+// TODO(axw) reuse apmservertest.Expvar, expose function(s) for fetching
+// from APM Server given a URL.
+
+type expvar struct {
+ runtime.MemStats `json:"memstats"`
+ LibbeatStats
+ ElasticResponseStats
+ OTLPResponseStats
+
+ // UncompressedBytes holds the number of bytes of uncompressed
+ // data that the server has read from the Elastic APM events
+ // intake endpoint.
+ // + // TODO(axw) instrument the net/http.Transport to count bytes + // transferred, so we can measure for OTLP and Jaeger too. + // Alternatively, implement an in-memory reverse proxy that + // does the same. + UncompressedBytes int64 `json:"apm-server.decoder.uncompressed.bytes"` +} + +type ElasticResponseStats struct { + TotalElasticResponses int64 `json:"apm-server.server.response.count"` + ErrorElasticResponses int64 `json:"apm-server.server.response.errors.count"` +} + +type OTLPResponseStats struct { + TotalOTLPMetricsResponses int64 `json:"apm-server.otlp.grpc.metrics.response.count"` + ErrorOTLPMetricsResponses int64 `json:"apm-server.otlp.grpc.metrics.response.errors.count"` + TotalOTLPTracesResponses int64 `json:"apm-server.otlp.grpc.traces.response.count"` + ErrorOTLPTracesResponses int64 `json:"apm-server.otlp.grpc.traces.response.errors.count"` +} + +type LibbeatStats struct { + ActiveEvents int64 `json:"libbeat.output.events.active"` + TotalEvents int64 `json:"libbeat.output.events.total"` +} + +func queryExpvar(out *expvar) error { + req, err := http.NewRequest("GET", *server+"/debug/vars", nil) + if err != nil { + return err + } + req.Header.Set("Accept", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return json.NewDecoder(resp.Body).Decode(out) +} diff --git a/systemtest/benchtest/flags.go b/systemtest/benchtest/flags.go new file mode 100644 index 00000000000..56760fbfc45 --- /dev/null +++ b/systemtest/benchtest/flags.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package benchtest
+
+import (
+	"flag"
+	"fmt"
+	"net/url"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+)
+
+var (
+	server        = flag.String("server", getenvDefault("ELASTIC_APM_SERVER_URL", "http://localhost:8200"), "apm-server URL")
+	count         = flag.Uint("count", 1, "run benchmarks `n` times")
+	agentsListStr = flag.String("agents", "1", "comma-separated `list` of agent counts to run each benchmark with")
+	benchtime     = flag.Duration("benchtime", time.Second, "run each benchmark for duration `d`")
+	secretToken   = flag.String("secret-token", os.Getenv("ELASTIC_APM_SECRET_TOKEN"), "secret token for APM Server")
+	match         = flag.String("run", "", "run only benchmarks matching `regexp`")
+
+	cpuprofile   = flag.String("cpuprofile", "", "Write a CPU profile to the specified file before exiting.")
+	memprofile   = flag.String("memprofile", "", "Write an allocation profile to the file before exiting.")
+	mutexprofile = flag.String("mutexprofile", "", "Write a mutex contention profile to the file before exiting.")
+	blockprofile = flag.String("blockprofile", "", "Write a goroutine blocking profile to the file before exiting.")
+
+	agentsList []int
+	serverURL  *url.URL
+	runRE      *regexp.Regexp
+)
+
+func getenvDefault(name, defaultValue string) string {
+	value := os.Getenv(name)
+	if value != "" {
+		return value
+	}
+	return defaultValue
+}
+
+func parseFlags() error {
+	flag.Parse()
+
+	// Parse -agents.
+	agentsList = nil
+	for _, val := range strings.Split(*agentsListStr, ",") {
+		val = strings.TrimSpace(val)
+		if val == "" {
+			continue
+		}
+		n, err := strconv.Atoi(val)
+		if err != nil || n <= 0 {
+			return fmt.Errorf("invalid value %q for -agents", val)
+		}
+		agentsList = append(agentsList, n)
+	}
+
+	// Parse -server.
+	u, err := url.Parse(*server)
+	if err != nil {
+		return err
+	}
+	serverURL = u
+
+	// Parse -run.
+	if *match != "" {
+		re, err := regexp.Compile(*match)
+		if err != nil {
+			return err
+		}
+		runRE = re
+	} else {
+		runRE = regexp.MustCompile(".")
+	}
+
+	// Set flags in package testing.
+	testing.Init()
+	if err := flag.Set("test.benchtime", benchtime.String()); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/systemtest/benchtest/main.go b/systemtest/benchtest/main.go
new file mode 100644
index 00000000000..c2236f1dfe3
--- /dev/null
+++ b/systemtest/benchtest/main.go
@@ -0,0 +1,185 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package benchtest
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"go.elastic.co/apm/stacktrace"
+)
+
+// BenchmarkFunc is the benchmark function type accepted by Run.
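+//
+// A minimal sketch of a conforming function, assuming the target
+// server accepts Elastic APM events (the function name and payload
+// here are illustrative only):
+//
+//	func BenchmarkIntake(b *testing.B) {
+//		b.RunParallel(func(pb *testing.PB) {
+//			tracer := NewTracer(b)
+//			for pb.Next() {
+//				tracer.StartTransaction("name", "type").End()
+//			}
+//			tracer.Flush(nil)
+//		})
+//	}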
+type BenchmarkFunc func(*testing.B)
+
+const benchmarkFuncPrefix = "Benchmark"
+
+type benchmark struct {
+	name string
+	f    BenchmarkFunc
+}
+
+func runBenchmark(f func(b *testing.B)) (testing.BenchmarkResult, bool, error) {
+	// Run the benchmark. testing.Benchmark will invoke the function
+	// multiple times, but only returns the final result.
+	var ok bool
+	var before, after expvar
+	result := testing.Benchmark(func(b *testing.B) {
+		if err := queryExpvar(&before); err != nil {
+			b.Error(err)
+			ok = !b.Failed()
+			return
+		}
+		f(b)
+		for !b.Failed() {
+			if err := queryExpvar(&after); err != nil {
+				b.Error(err)
+				break
+			}
+			if after.ActiveEvents == 0 {
+				break
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+		ok = !b.Failed()
+	})
+	if result.Extra != nil {
+		addExpvarMetrics(&result, before, after)
+	}
+	return result, ok, nil
+}
+
+func addExpvarMetrics(result *testing.BenchmarkResult, before, after expvar) {
+	result.MemAllocs = after.MemStats.Mallocs - before.MemStats.Mallocs
+	result.MemBytes = after.MemStats.TotalAlloc - before.MemStats.TotalAlloc
+	result.Bytes = after.UncompressedBytes - before.UncompressedBytes
+	result.Extra["events/sec"] = float64(after.TotalEvents-before.TotalEvents) / result.T.Seconds()
+
+	// Record the number of error responses returned by the server: lower is better.
+	errorResponsesAfter := after.ErrorElasticResponses + after.ErrorOTLPTracesResponses + after.ErrorOTLPMetricsResponses
+	errorResponsesBefore := before.ErrorElasticResponses + before.ErrorOTLPTracesResponses + before.ErrorOTLPMetricsResponses
+	errorResponses := errorResponsesAfter - errorResponsesBefore
+	result.Extra["error_responses/sec"] = float64(errorResponses) / result.T.Seconds()
+}
+
+func fullBenchmarkName(name string, agents int) string {
+	if agents != 1 {
+		return fmt.Sprintf("%s-%d", name, agents)
+	}
+	return name
+}
+
+func benchmarkFuncName(f BenchmarkFunc) (string, error) {
+	ffunc := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+	if ffunc == nil {
+		return "", errors.New("runtime.FuncForPC returned nil")
+	}
+	fullName := ffunc.Name()
+	_, name := stacktrace.SplitFunctionName(fullName)
+	if !strings.HasPrefix(name, benchmarkFuncPrefix) {
+		return "", fmt.Errorf("benchmark function names must begin with %q (got %q)", benchmarkFuncPrefix, fullName)
+	}
+	return name, nil
+}
+
+// Run runs the given benchmarks according to the flags defined.
+//
+// Run expects to receive statically-defined functions whose names
+// are all prefixed with "Benchmark", like those that are designed
+// to work with "go test".
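+//
+// For example, a benchmark binary's main function might invoke it
+// as cmd/apmbench in this change does (sketch):
+//
+//	func main() {
+//		if err := benchtest.Run(BenchmarkIntake); err != nil {
+//			log.Fatal(err)
+//		}
+//	}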
+func Run(allBenchmarks ...BenchmarkFunc) error { + if err := parseFlags(); err != nil { + return err + } + var profiles profiles + if err := profiles.init(); err != nil { + return err + } + defer func() { + if err := profiles.writeProfiles(); err != nil { + log.Printf("failed to write profiles: %s", err) + } + }() + + var matchRE *regexp.Regexp + if *match != "" { + re, err := regexp.Compile(*match) + if err != nil { + return err + } + matchRE = re + } + benchmarks := make([]benchmark, 0, len(allBenchmarks)) + for _, benchmarkFunc := range allBenchmarks { + name, err := benchmarkFuncName(benchmarkFunc) + if err != nil { + return err + } + if matchRE == nil || matchRE.MatchString(name) { + benchmarks = append(benchmarks, benchmark{ + name: name, + f: benchmarkFunc, + }) + } + } + sort.Slice(benchmarks, func(i, j int) bool { + return benchmarks[i].name < benchmarks[j].name + }) + + var maxLen int + for _, agents := range agentsList { + for _, benchmark := range benchmarks { + if n := len(fullBenchmarkName(benchmark.name, agents)); n > maxLen { + maxLen = n + } + } + } + + for _, agents := range agentsList { + runtime.GOMAXPROCS(int(agents)) + for _, benchmark := range benchmarks { + name := fullBenchmarkName(benchmark.name, agents) + for i := 0; i < int(*count); i++ { + profileChan := profiles.record(name) + result, ok, err := runBenchmark(benchmark.f) + if err != nil { + return err + } + if !ok { + fmt.Fprintf(os.Stderr, "--- FAIL: %s\n", name) + return fmt.Errorf("benchmark %q failed", name) + } else { + fmt.Fprintf(os.Stderr, "%-*s\t%s\t%s\n", maxLen, name, result, result.MemString()) + } + if err := <-profileChan; err != nil { + return err + } + } + } + } + return nil +} diff --git a/systemtest/benchtest/profiles.go b/systemtest/benchtest/profiles.go new file mode 100644 index 00000000000..942fc39051f --- /dev/null +++ b/systemtest/benchtest/profiles.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package benchtest
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/google/pprof/profile"
+)
+
+func fetchProfile(urlPath string, duration time.Duration) (*profile.Profile, error) {
+	req, err := http.NewRequest("GET", *server+urlPath, nil)
+	if err != nil {
+		return nil, err
+	}
+	if duration > 0 {
+		query := req.URL.Query()
+		query.Set("seconds", strconv.Itoa(int(duration.Seconds())))
+		req.URL.RawQuery = query.Encode()
+
+		timeout := time.Duration(float64(duration) * 1.5)
+		ctx, cancel := context.WithTimeout(req.Context(), timeout)
+		defer cancel()
+		req = req.WithContext(ctx)
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		body, _ := ioutil.ReadAll(resp.Body)
+		return nil, fmt.Errorf("failed to fetch profile (%s): %s", resp.Status, body)
+	}
+	return profile.Parse(resp.Body)
+}
+
+type profiles struct {
+	benchmarkNames []string
+	cpu            []*profile.Profile
+	heap           []*profile.Profile
+	mutex          []*profile.Profile
+	block          []*profile.Profile
+}
+
+func (p *profiles) init() error {
+	return p.recordCumulatives()
+}
+
+func (p *profiles) record(benchmarkName string) <-chan error {
+	record := func() error {
+		p.benchmarkNames = append(p.benchmarkNames, benchmarkName)
+		if err := p.recordCPU(); err != nil {
+			return err
+		}
+		return p.recordCumulatives()
+	}
+	ch := make(chan error, 1)
+	go func() { ch <- record() }()
+	return ch
+}
+
+func (p *profiles) recordCPU() error {
+	if *cpuprofile == "" {
+		return nil
+	}
+	duration := 2 * (*benchtime)
+	profile, err := fetchProfile("/debug/pprof/profile", duration)
+	if err != nil {
+		return fmt.Errorf("failed to fetch CPU profile: %w", err)
+	}
+	p.cpu = append(p.cpu, profile)
+	return nil
+}
+
+func (p *profiles) recordCumulatives() error {
+	if err := p.recordCumulative(memprofile, "/debug/pprof/heap", &p.heap); err != nil {
+		return err
+	}
+	if err := p.recordCumulative(mutexprofile, "/debug/pprof/mutex", &p.mutex); err != nil {
+		return err
+	}
+	if err := p.recordCumulative(blockprofile, "/debug/pprof/block", &p.block); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *profiles) recordCumulative(flag *string, urlPath string, out *[]*profile.Profile) error {
+	if *flag == "" {
+		return nil
+	}
+	profile, err := fetchProfile(urlPath, 0)
+	if err != nil {
+		return err
+	}
+	*out = append(*out, profile)
+	return nil
+}
+
+func (p *profiles) writeProfiles() error {
+	if err := p.writeCumulative(*memprofile, p.heap); err != nil {
+		return err
+	}
+	if err := p.writeCumulative(*mutexprofile, p.mutex); err != nil {
+		return err
+	}
+	if err := p.writeCumulative(*blockprofile, p.block); err != nil {
+		return err
+	}
+	if err := p.writeDeltas(*cpuprofile, p.cpu); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *profiles) writeCumulative(filename string, cumulative []*profile.Profile) error {
+	if len(cumulative) == 0 {
+		return nil
+	}
+	p0 := cumulative[0]
+	deltas := make([]*profile.Profile, len(cumulative)-1)
+	for i, p1 := range cumulative[1:] {
+		delta, err := computeDeltaProfile(p0, p1)
+		if err != nil {
+			return err
+		}
+		deltas[i] = delta
+		p0 = p1
+	}
+	return p.writeDeltas(filename, deltas)
+}
+
+func (p *profiles) writeDeltas(filename string, deltas []*profile.Profile) error {
+	if len(deltas) == 0 {
+		return nil
+	}
+	merged, err := p.mergeBenchmarkProfiles(deltas)
+	if err != nil {
+		return err
+	}
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+ defer f.Close() + return merged.Write(f) +} + +func (p *profiles) mergeBenchmarkProfiles(profiles []*profile.Profile) (*profile.Profile, error) { + for i, profile := range profiles { + benchmarkName := p.benchmarkNames[i] + profile.SetLabel("benchmark", []string{benchmarkName}) + } + merged, err := profile.Merge(profiles) + if err != nil { + return nil, fmt.Errorf("error merging profiles: %w", err) + } + return merged, nil +} + +func computeDeltaProfile(p0, p1 *profile.Profile) (*profile.Profile, error) { + p0.Scale(-1) + defer p0.Scale(-1) // return to initial state + + merged, err := profile.Merge([]*profile.Profile{p0, p1}) + if err != nil { + return nil, fmt.Errorf("error computing delta profile: %w", err) + } + merged.TimeNanos = p1.TimeNanos + merged.DurationNanos = p1.TimeNanos - p0.TimeNanos + return merged, nil +} diff --git a/systemtest/cmd/apmbench/main.go b/systemtest/cmd/apmbench/main.go new file mode 100644 index 00000000000..caf8e175580 --- /dev/null +++ b/systemtest/cmd/apmbench/main.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "context" + "log" + "testing" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + + "github.com/elastic/apm-server/systemtest/benchtest" +) + +func Benchmark1000Transactions(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + tracer := benchtest.NewTracer(b) + for pb.Next() { + for i := 0; i < 1000; i++ { + tracer.StartTransaction("name", "type").End() + } + // TODO(axw) implement a transport that enables streaming + // events in a way that we can block when the queue is full, + // without flushing. Alternatively, make this an option in + // TracerOptions? 
+			tracer.Flush(nil)
+		}
+	})
+}
+
+func BenchmarkOTLPTraces(b *testing.B) {
+	b.RunParallel(func(pb *testing.PB) {
+		exporter := benchtest.NewOTLPExporter(b)
+		tracerProvider := sdktrace.NewTracerProvider(
+			sdktrace.WithSampler(sdktrace.AlwaysSample()),
+			sdktrace.WithBatcher(exporter, sdktrace.WithBlocking()),
+		)
+		tracer := tracerProvider.Tracer("tracer")
+		for pb.Next() {
+			_, span := tracer.Start(context.Background(), "name")
+			span.End()
+		}
+		tracerProvider.ForceFlush(context.Background())
+	})
+}
+
+func main() {
+	if err := benchtest.Run(
+		Benchmark1000Transactions,
+		BenchmarkOTLPTraces,
+	); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/systemtest/containers.go b/systemtest/containers.go
index 4261dbe472c..d50f0d720b6 100644
--- a/systemtest/containers.go
+++ b/systemtest/containers.go
@@ -18,16 +18,41 @@ package systemtest
 
 import (
+	"archive/tar"
+	"bytes"
 	"context"
+	"encoding/json"
+	"errors"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"log"
+	"net"
+	"net/url"
 	"os"
 	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
 	"time"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/nat"
+	"github.com/testcontainers/testcontainers-go"
+	"github.com/testcontainers/testcontainers-go/wait"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/elastic/apm-server/systemtest/apmservertest"
+	"github.com/elastic/apm-server/systemtest/estest"
+	"github.com/elastic/go-elasticsearch/v7"
+)
+
+const (
+	startContainersTimeout = 5 * time.Minute
+
+	fleetServerPort = "8220"
 )
 
 // StartStackContainers starts Docker containers for Elasticsearch and Kibana.
@@ -37,7 +62,7 @@ import (
 func StartStackContainers() error {
 	cmd := exec.Command(
 		"docker-compose", "-f", "../docker-compose.yml",
-		"up", "-d", "elasticsearch", "kibana",
+		"up", "-d", "elasticsearch", "kibana", "fleet-server",
 	)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -45,31 +70,74 @@ func StartStackContainers() error {
 		return err
 	}
 
-	// Wait for up to 5 minutes for Kibana to become healthy,
+	// Wait for up to 5 minutes for Kibana and Fleet Server to become healthy,
 	// which implies Elasticsearch is healthy too.
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	ctx, cancel := context.WithTimeout(context.Background(), startContainersTimeout)
 	defer cancel()
+	g, ctx := errgroup.WithContext(ctx)
+	g.Go(func() error { return waitContainerHealthy(ctx, "kibana") })
+	g.Go(func() error { return waitContainerHealthy(ctx, "fleet-server") })
+	return g.Wait()
+}
+
+// NewUnstartedElasticsearchContainer returns a new ElasticsearchContainer.
+func NewUnstartedElasticsearchContainer() (*ElasticsearchContainer, error) {
+	// Create a testcontainer.ContainerRequest based on the "elasticsearch" service
+	// defined in docker-compose.
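+	// The service's image, exposed ports, environment and networks
+	// are copied from the running compose project, inspected via the
+	// Docker API.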
docker, err := client.NewClientWithOpts(client.FromEnv) if err != nil { - return err + return nil, err } defer docker.Close() - containers, err := docker.ContainerList(ctx, types.ContainerListOptions{ - Filters: filters.NewArgs( - filters.Arg("label", "com.docker.compose.project=apm-server"), - filters.Arg("label", "com.docker.compose.service=kibana"), - ), - }) + container, err := stackContainerInfo(context.Background(), docker, "elasticsearch") + if err != nil { + return nil, err + } + containerDetails, err := docker.ContainerInspect(context.Background(), container.ID) + if err != nil { + return nil, err + } + + req := testcontainers.ContainerRequest{ + Image: container.Image, + AutoRemove: true, + } + req.WaitingFor = wait.ForHTTP("/").WithPort("9200/tcp") + + for port := range containerDetails.Config.ExposedPorts { + req.ExposedPorts = append(req.ExposedPorts, string(port)) + } + + env := make(map[string]string) + for _, kv := range containerDetails.Config.Env { + sep := strings.IndexRune(kv, '=') + k, v := kv[:sep], kv[sep+1:] + env[k] = v + } + for network := range containerDetails.NetworkSettings.Networks { + req.Networks = append(req.Networks, network) + } + + // BUG(axw) ElasticsearchContainer currently does not support security. + env["xpack.security.enabled"] = "false" + + return &ElasticsearchContainer{request: req, Env: env}, nil +} + +func waitContainerHealthy(ctx context.Context, serviceName string) error { + docker, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return err } - if n := len(containers); n != 1 { - return fmt.Errorf("expected 1 kibana container, got %d", n) + defer docker.Close() + + container, err := stackContainerInfo(ctx, docker, serviceName) + if err != nil { + return err } - container := containers[0] first := true for { containerJSON, err := docker.ContainerInspect(ctx, container.ID) @@ -80,10 +148,379 @@ func StartStackContainers() error { break } if first { - log.Printf("Waiting for Kibana container (%s) to become healthy", container.ID) + log.Printf("Waiting for %s container (%s) to become healthy", serviceName, container.ID) first = false } time.Sleep(5 * time.Second) } return nil } + +func stackContainerInfo(ctx context.Context, docker *client.Client, name string) (*types.Container, error) { + containers, err := docker.ContainerList(ctx, types.ContainerListOptions{ + Filters: filters.NewArgs( + filters.Arg("label", "com.docker.compose.project=apm-server"), + filters.Arg("label", "com.docker.compose.service="+name), + ), + }) + if err != nil { + return nil, err + } + if n := len(containers); n != 1 { + return nil, fmt.Errorf("expected 1 %s container, got %d", name, n) + } + return &containers[0], nil +} + +// ElasticsearchContainer represents an ephemeral Elasticsearch container. +// +// This can be used when the docker-compose "elasticsearch" service is insufficient. +type ElasticsearchContainer struct { + request testcontainers.ContainerRequest + container testcontainers.Container + + // Env holds the environment variables to pass to the container, + // and will be initialised with the values in the docker-compose + // "elasticsearch" service definition. + // + // BUG(axw) ElasticsearchContainer currently does not support security, + // and will set "xpack.security.enabled=false" by default. + Env map[string]string + + // Addr holds the "host:port" address for Elasticsearch's REST API. + // This will be populated by Start. + Addr string + + // Client holds a client for interacting with Elasticsearch's REST API. 
+ // This will be populated by Start. + Client *estest.Client +} + +// Start starts the container. +// +// The Addr and Client fields will be updated on successful return. +// +// The container will be removed when Close() is called, or otherwise by a +// reaper process if the test process is aborted. +func (c *ElasticsearchContainer) Start() error { + ctx, cancel := context.WithTimeout(context.Background(), startContainersTimeout) + defer cancel() + + // Update request from user-definable fields. + c.request.Env = c.Env + + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: c.request, + }) + if err != nil { + return err + } + c.container = container + + if err := c.container.Start(ctx); err != nil { + return err + } + ip, err := container.Host(ctx) + if err != nil { + return err + } + port, err := container.MappedPort(ctx, "9200") + if err != nil { + return err + } + c.Addr = net.JoinHostPort(ip, port.Port()) + + esURL := url.URL{Scheme: "http", Host: c.Addr} + config := newElasticsearchConfig() + config.Addresses[0] = esURL.String() + client, err := elasticsearch.NewClient(config) + if err != nil { + return err + } + c.Client = &estest.Client{Client: client} + + c.container = container + return nil +} + +// Close terminates and removes the container. +func (c *ElasticsearchContainer) Close() error { + if c.container == nil { + return nil + } + return c.container.Terminate(context.Background()) +} + +// NewUnstartedElasticAgentContainer returns a new ElasticAgentContainer. +func NewUnstartedElasticAgentContainer() (*ElasticAgentContainer, error) { + // Create a testcontainer.ContainerRequest to run Elastic Agent. + // We pull some configuration from the Kibana docker-compose service, + // such as the Docker network to use. + + docker, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + return nil, err + } + defer docker.Close() + + fleetServerContainer, err := stackContainerInfo(context.Background(), docker, "fleet-server") + if err != nil { + return nil, err + } + fleetServerContainerDetails, err := docker.ContainerInspect(context.Background(), fleetServerContainer.ID) + if err != nil { + return nil, err + } + + var fleetServerIPAddress string + var networks []string + for network, settings := range fleetServerContainerDetails.NetworkSettings.Networks { + networks = append(networks, network) + if fleetServerIPAddress == "" && settings.IPAddress != "" { + fleetServerIPAddress = settings.IPAddress + } + } + fleetServerURL := &url.URL{ + Scheme: "https", + Host: net.JoinHostPort(fleetServerIPAddress, fleetServerPort), + } + containerCACertPath := "/etc/pki/tls/certs/fleet-ca.pem" + hostCACertPath, err := filepath.Abs("../testing/docker/fleet-server/ca.pem") + if err != nil { + return nil, err + } + + // Use the same stack version as used for fleet-server. + agentImageVersion := fleetServerContainer.Image[strings.LastIndex(fleetServerContainer.Image, ":")+1:] + agentImage := "docker.elastic.co/beats/elastic-agent:" + agentImageVersion + if err := pullDockerImage(context.Background(), docker, agentImage); err != nil { + return nil, err + } + agentImageDetails, _, err := docker.ImageInspectWithRaw(context.Background(), agentImage) + if err != nil { + return nil, err + } + stackVersion := agentImageDetails.Config.Labels["org.label-schema.version"] + + // Build a custom elastic-agent image with a locally built apm-server binary injected. 
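+	// This bypasses the agent's artifact download: see
+	// buildElasticAgentImage below for how the image is assembled.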
+	agentImage, err = buildElasticAgentImage(context.Background(), docker, stackVersion, agentImageVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	req := testcontainers.ContainerRequest{
+		Image:      agentImage,
+		AutoRemove: true,
+		Networks:   networks,
+		BindMounts: map[string]string{hostCACertPath: containerCACertPath},
+		Env: map[string]string{
+			"FLEET_URL": fleetServerURL.String(),
+			"FLEET_CA":  containerCACertPath,
+		},
+	}
+	return &ElasticAgentContainer{
+		request:      req,
+		StackVersion: agentImageVersion,
+	}, nil
+}
+
+// ElasticAgentContainer represents an ephemeral Elastic Agent container.
+type ElasticAgentContainer struct {
+	container testcontainers.Container
+	request   testcontainers.ContainerRequest
+
+	// StackVersion holds the stack version of the container image,
+	// e.g. 8.0.0-SNAPSHOT.
+	StackVersion string
+
+	// ExposedPorts holds an optional list of ports to expose to the host.
+	ExposedPorts []string
+
+	// WaitingFor holds an optional wait strategy.
+	WaitingFor wait.Strategy
+
+	// Addrs holds the "host:port" address for each exposed port, mapped
+	// by exposed port. This will be populated by Start.
+	Addrs map[string]string
+
+	// FleetEnrollmentToken holds an optional Fleet enrollment token to
+	// use for enrolling the agent with Fleet. The agent will only enroll
+	// if this is specified.
+	FleetEnrollmentToken string
+}
+
+// Start starts the container.
+//
+// The Addrs field will be updated on successful return.
+//
+// The container will be removed when Close() is called, or otherwise by a
+// reaper process if the test process is aborted.
+func (c *ElasticAgentContainer) Start() error {
+	ctx, cancel := context.WithTimeout(context.Background(), startContainersTimeout)
+	defer cancel()
+
+	// Update request from user-definable fields.
+	if c.FleetEnrollmentToken != "" {
+		c.request.Env["FLEET_ENROLL"] = "1"
+		c.request.Env["FLEET_ENROLLMENT_TOKEN"] = c.FleetEnrollmentToken
+	}
+	c.request.ExposedPorts = c.ExposedPorts
+	c.request.WaitingFor = c.WaitingFor
+
+	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+		ContainerRequest: c.request,
+	})
+	if err != nil {
+		return err
+	}
+	c.container = container
+
+	if err := container.Start(ctx); err != nil {
+		return err
+	}
+	if len(c.request.ExposedPorts) > 0 {
+		hostIP, err := container.Host(ctx)
+		if err != nil {
+			return err
+		}
+		c.Addrs = make(map[string]string)
+		for _, exposedPort := range c.request.ExposedPorts {
+			mappedPort, err := container.MappedPort(ctx, nat.Port(exposedPort))
+			if err != nil {
+				return err
+			}
+			c.Addrs[exposedPort] = net.JoinHostPort(hostIP, mappedPort.Port())
+		}
+	}
+
+	c.container = container
+	return nil
+}
+
+// Close terminates and removes the container.
+func (c *ElasticAgentContainer) Close() error {
+	if c.container == nil {
+		return nil
+	}
+	return c.container.Terminate(context.Background())
+}
+
+// Logs returns an io.ReadCloser that can be used for reading the
+// container's combined stdout/stderr log. If the container has not
+// been created by Start(), Logs will return an error.
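+//
+// A typical use is dumping agent output when a test fails, e.g.
+// (sketch, mirroring fleet_test.go):
+//
+//	if logs, err := agent.Logs(ctx); err == nil {
+//		defer logs.Close()
+//		io.Copy(os.Stdout, logs)
+//	}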
+func (c *ElasticAgentContainer) Logs(ctx context.Context) (io.ReadCloser, error) {
+	if c.container == nil {
+		return nil, errors.New("container not created")
+	}
+	return c.container.Logs(ctx)
+}
+
+func pullDockerImage(ctx context.Context, docker *client.Client, imageRef string) error {
+	rc, err := docker.ImagePull(ctx, imageRef, types.ImagePullOptions{})
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+	_, err = io.Copy(ioutil.Discard, rc)
+	return err
+}
+
+func matchFleetServerAPIStatusHealthy(r io.Reader) bool {
+	var status struct {
+		Name    string `json:"name"`
+		Version string `json:"version"`
+		Status  string `json:"status"`
+	}
+	if err := json.NewDecoder(r).Decode(&status); err != nil {
+		return false
+	}
+	return status.Status == "HEALTHY"
+}
+
+// buildElasticAgentImage builds a Docker image from the published image with a locally built apm-server injected.
+func buildElasticAgentImage(ctx context.Context, docker *client.Client, stackVersion, imageVersion string) (string, error) {
+	imageName := fmt.Sprintf("elastic-agent-systemtest:%s", imageVersion)
+	log.Printf("Building image %s...", imageName)
+
+	// Build apm-server, and copy it into the elastic-agent container's "install" directory.
+	// This bypasses downloading the artifact.
+	arch := runtime.GOARCH
+	if arch == "amd64" {
+		arch = "x86_64"
+	}
+	apmServerInstallDir := fmt.Sprintf("./state/data/install/apm-server-%s-linux-%s", stackVersion, arch)
+	apmServerBinary, err := apmservertest.BuildServerBinary("linux")
+	if err != nil {
+		return "", err
+	}
+
+	// Binaries to copy from disk into the build context.
+	binaries := map[string]string{
+		"apm-server": apmServerBinary,
+	}
+
+	// Generate Dockerfile contents.
+	var dockerfile bytes.Buffer
+	fmt.Fprintf(&dockerfile, "FROM docker.elastic.co/beats/elastic-agent:%s\n", imageVersion)
+	fmt.Fprintf(&dockerfile, "COPY --chown=elastic-agent:elastic-agent apm-server apm-server.yml %s/\n", apmServerInstallDir)
+
+	// Files to generate in the build context.
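+	// The Dockerfile above, plus an empty apm-server.yml to sit
+	// alongside the binary in the install directory.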
+ generatedFiles := map[string][]byte{ + "Dockerfile": dockerfile.Bytes(), + "apm-server.yml": []byte(""), + } + + var buildContext bytes.Buffer + tarw := tar.NewWriter(&buildContext) + for name, path := range binaries { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + info, err := f.Stat() + if err != nil { + return "", err + } + if err := tarw.WriteHeader(&tar.Header{ + Name: name, + Size: info.Size(), + Mode: 0755, + Uname: "elastic-agent", + Gname: "elastic-agent", + }); err != nil { + return "", err + } + if _, err := io.Copy(tarw, f); err != nil { + return "", err + } + } + for name, content := range generatedFiles { + if err := tarw.WriteHeader(&tar.Header{ + Name: name, + Size: int64(len(content)), + Mode: 0644, + Uname: "elastic-agent", + Gname: "elastic-agent", + }); err != nil { + return "", err + } + if _, err := tarw.Write(content); err != nil { + return "", err + } + } + if err := tarw.Close(); err != nil { + return "", err + } + + resp, err := docker.ImageBuild(ctx, &buildContext, types.ImageBuildOptions{Tags: []string{imageName}}) + if err != nil { + return "", err + } + defer resp.Body.Close() + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return "", err + } + log.Printf("Built image %s", imageName) + return imageName, nil +} diff --git a/systemtest/elasticsearch.go b/systemtest/elasticsearch.go index 2d37d59dd28..d73095a1f08 100644 --- a/systemtest/elasticsearch.go +++ b/systemtest/elasticsearch.go @@ -37,7 +37,7 @@ import ( const ( adminElasticsearchUser = "admin" adminElasticsearchPass = "changeme" - maxElasticsearchBackoff = time.Second + maxElasticsearchBackoff = 10 * time.Second ) var ( @@ -75,9 +75,10 @@ func newElasticsearchConfig() elasticsearch.Config { addresses = append(addresses, u.String()) } return elasticsearch.Config{ - Addresses: addresses, + Addresses: addresses, + MaxRetries: 5, RetryBackoff: func(attempt int) time.Duration { - backoff := time.Duration(attempt*100) * time.Millisecond + backoff := (500 * time.Millisecond) * (1 << (attempt - 1)) if backoff > maxElasticsearchBackoff { backoff = maxElasticsearchBackoff } @@ -90,12 +91,12 @@ func newElasticsearchConfig() elasticsearch.Config { // and ingest node pipelines whose names start with "apm", // and deletes the default ILM policy "apm-rollover-30-days". func CleanupElasticsearch(t testing.TB) { - const prefix = "apm*" - requests := []estest.Request{ - esapi.IndicesDeleteRequest{Index: []string{prefix}}, - esapi.IngestDeletePipelineRequest{PipelineID: prefix}, - esapi.IndicesDeleteTemplateRequest{Name: prefix}, - } + const ( + legacyPrefix = "apm*" // Not "apm-*", as that would not capture the "apm" ingest pipeline. + apmTracesPrefix = "traces-apm*" + apmMetricsPrefix = "metrics-apm*" + apmLogsPrefix = "logs-apm*" + ) doReq := func(req estest.Request) error { _, err := Elasticsearch.Do(context.Background(), req, nil) @@ -105,12 +106,48 @@ func CleanupElasticsearch(t testing.TB) { return err } - var g errgroup.Group - for _, req := range requests { - req := req // copy for closure - g.Go(func() error { return doReq(req) }) + doParallel := func(requests ...estest.Request) { + t.Helper() + var g errgroup.Group + for _, req := range requests { + req := req // copy for closure + g.Go(func() error { return doReq(req) }) + } + if err := g.Wait(); err != nil { + t.Fatal(err) + } + } + + // Delete indices, data streams, and ingest pipelines. 
+	if err := doReq(esapi.IndicesDeleteRequest{Index: []string{
+		legacyPrefix,
+		// traces-apm*, metrics-apm*, and logs-apm* could get created
+		// as indices instead of data streams in some tests, so issue
+		// index delete requests for those too.
+		apmTracesPrefix,
+		apmMetricsPrefix,
+		apmLogsPrefix,
+	}}); err != nil {
+		t.Fatal(err)
 	}
-	if err := g.Wait(); err != nil {
+	doParallel(
+		esapi.IndicesDeleteDataStreamRequest{Name: legacyPrefix},
+		esapi.IndicesDeleteDataStreamRequest{Name: apmTracesPrefix},
+		esapi.IndicesDeleteDataStreamRequest{Name: apmMetricsPrefix},
+		esapi.IndicesDeleteDataStreamRequest{Name: apmLogsPrefix},
+		esapi.IngestDeletePipelineRequest{PipelineID: legacyPrefix},
+	)
+
+	// Delete index templates after deleting data streams.
+	doParallel(
+		esapi.IndicesDeleteTemplateRequest{Name: legacyPrefix},
+		esapi.IndicesDeleteIndexTemplateRequest{Name: apmTracesPrefix},
+		esapi.IndicesDeleteIndexTemplateRequest{Name: apmMetricsPrefix},
+		esapi.IndicesDeleteIndexTemplateRequest{Name: apmLogsPrefix},
+	)
+
+	// Also delete any composable index templates matching the legacy prefix.
+	if err := doReq(esapi.IndicesDeleteIndexTemplateRequest{Name: legacyPrefix}); err != nil {
 		t.Fatal(err)
 	}
@@ -121,7 +158,9 @@ func CleanupElasticsearch(t testing.TB) {
 			break
 		}
 		// Retry deleting, in case indices are still being deleted.
-		time.Sleep(100 * time.Millisecond)
+		const delay = 100 * time.Millisecond
+		t.Logf("failed to delete ILM policy (retrying in %s): %s", delay, err)
+		time.Sleep(delay)
 	}
 }
diff --git a/systemtest/environment_test.go b/systemtest/environment_test.go
new file mode 100644
index 00000000000..5bc4ce4a0a7
--- /dev/null
+++ b/systemtest/environment_test.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package systemtest_test + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/estest" +) + +func TestDefaultServiceEnvironment(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.DefaultServiceEnvironment = "default" + err := srv.Start() + require.NoError(t, err) + + defer os.Unsetenv("ELASTIC_APM_ENVIRONMENT") + tracerDefaultEnvironment := srv.Tracer() + + os.Setenv("ELASTIC_APM_ENVIRONMENT", "specified") + tracerSpecifiedEnvironment := srv.Tracer() + + tracerDefaultEnvironment.StartTransaction("default_environment", "type").End() + tracerDefaultEnvironment.Flush(nil) + + tracerSpecifiedEnvironment.StartTransaction("specified_environment", "type").End() + tracerSpecifiedEnvironment.Flush(nil) + + result := systemtest.Elasticsearch.ExpectMinDocs(t, 2, "apm-*", + estest.TermQuery{Field: "processor.event", Value: "transaction"}, + ) + environments := make(map[string]string) + for _, hit := range result.Hits.Hits { + transactionName := gjson.GetBytes(hit.RawSource, "transaction.name").String() + serviceEnvironment := gjson.GetBytes(hit.RawSource, "service.environment").String() + environments[transactionName] = serviceEnvironment + } + assert.Equal(t, map[string]string{ + "default_environment": "default", + "specified_environment": "specified", + }, environments) +} diff --git a/systemtest/errorgrouping_test.go b/systemtest/errorgrouping_test.go new file mode 100644 index 00000000000..88b88ce7948 --- /dev/null +++ b/systemtest/errorgrouping_test.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package systemtest_test
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tidwall/gjson"
+	"go.elastic.co/apm"
+
+	"github.com/elastic/apm-server/systemtest"
+	"github.com/elastic/apm-server/systemtest/apmservertest"
+	"github.com/elastic/apm-server/systemtest/estest"
+)
+
+func TestErrorGroupingName(t *testing.T) {
+	systemtest.CleanupElasticsearch(t)
+	srv := apmservertest.NewServer(t)
+
+	tracer := srv.Tracer()
+	tracer.NewError(errors.New("only_exception_message")).Send()
+	tracer.NewErrorLog(apm.ErrorLogRecord{Message: "only_log_message"}).Send()
+	tracer.NewErrorLog(apm.ErrorLogRecord{Message: "log_message_overrides", Error: errors.New("exception_message_overridden")}).Send()
+	tracer.Flush(nil)
+
+	result := systemtest.Elasticsearch.ExpectMinDocs(t, 3, "apm-*", estest.TermQuery{
+		Field: "processor.event",
+		Value: "error",
+	})
+
+	var names []string
+	for _, hit := range result.Hits.Hits {
+		names = append(names, gjson.GetBytes(hit.RawSource, "error.grouping_name").String())
+	}
+
+	assert.ElementsMatch(t, []string{
+		"only_exception_message",
+		"only_log_message",
+		"log_message_overrides",
+	}, names)
+}
diff --git a/systemtest/estest/client.go b/systemtest/estest/client.go
index 610ed2dc099..2aa6149aac5 100644
--- a/systemtest/estest/client.go
+++ b/systemtest/estest/client.go
@@ -39,7 +39,10 @@ func (es *Client) Do(
 	opts ...RequestOption,
 ) (*esapi.Response, error) {
 	requestOptions := requestOptions{
-		timeout:  10 * time.Second,
+		// Set the timeout to something high to account for Elasticsearch
+		// cluster and index/shard initialisation. Under normal conditions
+		// this timeout should never be reached.
+		timeout:  time.Minute,
 		interval: 100 * time.Millisecond,
 	}
 	for _, opt := range opts {
@@ -134,6 +137,19 @@ func WithInterval(d time.Duration) RequestOption {
 
 type ConditionFunc func(*esapi.Response) bool
 
+// AllCondition returns a ConditionFunc that returns true as
+// long as none of the supplied conditions returns false.
+func AllCondition(conds ...ConditionFunc) ConditionFunc {
+	return func(resp *esapi.Response) bool {
+		for _, cond := range conds {
+			if !cond(resp) {
+				return false
+			}
+		}
+		return true
+	}
+}
+
 func WithCondition(cond ConditionFunc) RequestOption {
 	return func(opts *requestOptions) {
 		opts.cond = cond
diff --git a/systemtest/estest/search.go b/systemtest/estest/search.go
index fb226280026..a594393b350 100644
--- a/systemtest/estest/search.go
+++ b/systemtest/estest/search.go
@@ -20,14 +20,50 @@ package estest
 import (
 	"context"
 	"encoding/json"
+	"strings"
+	"testing"
 
 	"github.com/elastic/go-elasticsearch/v7/esapi"
 	"github.com/elastic/go-elasticsearch/v7/esutil"
 )
 
+// ExpectDocs searches index with query, returning the results.
+//
+// ExpectDocs is equivalent to calling ExpectMinDocs with a minimum of 1.
+func (es *Client) ExpectDocs(t testing.TB, index string, query interface{}, opts ...RequestOption) SearchResult {
+	t.Helper()
+	return es.ExpectMinDocs(t, 1, index, query, opts...)
+}
+
+// ExpectMinDocs searches index with query, returning the results.
+//
+// If the search returns fewer than min results within the request timeout
+// (one minute by default), ExpectMinDocs will call t.Fatal().
+func (es *Client) ExpectMinDocs(t testing.TB, min int, index string, query interface{}, opts ...RequestOption) SearchResult {
+	t.Helper()
+	var result SearchResult
+	req := es.Search(index)
+	if min > 10 {
+		// Size defaults to 10.
If the caller expects more than 10, + // return it in the search so we don't have to search again. + req = req.WithSize(min) + } + if query != nil { + req = req.WithQuery(query) + } + opts = append(opts, WithCondition(AllCondition( + result.Hits.MinHitsCondition(min), + result.Hits.TotalHitsCondition(req), + ))) + if _, err := req.Do(context.Background(), &result, opts...); err != nil { + t.Fatal(err) + } + return result +} + func (es *Client) Search(index string) *SearchRequest { req := &SearchRequest{es: es} - req.Index = []string{index} + req.Index = strings.Split(index, ",") return req } @@ -45,21 +81,58 @@ func (r *SearchRequest) WithQuery(q interface{}) *SearchRequest { return r } +func (r *SearchRequest) WithSort(fieldDirection ...string) *SearchRequest { + r.Sort = fieldDirection + return r +} + +func (r *SearchRequest) WithSize(size int) *SearchRequest { + r.Size = &size + return r +} + func (r *SearchRequest) Do(ctx context.Context, out *SearchResult, opts ...RequestOption) (*esapi.Response, error) { return r.es.Do(ctx, &r.SearchRequest, out, opts...) } type SearchResult struct { - Hits SearchHits `json:"hits"` + Hits SearchHits `json:"hits"` + Aggregations map[string]json.RawMessage `json:"aggregations"` } type SearchHits struct { - Hits []SearchHit `json:"hits"` + Total SearchHitsTotal `json:"total"` + Hits []SearchHit `json:"hits"` +} + +type SearchHitsTotal struct { + Value int `json:"value"` + Relation string `json:"relation"` // "eq" or "gte" } // NonEmptyCondition returns a ConditionFunc which will return true if h.Hits is non-empty. func (h *SearchHits) NonEmptyCondition() ConditionFunc { - return func(*esapi.Response) bool { return len(h.Hits) != 0 } + return h.MinHitsCondition(1) +} + +// MinHitsCondition returns a ConditionFunc which will return true if the number of h.Hits +// is at least min. +func (h *SearchHits) MinHitsCondition(min int) ConditionFunc { + return func(*esapi.Response) bool { return len(h.Hits) >= min } +} + +// TotalHitsCondition returns a ConditionFunc which will return true if the number of h.Hits +// is at least h.Total.Value. If the condition returns false, it will update req.Size to +// accommodate the number of hits in the following search. +func (h *SearchHits) TotalHitsCondition(req *SearchRequest) ConditionFunc { + return func(*esapi.Response) bool { + if len(h.Hits) >= h.Total.Value { + return true + } + size := h.Total.Value + req.Size = &size + return false + } } type SearchHit struct { diff --git a/systemtest/export_test.go b/systemtest/export_test.go new file mode 100644 index 00000000000..bff513ed827 --- /dev/null +++ b/systemtest/export_test.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package systemtest_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +func exportConfigCommand(t *testing.T, args ...string) (_ *apmservertest.ServerCmd, homedir string) { + tempdir, err := ioutil.TempDir("", "systemtest") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(tempdir) }) + err = ioutil.WriteFile(filepath.Join(tempdir, "apm-server.yml"), nil, 0644) + require.NoError(t, err) + + allArgs := []string{"config", "--path.home", tempdir} + allArgs = append(allArgs, args...) + return apmservertest.ServerCommand("export", allArgs...), tempdir +} + +func TestExportConfigDefaults(t *testing.T) { + cmd, tempdir := exportConfigCommand(t) + out, err := cmd.CombinedOutput() + require.NoError(t, err) + + expectedConfig := strings.ReplaceAll(` +logging: + ecs: true + json: true + metrics: + enabled: false +path: + config: /home/apm-server + data: /home/apm-server/data + home: /home/apm-server + logs: /home/apm-server/logs +`[1:], "/home/apm-server", tempdir) + assert.Equal(t, expectedConfig, string(out)) +} + +func TestExportConfigOverrideDefaults(t *testing.T) { + cmd, tempdir := exportConfigCommand(t, + "-E", "logging.metrics.enabled=true", + ) + out, err := cmd.CombinedOutput() + require.NoError(t, err) + + expectedConfig := strings.ReplaceAll(` +logging: + ecs: true + json: true + metrics: + enabled: true +path: + config: /home/apm-server + data: /home/apm-server/data + home: /home/apm-server + logs: /home/apm-server/logs +`[1:], "/home/apm-server", tempdir) + assert.Equal(t, expectedConfig, string(out)) +} diff --git a/systemtest/fleet_test.go b/systemtest/fleet_test.go new file mode 100644 index 00000000000..1d376a345ba --- /dev/null +++ b/systemtest/fleet_test.go @@ -0,0 +1,294 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package systemtest_test + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + "go.elastic.co/apm" + "go.elastic.co/apm/transport" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/fleettest" +) + +func TestFleetIntegration(t *testing.T) { + apmIntegration := initAPMIntegration(t, nil) + tx := apmIntegration.Tracer.StartTransaction("name", "type") + tx.Duration = time.Second + tx.End() + apmIntegration.Tracer.Flush(nil) + + result := systemtest.Elasticsearch.ExpectDocs(t, "traces-*", nil) + systemtest.ApproveEvents( + t, t.Name(), result.Hits.Hits, + "@timestamp", "timestamp.us", + "trace.id", "transaction.id", + ) +} + +func TestFleetIntegrationAnonymousAuth(t *testing.T) { + apmIntegration := initAPMIntegration(t, map[string]interface{}{ + "secret_token": "abc123", + // RUM and anonymous auth are enabled by default. + "anonymous_allow_service": []interface{}{"allowed_service"}, + "anonymous_allow_agent": []interface{}{"allowed_agent"}, + }) + + makePayload := func(service, agent string) io.Reader { + const body = `{"metadata":{"service":{"name":%q,"agent":{"name":%q,"version":"5.5.0"}}}} +{"transaction":{"trace_id":"611f4fa950f04631aaaaaaaaaaaaaaaa","id":"611f4fa950f04631","type":"page-load","duration":643,"span_count":{"started":0}}}` + return strings.NewReader(fmt.Sprintf(body, service, agent)) + } + + test := func(service, agent string, statusCode int) { + req, _ := http.NewRequest("POST", apmIntegration.URL+"/intake/v2/rum/events", makePayload(service, agent)) + req.Header.Set("Content-Type", "application/x-ndjson") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + respBody, _ := ioutil.ReadAll(resp.Body) + require.Equal(t, statusCode, resp.StatusCode, string(respBody)) + } + test("allowed_service", "allowed_agent", http.StatusAccepted) + test("allowed_service", "denied_agent", http.StatusForbidden) + test("denied_service", "allowed_agent", http.StatusForbidden) +} + +func TestFleetPackageNonMultiple(t *testing.T) { + systemtest.CleanupElasticsearch(t) + cleanupFleet(t, systemtest.Fleet) + defer cleanupFleet(t, systemtest.Fleet) + + agentPolicy, _, err := systemtest.Fleet.CreateAgentPolicy( + "apm_systemtest", "default", "Agent policy for APM Server system tests", + ) + require.NoError(t, err) + + apmPackage := getAPMIntegrationPackage(t, systemtest.Fleet) + packagePolicy := fleettest.NewPackagePolicy(apmPackage, "apm", "default", agentPolicy.ID) + initAPMIntegrationPackagePolicyInputs(t, packagePolicy, apmPackage, nil) + + err = systemtest.Fleet.CreatePackagePolicy(packagePolicy) + require.NoError(t, err) + + // Attempting to add the "apm" integration to the agent policy twice should fail. + packagePolicy.Name = "apm-2" + err = systemtest.Fleet.CreatePackagePolicy(packagePolicy) + require.Error(t, err) + assert.EqualError(t, err, "Unable to create package policy. 
Package 'apm' already exists on this agent policy.") +} + +func initAPMIntegration(t testing.TB, vars map[string]interface{}) apmIntegration { + systemtest.CleanupElasticsearch(t) + cleanupFleet(t, systemtest.Fleet) + t.Cleanup(func() { cleanupFleet(t, systemtest.Fleet) }) + + agentPolicy, enrollmentAPIKey, err := systemtest.Fleet.CreateAgentPolicy( + "apm_systemtest", "default", "Agent policy for APM Server system tests", + ) + require.NoError(t, err) + + // Add the "apm" integration to the agent policy. + apmPackage := getAPMIntegrationPackage(t, systemtest.Fleet) + packagePolicy := fleettest.NewPackagePolicy(apmPackage, "apm", "default", agentPolicy.ID) + packagePolicy.Package.Name = apmPackage.Name + packagePolicy.Package.Version = apmPackage.Version + packagePolicy.Package.Title = apmPackage.Title + initAPMIntegrationPackagePolicyInputs(t, packagePolicy, apmPackage, vars) + + err = systemtest.Fleet.CreatePackagePolicy(packagePolicy) + require.NoError(t, err) + + // Enroll an elastic-agent to run the APM integration. + agent, err := systemtest.NewUnstartedElasticAgentContainer() + require.NoError(t, err) + agent.FleetEnrollmentToken = enrollmentAPIKey.APIKey + t.Cleanup(func() { agent.Close() }) + t.Cleanup(func() { + // Log the elastic-agent container output if the test fails. + if !t.Failed() { + return + } + if logs, err := agent.Logs(context.Background()); err == nil { + defer logs.Close() + if out, err := ioutil.ReadAll(logs); err == nil { + t.Logf("elastic-agent logs: %s", out) + } + } + }) + + // Start elastic-agent with port 8200 exposed, and wait for the server to service + // healthcheck requests to port 8200. + agent.ExposedPorts = []string{"8200"} + agent.WaitingFor = wait.ForHTTP("/").WithPort("8200/tcp").WithStartupTimeout(5 * time.Minute) + err = agent.Start() + require.NoError(t, err) + serverURL := &url.URL{Scheme: "http", Host: agent.Addrs["8200"]} + + // Create a Tracer which sends to the APM Server running under Elastic Agent. + httpTransport, err := transport.NewHTTPTransport() + require.NoError(t, err) + origTransport := httpTransport.Client.Transport + if secretToken, ok := vars["secret_token"].(string); ok { + httpTransport.SetSecretToken(secretToken) + } + httpTransport.Client.Transport = roundTripperFunc(func(r *http.Request) (*http.Response, error) { + r.Header.Set("X-Real-Ip", "10.11.12.13") + return origTransport.RoundTrip(r) + }) + httpTransport.SetServerURL(serverURL) + tracer, err := apm.NewTracerOptions(apm.TracerOptions{ + Transport: apmservertest.NewFilteringTransport( + httpTransport, + apmservertest.DefaultMetadataFilter{}, + ), + }) + require.NoError(t, err) + t.Cleanup(tracer.Close) + return apmIntegration{ + Agent: agent, + Tracer: tracer, + URL: serverURL.String(), + } +} + +type apmIntegration struct { + Agent *systemtest.ElasticAgentContainer + + // Tracer holds an apm.Tracer that may be used to send events + // to the server. + Tracer *apm.Tracer + + // URL holds the APM Server URL. 
+ URL string +} + +func initAPMIntegrationPackagePolicyInputs( + t testing.TB, packagePolicy *fleettest.PackagePolicy, apmPackage *fleettest.Package, varValues map[string]interface{}, +) { + assert.Len(t, apmPackage.PolicyTemplates, 1) + assert.Len(t, apmPackage.PolicyTemplates[0].Inputs, 1) + for _, input := range apmPackage.PolicyTemplates[0].Inputs { + vars := make(map[string]interface{}) + for _, inputVar := range input.Vars { + value, ok := varValues[inputVar.Name] + if !ok { + switch inputVar.Name { + case "host": + value = ":8200" + default: + value = inputVarDefault(inputVar) + } + } + varMap := map[string]interface{}{"type": inputVar.Type} + if value != nil { + varMap["value"] = value + } + vars[inputVar.Name] = varMap + } + packagePolicy.Inputs = append(packagePolicy.Inputs, fleettest.PackagePolicyInput{ + Type: input.Type, + Enabled: true, + Streams: []interface{}{}, + Vars: vars, + }) + } +} + +func inputVarDefault(inputVar fleettest.PackagePolicyTemplateInputVar) interface{} { + if inputVar.Default != nil { + return inputVar.Default + } + if inputVar.Multi { + return []interface{}{} + } + return nil +} + +func cleanupFleet(t testing.TB, fleet *fleettest.Client) { + cleanupFleetPolicies(t, fleet) + apmPackage := getAPMIntegrationPackage(t, fleet) + if apmPackage.Status == "installed" { + err := fleet.DeletePackage(apmPackage.Name, apmPackage.Version) + require.NoError(t, err) + } +} + +func getAPMIntegrationPackage(t testing.TB, fleet *fleettest.Client) *fleettest.Package { + var apmPackage *fleettest.Package + packages, err := fleet.ListPackages() + require.NoError(t, err) + for _, pkg := range packages { + if pkg.Name != "apm" { + continue + } + // ListPackages does not return all package details, + // so we call Package to get them. + apmPackage, err = fleet.Package(pkg.Name, pkg.Version) + require.NoError(t, err) + return apmPackage + } + t.Fatal("could not find package 'apm'") + panic("unreachable") +} + +func cleanupFleetPolicies(t testing.TB, fleet *fleettest.Client) { + apmAgentPolicies, err := fleet.AgentPolicies("ingest-agent-policies.name:apm_systemtest") + require.NoError(t, err) + if len(apmAgentPolicies) == 0 { + return + } + + agents, err := fleet.Agents() + require.NoError(t, err) + agentsByPolicy := make(map[string][]fleettest.Agent) + for _, agent := range agents { + agentsByPolicy[agent.PolicyID] = append(agentsByPolicy[agent.PolicyID], agent) + } + + for _, p := range apmAgentPolicies { + if agents := agentsByPolicy[p.ID]; len(agents) > 0 { + agentIDs := make([]string, len(agents)) + for i, agent := range agents { + agentIDs[i] = agent.ID + } + require.NoError(t, fleet.BulkUnenrollAgents(true, agentIDs...)) + } + err := fleet.DeleteAgentPolicy(p.ID) + require.NoError(t, err) + } +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} diff --git a/systemtest/fleettest/client.go b/systemtest/fleettest/client.go new file mode 100644 index 00000000000..830cbefcb56 --- /dev/null +++ b/systemtest/fleettest/client.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package fleettest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// Client provides methods for interacting with the Fleet API.
+type Client struct {
+	fleetURL string
+}
+
+// NewClient returns a new Client for interacting with the Fleet API,
+// using the given Kibana URL.
+func NewClient(kibanaURL string) *Client {
+	return &Client{fleetURL: kibanaURL + "/api/fleet"}
+}
+
+// Setup invokes the Fleet Setup API, returning an error if it fails.
+func (c *Client) Setup() error {
+	for _, path := range []string{"/setup", "/agents/setup"} {
+		req := c.newFleetRequest("POST", path, nil)
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+		if err := consumeResponse(resp, nil); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Agents returns the list of enrolled agents.
+func (c *Client) Agents() ([]Agent, error) {
+	resp, err := http.Get(c.fleetURL + "/agents")
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		List []Agent `json:"list"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return result.List, nil
+}
+
+// BulkUnenrollAgents bulk-unenrolls agents.
+func (c *Client) BulkUnenrollAgents(revoke bool, agentIDs ...string) error {
+	var body bytes.Buffer
+	type bulkUnenroll struct {
+		Agents []string `json:"agents"`
+		Revoke bool     `json:"revoke"`
+	}
+	if err := json.NewEncoder(&body).Encode(bulkUnenroll{agentIDs, revoke}); err != nil {
+		return err
+	}
+	req := c.newFleetRequest("POST", "/agents/bulk_unenroll", &body)
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return consumeResponse(resp, nil)
+}
+
+// AgentPolicies returns the Agent Policies matching the given KQL query.
+func (c *Client) AgentPolicies(kuery string) ([]AgentPolicy, error) {
+	u, err := url.Parse(c.fleetURL + "/agent_policies")
+	if err != nil {
+		return nil, err
+	}
+	query := u.Query()
+	query.Add("kuery", kuery)
+	u.RawQuery = query.Encode()
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Items []AgentPolicy `json:"items"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return result.Items, nil
+}
+
+// DeleteAgentPolicy deletes the Agent Policy with the given ID.
+func (c *Client) DeleteAgentPolicy(id string) error {
+	var body bytes.Buffer
+	type deleteAgentPolicy struct {
+		ID string `json:"agentPolicyId"`
+	}
+	if err := json.NewEncoder(&body).Encode(deleteAgentPolicy{id}); err != nil {
+		return err
+	}
+	req := c.newFleetRequest("POST", "/agent_policies/delete", &body)
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return consumeResponse(resp, nil)
+}
+
+// CreateAgentPolicy creates a new Agent Policy with the given name,
+// namespace, and description, returning the created policy and its
+// enrollment API key.
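+//
+// A minimal usage sketch (illustrative only; the name, namespace, and
+// description below are hypothetical, and a *Client named client is
+// assumed):
+//
+//	policy, key, err := client.CreateAgentPolicy("example", "default", "An example policy")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = policy.ID
+//	_ = key.APIKey // enrollment token for an elastic-agent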
+func (c *Client) CreateAgentPolicy(name, namespace, description string) (*AgentPolicy, *EnrollmentAPIKey, error) {
+	var body bytes.Buffer
+	type newAgentPolicy struct {
+		Name        string `json:"name,omitempty"`
+		Namespace   string `json:"namespace,omitempty"`
+		Description string `json:"description,omitempty"`
+	}
+	if err := json.NewEncoder(&body).Encode(newAgentPolicy{name, namespace, description}); err != nil {
+		return nil, nil, err
+	}
+	req, err := http.NewRequest("POST", c.fleetURL+"/agent_policies", &body)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("kbn-xsrf", "1")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Item AgentPolicy `json:"item"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, nil, err
+	}
+	enrollmentAPIKey, err := c.getAgentPolicyEnrollmentAPIKey(result.Item.ID)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &result.Item, enrollmentAPIKey, nil
+}
+
+func (c *Client) getAgentPolicyEnrollmentAPIKey(policyID string) (*EnrollmentAPIKey, error) {
+	keys, err := c.enrollmentAPIKeys("policy_id:" + policyID)
+	if err != nil {
+		return nil, err
+	}
+	if n := len(keys); n != 1 {
+		return nil, fmt.Errorf("expected 1 enrollment API key, got %d", n)
+	}
+	resp, err := http.Get(c.fleetURL + "/enrollment-api-keys/" + keys[0].ID)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Item EnrollmentAPIKey `json:"item"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return &result.Item, nil
+}
+
+func (c *Client) enrollmentAPIKeys(kuery string) ([]EnrollmentAPIKey, error) {
+	u, err := url.Parse(c.fleetURL + "/enrollment-api-keys")
+	if err != nil {
+		return nil, err
+	}
+	query := u.Query()
+	query.Add("kuery", kuery)
+	u.RawQuery = query.Encode()
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Items []EnrollmentAPIKey `json:"list"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return result.Items, nil
+}
+
+// ListPackages lists all packages available for installation.
+func (c *Client) ListPackages() ([]Package, error) {
+	resp, err := http.Get(c.fleetURL + "/epm/packages?experimental=true")
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Response []Package `json:"response"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return result.Response, nil
+}
+
+// Package returns information about the package with the given name and version.
+func (c *Client) Package(name, version string) (*Package, error) {
+	resp, err := http.Get(c.fleetURL + "/epm/packages/" + name + "-" + version)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var result struct {
+		Response Package `json:"response"`
+	}
+	if err := consumeResponse(resp, &result); err != nil {
+		return nil, err
+	}
+	return &result.Response, nil
+}
+
+// InstallPackage installs the package with the given name and version.
+func (c *Client) InstallPackage(name, version string) error {
+	req := c.newFleetRequest("POST", "/epm/packages/"+name+"-"+version, nil)
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return consumeResponse(resp, nil)
+}
+
+// DeletePackage deletes (uninstalls) the package with the given name and version.
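+// It issues a DELETE request to /api/fleet/epm/packages/<name>-<version>
+// against Kibana, mirroring the POST made by InstallPackage.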
+func (c *Client) DeletePackage(name, version string) error { + req := c.newFleetRequest("DELETE", "/epm/packages/"+name+"-"+version, nil) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return consumeResponse(resp, nil) +} + +// PackagePolicy returns information about the package policy with the given ID. +func (c *Client) PackagePolicy(id string) (*PackagePolicy, error) { + resp, err := http.Get(c.fleetURL + "/package_policies/" + id) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var result struct { + Item PackagePolicy `json:"item"` + } + if err := consumeResponse(resp, &result); err != nil { + return nil, err + } + return &result.Item, nil +} + +// CreatePackagePolicy adds an integration to a policy. +func (c *Client) CreatePackagePolicy(p *PackagePolicy) error { + var body bytes.Buffer + if err := json.NewEncoder(&body).Encode(p); err != nil { + return err + } + req := c.newFleetRequest("POST", "/package_policies", &body) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return consumeResponse(resp, nil) +} + +// DeletePackagePolicy deletes one or more package policies. +func (c *Client) DeletePackagePolicy(ids ...string) error { + var params struct { + PackagePolicyIDs []string `json:"packagePolicyIds"` + } + params.PackagePolicyIDs = ids + var body bytes.Buffer + if err := json.NewEncoder(&body).Encode(params); err != nil { + return err + } + req := c.newFleetRequest("POST", "/package_policies/delete", &body) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return consumeResponse(resp, nil) +} + +func (c *Client) newFleetRequest(method string, path string, body io.Reader) *http.Request { + req, err := http.NewRequest(method, c.fleetURL+path, body) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("kbn-xsrf", "1") + return req +} + +func consumeResponse(resp *http.Response, out interface{}) error { + if resp.StatusCode != http.StatusOK { + var e Error + if err := json.NewDecoder(resp.Body).Decode(&e); err != nil { + return err + } + return &e + } + if out != nil { + if err := json.NewDecoder(resp.Body).Decode(out); err != nil { + return err + } + } + return nil +} diff --git a/systemtest/fleettest/error.go b/systemtest/fleettest/error.go new file mode 100644 index 00000000000..2a7d0a5bcc6 --- /dev/null +++ b/systemtest/fleettest/error.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package fleettest + +// Error is an error type returned by Client methods on failed +// requests to the Kibana Fleet API. 
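+//
+// A minimal sketch of inspecting such an error (assumes a *Client
+// named client):
+//
+//	if err := client.Setup(); err != nil {
+//		var fleetErr *Error
+//		if errors.As(err, &fleetErr) {
+//			// fleetErr.StatusCode and fleetErr.Message describe the failure.
+//		}
+//	}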
+type Error struct { + StatusCode int `json:"statusCode"` + ErrorCode string `json:"error"` + Message string `json:"message"` +} + +// Error returns the error message. +func (e *Error) Error() string { + return e.Message +} diff --git a/systemtest/fleettest/helpers.go b/systemtest/fleettest/helpers.go new file mode 100644 index 00000000000..48ec61e2cd8 --- /dev/null +++ b/systemtest/fleettest/helpers.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package fleettest + +// NewPackagePolicy returns a new PackagePolicy for the package, +// with the given name, namespace, and agent policy ID. +func NewPackagePolicy(pkg *Package, name, namespace, agentPolicyID string) *PackagePolicy { + out := &PackagePolicy{ + Name: name, + Namespace: namespace, + AgentPolicyID: agentPolicyID, + Enabled: true, + } + out.Package.Name = pkg.Name + out.Package.Version = pkg.Version + out.Package.Title = pkg.Title + return out +} diff --git a/systemtest/fleettest/types.go b/systemtest/fleettest/types.go new file mode 100644 index 00000000000..4411dd017f1 --- /dev/null +++ b/systemtest/fleettest/types.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package fleettest + +import "time" + +// Agent holds details of a Fleet Agent. +type Agent struct { + ID string `json:"id"` + Active bool `json:"active"` + Status string `json:"status"` + Type string `json:"type"` + PolicyID string `json:"policy_id,omitempty"` + EnrolledAt time.Time `json:"enrolled_at,omitempty"` + UserProvidedMetadata map[string]interface{} `json:"user_provided_metadata,omitempty"` + LocalMetadata map[string]interface{} `json:"local_metadata,omitempty"` +} + +// AgentPolicy holds details of a Fleet Agent Policy. 
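+// Field names mirror the JSON returned by the Kibana Fleet
+// agent policies API.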
+type AgentPolicy struct { + ID string `json:"id"` + Name string `json:"name"` + Namespace string `json:"namespace"` + Description string `json:"description"` + Revision int `json:"revision"` + + Agents int `json:"agents"` + IsDefault bool `json:"is_default"` + MonitoringEnabled []string `json:"monitoring_enabled"` + PackagePolicies []string `json:"package_policies"` + Status string `json:"status"` + UpdatedAt time.Time `json:"updated_at"` + UpdatedBy string `json:"updated_by"` +} + +// PackagePolicy holds details of a Fleet Package Policy. +type PackagePolicy struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Namespace string `json:"namespace"` + Enabled bool `json:"enabled"` + Description string `json:"description"` + AgentPolicyID string `json:"policy_id"` + OutputID string `json:"output_id"` + Inputs []PackagePolicyInput `json:"inputs"` + Package struct { + Name string `json:"name"` + Version string `json:"version"` + Title string `json:"title"` + } `json:"package"` +} + +type PackagePolicyInput struct { + Type string `json:"type"` + Enabled bool `json:"enabled"` + Streams []interface{} `json:"streams"` + Config map[string]interface{} `json:"config,omitempty"` + Vars map[string]interface{} `json:"vars,omitempty"` +} + +type Package struct { + Name string `json:"name"` + Version string `json:"version"` + Release string `json:"release"` + Type string `json:"type"` + Title string `json:"title"` + Description string `json:"description"` + Download string `json:"download"` + Path string `json:"path"` + Status string `json:"status"` + PolicyTemplates []PackagePolicyTemplate `json:"policy_templates"` +} + +type PackagePolicyTemplate struct { + Inputs []PackagePolicyTemplateInput `json:"inputs"` +} + +type PackagePolicyTemplateInput struct { + Type string `json:"type"` + Title string `json:"title"` + TemplatePath string `json:"template_path"` + Description string `json:"description"` + Vars []PackagePolicyTemplateInputVar `json:"vars"` +} + +type PackagePolicyTemplateInputVar struct { + Name string `json:"name"` + Type string `json:"type"` + Default interface{} `json:"default"` + Required bool `json:"required"` + Multi bool `json:"multi"` +} + +type EnrollmentAPIKey struct { + ID string `json:"id"` + Active bool `json:"active"` + APIKeyID string `json:"api_key_id"` + Name string `json:"name"` + PolicyID string `json:"policy_id"` + CreatedAt time.Time `json:"created_at"` + + // APIKey is only returned when querying a specific enrollment API key, + // and not when listing keys. 
+ APIKey string `json:"api_key,omitempty"` +} diff --git a/systemtest/go.mod b/systemtest/go.mod index 8e0b84fd82c..2cbbf783d22 100644 --- a/systemtest/go.mod +++ b/systemtest/go.mod @@ -3,32 +3,43 @@ module github.com/elastic/apm-server/systemtest go 1.14 require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Microsoft/go-winio v0.4.14 // indirect - github.com/containerd/containerd v1.3.6 // indirect github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v1.13.1 - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible + github.com/docker/go-connections v0.4.0 github.com/elastic/apm-server/approvaltest v0.0.0-00010101000000-000000000000 github.com/elastic/go-elasticsearch/v7 v7.8.0 - github.com/elastic/go-sysinfo v1.4.0 // indirect + github.com/elastic/go-sysinfo v1.7.0 // indirect github.com/elastic/go-windows v1.0.1 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/google/pprof v0.0.0-20210406223550-17a10ee72223 github.com/jaegertracing/jaeger v1.18.1 github.com/mitchellh/mapstructure v1.1.2 github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/prometheus/procfs v0.1.3 // indirect - github.com/stretchr/testify v1.6.1 - github.com/tidwall/gjson v1.6.0 - go.elastic.co/apm v1.8.1-0.20200913025752-7af7e1529586 + github.com/prometheus/procfs v0.7.1 // indirect + github.com/stretchr/testify v1.7.0 + github.com/testcontainers/testcontainers-go v0.9.0 + github.com/tidwall/gjson v1.6.5 + github.com/tidwall/pretty v1.1.0 // indirect + go.elastic.co/apm v1.12.0 go.elastic.co/fastjson v1.1.0 + go.opentelemetry.io/otel v0.19.0 + go.opentelemetry.io/otel/exporters/otlp v0.19.0 + go.opentelemetry.io/otel/metric v0.19.0 + go.opentelemetry.io/otel/sdk v0.19.0 + go.opentelemetry.io/otel/sdk/export/metric v0.19.0 + go.opentelemetry.io/otel/sdk/metric v0.19.0 + go.opentelemetry.io/otel/trace v0.19.0 go.uber.org/zap v1.15.0 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 - google.golang.org/grpc v1.30.0 + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c + golang.org/x/tools v0.1.5 // indirect + google.golang.org/grpc v1.36.0 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible // indirect - howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5 // indirect + howett.net/plist v0.0.0-20201203080718-1454fab16a06 // indirect ) replace ( diff --git a/systemtest/go.sum b/systemtest/go.sum index ecd2178978c..48c2e333316 100644 --- a/systemtest/go.sum +++ b/systemtest/go.sum @@ -6,8 +6,11 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.4/go.mod 
h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -29,6 +32,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -36,13 +41,20 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod 
h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/containerd v1.3.6 h1:SMfcKoQyWhaRsYq7290ioC6XFcHDNcHvcEMjF6ORpac= -github.com/containerd/containerd v1.3.6/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -52,8 +64,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= -github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= -github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -61,6 +71,7 @@ github.com/dgraph-io/badger v1.5.3/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMT github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 h1:j0zqmciWFnhB01BT/CyfoXNEONoxerGjkcxM8i6tlXI= @@ -76,23 +87,27 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elastic/go-elasticsearch/v7 v7.8.0 h1:M9D55OK13IEgg51Jb57mZgseag1AsncwAUn4C6j1vlc= github.com/elastic/go-elasticsearch/v7 v7.8.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= -github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= +github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= +github.com/elastic/go-licenser v0.3.1/go.mod 
h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-sysinfo v1.4.0 h1:LUnK6TNOuy8JEByuDzTAQH3iQ6bIywy55+Z+QlKNSWk= -github.com/elastic/go-sysinfo v1.4.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg= +github.com/elastic/go-sysinfo v1.7.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -148,6 +163,14 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.6/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod 
h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -177,8 +200,10 @@ github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdl github.com/gogo/googleapis v1.0.1-0.20180501115203-b23578765ee5 h1:l3BMcdrtdBYa5PH99FBrPEWJGRODZFOjxHPnb2I7/98= github.com/gogo/googleapis v1.0.1-0.20180501115203-b23578765ee5/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -186,24 +211,40 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy 
v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210406223550-17a10ee72223 h1:P/57BP4StLsl2u/YPB7CXWwvwq3RMbt0SBjrtoRcivI= +github.com/google/pprof v0.0.0-20210406223550-17a10ee72223/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -222,7 +263,9 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jaegertracing/jaeger v1.18.1 h1:eFqjEpTKq2FfiZ/YX53oxeCePdIZyWvDfXaTAGj0r5E= github.com/jaegertracing/jaeger v1.18.1/go.mod h1:WRzMFH62rje1VgbShlgk6UbWUNoo08uFFvs/x50aZKk= @@ -236,12 +279,14 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -257,6 +302,7 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -268,8 +314,10 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -281,6 +329,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= @@ -291,13 +340,20 @@ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DV github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olivere/elastic v6.2.27+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -308,7 +364,6 @@ github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t 
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -333,12 +388,10 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.10 h1:QJQN3jYQhkamO4mhfUWqdDH2asK7ONOI9MTWjyAxNKM= github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.7.1 h1:TlEtJq5GvGqMykEwWzbZWjjztF86swFhsPix1i0bkgA= +github.com/prometheus/procfs v0.7.1/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -381,16 +434,24 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= +github.com/testcontainers/testcontainers-go v0.9.0 h1:ZyftCfROjGrKlxk3MOUn2DAzWrUtzY/mj17iAkdUIvI= +github.com/testcontainers/testcontainers-go v0.9.0/go.mod 
h1:b22BFXhRbg4PJmeMVWh6ftqjyZHgiIl3w274e9r3C2E= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/gjson v1.6.5 h1:P/K9r+1pt9AK54uap7HcoIp6T3a7AoMg3v18tUis+Cg= +github.com/tidwall/gjson v1.6.5/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8= github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= +github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U= github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -398,6 +459,8 @@ github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/tchannel-go v1.16.0/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= @@ -407,12 +470,10 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.elastic.co/apm v1.8.0 h1:AWEKpHwRal0yCMd4K8Oxy1HAa7xid+xq1yy+XjgoVU0= -go.elastic.co/apm v1.8.0/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= -go.elastic.co/apm v1.8.1-0.20200913025752-7af7e1529586 h1:JJ9QiMI7bUxZoU9SWMGvE8SFDErJ3eGCBFNlXZ38Vpc= -go.elastic.co/apm v1.8.1-0.20200913025752-7af7e1529586/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= -go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= -go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= 
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.elastic.co/apm v1.12.0 h1:0rYcZM/GPMeH0Er6DMFfHA8Flg5tf+XD9QbenrWWYWM= +go.elastic.co/apm v1.12.0/go.mod h1:v8Yf+VZ3NplRjQUWlvPG4EV/GGtDNCVUMaafrCnmGEM= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -420,6 +481,22 @@ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.opentelemetry.io/otel v0.19.0 h1:Lenfy7QHRXPZVsw/12CWpxX6d/JkrX8wrx2vO8G80Ng= +go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg= +go.opentelemetry.io/otel/exporters/otlp v0.19.0 h1:ez8agFGbFJJgBU9H3lfX0rxWhZlXqurgZKL4aDcOdqY= +go.opentelemetry.io/otel/exporters/otlp v0.19.0/go.mod h1:MY1xDqVxZmOlEYbMxUHLbg0uKlnmg4XSC6Qvh6XmPZk= +go.opentelemetry.io/otel/metric v0.19.0 h1:dtZ1Ju44gkJkYvo+3qGqVXmf88tc+a42edOywypengg= +go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc= +go.opentelemetry.io/otel/oteltest v0.19.0 h1:YVfA0ByROYqTwOxqHVZYZExzEpfZor+MU1rU+ip2v9Q= +go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA= +go.opentelemetry.io/otel/sdk v0.19.0 h1:13pQquZyGbIvGxBWcVzUqe8kg5VGbTBiKKKXpYCylRM= +go.opentelemetry.io/otel/sdk v0.19.0/go.mod h1:ouO7auJYMivDjywCHA6bqTI7jJMVQV1HdKR5CmH8DGo= +go.opentelemetry.io/otel/sdk/export/metric v0.19.0 h1:9A1PC2graOx3epRLRWbq4DPCdpMUYK8XeCrdAg6ycbI= +go.opentelemetry.io/otel/sdk/export/metric v0.19.0/go.mod h1:exXalzlU6quLTXiv29J+Qpj/toOzL3H5WvpbbjouTBo= +go.opentelemetry.io/otel/sdk/metric v0.19.0 h1:fka1Zc/lpRMS+KlTP/TRXZuaFtSjUg/maHV3U8rt1Mc= +go.opentelemetry.io/otel/sdk/metric v0.19.0/go.mod h1:t12+Mqmj64q1vMpxHlCGXGggo0sadYxEG6U+Us/9OA4= +go.opentelemetry.io/otel/trace v0.19.0 h1:1ucYlenXIDA1OlHVLDZKX0ObXV5RLaq06DtUKz5e5zc= +go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -448,18 +525,23 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -477,20 +559,23 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -511,19 +596,28 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 h1:W0lCpv29Hv0UaM1LXb9QlBHLNP8UFfcKjblhVCWftOM= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -546,22 +640,28 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d h1:7M9AXzLrJWWGdDYtBblPHBTnHtaN6KKQ98OYb35mLlY= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200218151345-dad8c97a84f5 h1:jB9+PJSvu5tBfmJHy/OVapFdjDF3WvpkqRhxqrmzoEU= google.golang.org/genproto v0.0.0-20200218151345-dad8c97a84f5/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -569,14 +669,25 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -588,23 +699,27 @@ gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuv gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5 h1:AQkaJpH+/FmqRjmXZPELom5zIERYZfwTjnHpfoVMQEc= -howett.net/plist v0.0.0-20200419221736-3b63eb3a43b5/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v0.0.0-20201203080718-1454fab16a06 h1:QDxUo/w2COstK1wIBYpzQlHX/NqaQTcf9jyz347nI58= +howett.net/plist v0.0.0-20201203080718-1454fab16a06/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/systemtest/headers_test.go b/systemtest/headers_test.go new file mode 100644 index 00000000000..73474777a1a --- /dev/null +++ b/systemtest/headers_test.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +func TestResponseHeaders(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.ResponseHeaders = http.Header{} + srv.Config.ResponseHeaders.Set("both", "all_value") + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true, ResponseHeaders: http.Header{}} + srv.Config.RUM.ResponseHeaders.Set("only_rum", "rum_value") + srv.Config.RUM.ResponseHeaders.Set("both", "rum_value") + err := srv.Start() + require.NoError(t, err) + + // Non-RUM response headers are added to responses of non-RUM specific routes. + resp, err := http.Get(srv.URL) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, []string{"all_value"}, resp.Header.Values("both")) + assert.Nil(t, resp.Header.Values("only_rum")) + + // Both RUM and non-RUM response headers are added to responses of RUM-specific routes. + // If the same key is defined in both, then the values are concatenated. + resp, err = http.Get(srv.URL + "/config/v1/rum/agents") + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, []string{"all_value", "rum_value"}, resp.Header.Values("both")) + assert.Equal(t, []string{"rum_value"}, resp.Header.Values("only_rum")) +} diff --git a/systemtest/helpers_test.go b/systemtest/helpers_test.go new file mode 100644 index 00000000000..3fa94c8a7b3 --- /dev/null +++ b/systemtest/helpers_test.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +// withDataStreams runs two sub-tests, calling f with and without data streams enabled. 
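+//
+// A minimal sketch of a call site (TestSomething is a hypothetical name;
+// real callers in this package pass named helpers, e.g. testApprovedMetrics):
+//
+//	func TestSomething(t *testing.T) {
+//		withDataStreams(t, func(t *testing.T, srv *apmservertest.Server) {
+//			// srv is unstarted, so the callback may adjust srv.Config
+//			// before starting it.
+//			require.NoError(t, srv.Start())
+//			// ... send events, assert on the indexed documents ...
+//		})
+//	}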
+func withDataStreams(t *testing.T, f func(t *testing.T, unstartedServer *apmservertest.Server)) { + t.Run("data_streams_disabled", func(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + f(t, srv) + }) + t.Run("data_streams_enabled", func(t *testing.T) { + systemtest.CleanupElasticsearch(t) + cleanupFleet(t, systemtest.Fleet) + integrationPackage := getAPMIntegrationPackage(t, systemtest.Fleet) + err := systemtest.Fleet.InstallPackage(integrationPackage.Name, integrationPackage.Version) + require.NoError(t, err) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.DataStreams = &apmservertest.DataStreamsConfig{Enabled: true} + f(t, srv) + }) +} diff --git a/systemtest/ingest_test.go b/systemtest/ingest_test.go new file mode 100644 index 00000000000..1d29b850f6f --- /dev/null +++ b/systemtest/ingest_test.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "go.elastic.co/apm" + + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/elastic/go-elasticsearch/v7/esutil" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/estest" +) + +func TestIngestPipelinePipeline(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewServer(t) + + tracer := srv.Tracer() + tx := tracer.StartTransaction("name", "type") + span1 := tx.StartSpan("name", "type", nil) + // If a destination address is recorded, and it is a valid + // IPv4 or IPv6 address, it will be copied to destination.ip. 
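+	// The two spans below exercise both branches: "::1" parses as an
+	// IP address and should be copied to destination.ip, while
+	// "testing.invalid" is a hostname and should not be.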
+ span1.Context.SetDestinationAddress("::1", 1234) + span1.End() + span2 := tx.StartSpan("name", "type", nil) + span2.Context.SetDestinationAddress("testing.invalid", 1234) + span2.End() + tx.End() + tracer.Flush(nil) + + getSpanDoc := func(spanID string) estest.SearchHit { + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{ + Field: "span.id", + Value: spanID, + }) + require.Len(t, result.Hits.Hits, 1) + return result.Hits.Hits[0] + } + + span1Doc := getSpanDoc(span1.TraceContext().Span.String()) + destinationIP := gjson.GetBytes(span1Doc.RawSource, "destination.ip") + assert.True(t, destinationIP.Exists()) + assert.Equal(t, "::1", destinationIP.String()) + + span2Doc := getSpanDoc(span2.TraceContext().Span.String()) + destinationIP = gjson.GetBytes(span2Doc.RawSource, "destination.ip") + assert.False(t, destinationIP.Exists()) // destination.address is not an IP +} + +func TestDataStreamMigrationIngestPipeline(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewServer(t) + + // Send a transaction, span, error, and metrics. + tracer := srv.Tracer() + tracer.RegisterMetricsGatherer(apm.GatherMetricsFunc(func(ctx context.Context, m *apm.Metrics) error { + m.Add("custom_metric", nil, 123) + return nil + })) + tx := tracer.StartTransaction("name", "type") + span := tx.StartSpan("name", "type", nil) + tracer.NewError(errors.New("boom")).Send() + span.End() + tx.End() + tracer.Flush(nil) + tracer.SendMetrics(nil) + + // We expect at least 6 events: + // - onboarding + // - transaction + // - span + // - error + // - internal metricset + // - app metricset + for _, query := range []interface{}{ + estest.TermQuery{Field: "processor.event", Value: "onboarding"}, + estest.TermQuery{Field: "processor.event", Value: "transaction"}, + estest.TermQuery{Field: "processor.event", Value: "span"}, + estest.TermQuery{Field: "processor.event", Value: "error"}, + estest.TermQuery{Field: "metricset.name", Value: "transaction_breakdown"}, + estest.TermQuery{Field: "metricset.name", Value: "app"}, + } { + systemtest.Elasticsearch.ExpectDocs(t, "apm-*", query) + } + + refresh := true + _, err := systemtest.Elasticsearch.Do(context.Background(), &esapi.ReindexRequest{ + Refresh: &refresh, + Body: esutil.NewJSONReader(map[string]interface{}{ + "source": map[string]interface{}{ + "index": "apm-*", + }, + "dest": map[string]interface{}{ + "index": "apm-migration", + "pipeline": "apm_data_stream_migration", + "op_type": "create", + }, + }), + }, nil) + require.NoError(t, err) + + // There should only be an onboarding doc in "apm-migration". 
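+	// Everything else is rerouted by the pipeline into per-event-type
+	// data stream indices, which the ExpectMinDocs assertions below
+	// verify.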
+ result := systemtest.Elasticsearch.ExpectDocs(t, "apm-migration", nil) + require.Len(t, result.Hits.Hits, 1) + assert.Equal(t, "onboarding", gjson.GetBytes(result.Hits.Hits[0].RawSource, "processor.event").String()) + + systemtest.Elasticsearch.ExpectMinDocs(t, 2, "traces-apm-migrated", nil) // transaction, span + systemtest.Elasticsearch.ExpectMinDocs(t, 1, "logs-apm.error-migrated", nil) + systemtest.Elasticsearch.ExpectMinDocs(t, 1, "metrics-apm.internal-migrated", nil) + systemtest.Elasticsearch.ExpectMinDocs(t, 1, "metrics-apm.app.systemtest-migrated", nil) +} diff --git a/systemtest/instrumentation_test.go b/systemtest/instrumentation_test.go index 0bec829522b..05ab031b2c3 100644 --- a/systemtest/instrumentation_test.go +++ b/systemtest/instrumentation_test.go @@ -18,8 +18,14 @@ package systemtest_test import ( - "context" + "bytes" "encoding/json" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "sort" + "sync" "testing" "time" @@ -44,8 +50,7 @@ func TestAPMServerInstrumentation(t *testing.T) { tracer.StartTransaction("name", "type").End() tracer.Flush(nil) - var result estest.SearchResult - _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{ + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{ Filter: []interface{}{ estest.TermQuery{ Field: "processor.event", @@ -60,11 +65,7 @@ func TestAPMServerInstrumentation(t *testing.T) { Value: "request", }, }, - }).Do(context.Background(), &result, - estest.WithTimeout(10*time.Second), - estest.WithCondition(result.Hits.NonEmptyCondition()), - ) - require.NoError(t, err) + }) var transactionDoc struct { Trace struct{ ID string } @@ -90,3 +91,165 @@ func TestAPMServerInstrumentation(t *testing.T) { } t.Fatal("failed to identify log message with matching trace IDs") } + +func TestAPMServerInstrumentationAuth(t *testing.T) { + test := func(t *testing.T, external, useSecretToken, useAPIKey bool) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "hunter2" + srv.Config.AgentAuth.APIKey = &apmservertest.APIKeyAuthConfig{Enabled: true} + srv.Config.Instrumentation = &apmservertest.InstrumentationConfig{Enabled: true} + + serverURLChan := make(chan string, 1) + if external { + // The server URL is not known ahead of time, so we run + // a reverse proxy which waits for the server URL. 
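+	// (srv.Config must be fully populated before srv.Start, but srv.URL
+	// is only known afterwards; the proxy defers resolving its target to
+	// the first request, by which time serverURLChan has been fed below.)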
+ var serverURL string + var serverURLOnce sync.Once + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverURLOnce.Do(func() { + select { + case <-r.Context().Done(): + case serverURL = <-serverURLChan: + } + }) + u, err := url.Parse(serverURL) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + rp := httputil.NewSingleHostReverseProxy(u) + rp.ServeHTTP(w, r) + })) + defer proxy.Close() + srv.Config.Instrumentation.Hosts = []string{proxy.URL} + } + if useSecretToken { + srv.Config.Instrumentation.SecretToken = srv.Config.AgentAuth.SecretToken + } + if useAPIKey { + systemtest.InvalidateAPIKeys(t) + defer systemtest.InvalidateAPIKeys(t) + + cmd := apiKeyCommand("create", "--name", t.Name(), "--json") + out, err := cmd.CombinedOutput() + require.NoError(t, err) + attrs := decodeJSONMap(t, bytes.NewReader(out)) + srv.Config.Instrumentation.APIKey = attrs["credentials"].(string) + } + + err := srv.Start() + require.NoError(t, err) + serverURLChan <- srv.URL + + // Send a transaction to the server, causing the server to + // trace the request from the agent. + tracer := srv.Tracer() + tracer.StartTransaction("name", "type").End() + tracer.Flush(nil) + + systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{ + Filter: []interface{}{ + estest.TermQuery{ + Field: "processor.event", + Value: "transaction", + }, + estest.TermQuery{ + Field: "service.name", + Value: "apm-server", + }, + estest.TermQuery{ + Field: "transaction.type", + Value: "request", + }, + }, + }) + } + t.Run("self_no_auth", func(t *testing.T) { + // sending data to self, no auth specified + test(t, false, false, false) + }) + t.Run("external_secret_token", func(t *testing.T) { + // sending data to external server, secret token specified + test(t, true, true, false) + }) + t.Run("external_api_key", func(t *testing.T) { + // sending data to external server, API Key specified + test(t, true, false, true) + }) +} + +func TestAPMServerProfiling(t *testing.T) { + // TODO(axw) the heap profiling test specifically is flaky. This is + // a highly experimental feature, so disable system tests for now. + t.Skip("flaky test: https://github.com/elastic/apm-server/issues/5322") + + test := func(t *testing.T, profilingConfig *apmservertest.ProfilingConfig, expectedMetrics []string) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.Instrumentation = &apmservertest.InstrumentationConfig{ + Enabled: true, + Profiling: profilingConfig, + } + err := srv.Start() + require.NoError(t, err) + + // Generate some load to cause the server to consume resources. 
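+	// A thousand throwaway transactions are cheap to generate, but give
+	// the CPU/heap profilers configured by each sub-test something to
+	// sample.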
+ tracer := srv.Tracer() + for i := 0; i < 1000; i++ { + tracer.StartTransaction("name", "type").End() + } + tracer.Flush(nil) + + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{ + Field: "processor.event", + Value: "profile", + }) + assert.Equal(t, expectedMetrics, profileMetricNames(result)) + } + t.Run("cpu", func(t *testing.T) { + test(t, &apmservertest.ProfilingConfig{ + CPU: &apmservertest.CPUProfilingConfig{ + Enabled: true, + Interval: time.Second, + Duration: time.Second, + }, + }, []string{"cpu.ns", "duration", "samples.count"}) + }) + t.Run("heap", func(t *testing.T) { + test(t, &apmservertest.ProfilingConfig{ + Heap: &apmservertest.HeapProfilingConfig{ + Enabled: true, + Interval: time.Second, + }, + }, []string{ + "alloc_objects.count", + "alloc_space.bytes", + "inuse_objects.count", + "inuse_space.bytes", + }) + }) +} + +func profileMetricNames(result estest.SearchResult) []string { + unique := make(map[string]struct{}) + var metricNames []string + for _, hit := range result.Hits.Hits { + profileField, ok := hit.Source["profile"].(map[string]interface{}) + if !ok { + continue + } + for k, v := range profileField { + if _, ok := v.(float64); !ok { + continue + } + if _, ok := unique[k]; ok { + continue + } + unique[k] = struct{}{} + metricNames = append(metricNames, k) + } + } + sort.Strings(metricNames) + return metricNames +} diff --git a/systemtest/intake.go b/systemtest/intake.go new file mode 100644 index 00000000000..0676edf5918 --- /dev/null +++ b/systemtest/intake.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package systemtest + +import ( + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +func SendRUMEventsPayload(t *testing.T, srv *apmservertest.Server, payloadFile string) { + sendEventsPayload(t, srv, "/intake/v2/rum/events", payloadFile) +} + +func SendBackendEventsPayload(t *testing.T, srv *apmservertest.Server, payloadFile string) { + sendEventsPayload(t, srv, "/intake/v2/events", payloadFile) +} + +func sendEventsPayload(t *testing.T, srv *apmservertest.Server, urlPath, payloadFile string) { + t.Helper() + + f, err := os.Open(payloadFile) + require.NoError(t, err) + defer f.Close() + + req, _ := http.NewRequest("POST", srv.URL+urlPath, f) + req.Header.Add("Content-Type", "application/x-ndjson") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, http.StatusAccepted, resp.StatusCode, string(respBody)) +} diff --git a/systemtest/jaeger_test.go b/systemtest/jaeger_test.go index 3bd66da2576..ee924963585 100644 --- a/systemtest/jaeger_test.go +++ b/systemtest/jaeger_test.go @@ -20,13 +20,19 @@ package systemtest_test import ( "context" "encoding/json" + "net/url" "os" "testing" + jaegermodel "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" "github.com/elastic/apm-server/systemtest" "github.com/elastic/apm-server/systemtest/apmservertest" @@ -35,16 +41,36 @@ import ( func TestJaegerGRPC(t *testing.T) { systemtest.CleanupElasticsearch(t) - srv := apmservertest.NewUnstartedServer(t) srv.Config.Jaeger = &apmservertest.JaegerConfig{ GRPCEnabled: true, GRPCHost: "localhost:0", } + srv.Config.Monitoring = newFastMonitoringConfig() err := srv.Start() require.NoError(t, err) + testJaegerGRPC(t, srv, srv.JaegerGRPCAddr, grpc.WithInsecure()) +} + +func TestJaegerGRPCMuxed(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.Monitoring = newFastMonitoringConfig() + require.NoError(t, srv.Start()) + testJaegerGRPC(t, srv, serverAddr(srv), grpc.WithInsecure()) +} + +func TestJaegerGRPCMuxedTLS(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.Monitoring = newFastMonitoringConfig() + srv.Config.TLS = &apmservertest.TLSConfig{ClientAuthentication: "required"} + require.NoError(t, srv.StartTLS()) + testJaegerGRPC(t, srv, serverAddr(srv), grpc.WithTransportCredentials(credentials.NewTLS(srv.TLS))) +} - conn, err := grpc.Dial(srv.JaegerGRPCAddr, grpc.WithInsecure()) +func testJaegerGRPC(t *testing.T, srv *apmservertest.Server, addr string, dialOptions ...grpc.DialOption) { + conn, err := grpc.Dial(addr, dialOptions...) 
require.NoError(t, err) defer conn.Close() @@ -54,16 +80,12 @@ func TestJaegerGRPC(t *testing.T) { _, err = client.PostSpans(context.Background(), request) require.NoError(t, err) - var result estest.SearchResult - _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{ - Filter: []interface{}{ - estest.TermQuery{ - Field: "processor.event", - Value: "transaction", - }, - }, - }).Do(context.Background(), &result, estest.WithCondition(result.Hits.NonEmptyCondition())) - require.NoError(t, err) + doc := getBeatsMonitoringStats(t, srv, nil) + assert.Equal(t, int64(1), gjson.GetBytes(doc.RawSource, "beats_stats.metrics.apm-server.jaeger.grpc.collect.request.count").Int()) + + systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{Filter: []interface{}{ + estest.TermQuery{Field: "processor.event", Value: "transaction"}, + }}) // TODO(axw) check document contents. We currently do this in beater/jaeger. } @@ -71,14 +93,10 @@ func TestJaegerGRPC(t *testing.T) { func TestJaegerGRPCSampling(t *testing.T) { systemtest.CleanupElasticsearch(t) srv := apmservertest.NewUnstartedServer(t) - srv.Config.Jaeger = &apmservertest.JaegerConfig{ - GRPCEnabled: true, - GRPCHost: "localhost:0", - } err := srv.Start() require.NoError(t, err) - conn, err := grpc.Dial(srv.JaegerGRPCAddr, grpc.WithInsecure()) + conn, err := grpc.Dial(serverAddr(srv), grpc.WithInsecure()) require.NoError(t, err) defer conn.Close() @@ -90,6 +108,40 @@ func TestJaegerGRPCSampling(t *testing.T) { assert.Regexp(t, "no sampling rate available", err.Error()) } +func TestJaegerGRPCAuth(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "secret" + require.NoError(t, srv.Start()) + + conn, err := grpc.Dial(serverAddr(srv), grpc.WithInsecure()) + require.NoError(t, err) + defer conn.Close() + + client := api_v2.NewCollectorServiceClient(conn) + request, err := decodeJaegerPostSpansRequest("../testdata/jaeger/batch_0.json") + require.NoError(t, err) + + // Attempt to send spans without the auth tag -- this should fail. + _, err = client.PostSpans(context.Background(), request) + require.Error(t, err) + status := status.Convert(err) + assert.Equal(t, codes.Unauthenticated, status.Code()) + + // Now with the auth tag -- this should succeed. + request.Batch.Process.Tags = append(request.Batch.Process.Tags, jaegermodel.KeyValue{ + Key: "elastic-apm-auth", + VType: jaegermodel.ValueType_STRING, + VStr: "Bearer secret", + }) + _, err = client.PostSpans(context.Background(), request) + require.NoError(t, err) + + systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{Filter: []interface{}{ + estest.TermQuery{Field: "processor.event", Value: "transaction"}, + }}) +} + func decodeJaegerPostSpansRequest(filename string) (*api_v2.PostSpansRequest, error) { var request api_v2.PostSpansRequest f, err := os.Open(filename) @@ -99,3 +151,8 @@ func decodeJaegerPostSpansRequest(filename string) (*api_v2.PostSpansRequest, er defer f.Close() return &request, json.NewDecoder(f).Decode(&request) } + +func serverAddr(srv *apmservertest.Server) string { + url, _ := url.Parse(srv.URL) + return url.Host +} diff --git a/systemtest/kibana.go b/systemtest/kibana.go new file mode 100644 index 00000000000..95d2505a31d --- /dev/null +++ b/systemtest/kibana.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest + +import ( + "log" + "net/url" + + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/fleettest" +) + +const ( + adminKibanaUser = adminElasticsearchUser + adminKibanaPass = adminElasticsearchPass +) + +var ( + // KibanaURL is the base URL for Kibana, including userinfo for + // authenticating as the admin user. + KibanaURL *url.URL + + // Fleet is a Fleet API client for use in tests. + Fleet *fleettest.Client +) + +func init() { + kibanaConfig := apmservertest.DefaultConfig().Kibana + u, err := url.Parse(kibanaConfig.Host) + if err != nil { + log.Fatal(err) + } + u.User = url.UserPassword(adminKibanaUser, adminKibanaPass) + KibanaURL = u + Fleet = fleettest.NewClient(KibanaURL.String()) +} diff --git a/systemtest/logging_test.go b/systemtest/logging_test.go index 454ec906325..13091025d21 100644 --- a/systemtest/logging_test.go +++ b/systemtest/logging_test.go @@ -18,6 +18,7 @@ package systemtest_test import ( + "context" "fmt" "io" "io/ioutil" @@ -25,15 +26,64 @@ import ( "strings" "testing" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.elastic.co/apm/apmtest" "go.elastic.co/fastjson" + "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc" "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "github.com/elastic/apm-server/systemtest" "github.com/elastic/apm-server/systemtest/apmservertest" ) +func TestAPMServerGRPCRequestLoggingValid(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.Jaeger = &apmservertest.JaegerConfig{ + GRPCEnabled: true, + GRPCHost: "localhost:0", + } + err := srv.Start() + require.NoError(t, err) + addr := srv.JaegerGRPCAddr + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + require.NoError(t, err) + defer conn.Close() + + client := api_v2.NewCollectorServiceClient(conn) + request, err := decodeJaegerPostSpansRequest("../testdata/jaeger/batch_0.json") + require.NoError(t, err) + _, err = client.PostSpans(context.Background(), request) + require.NoError(t, err) + + tracerProvider := newOTLPTracerProvider(newOTLPExporter(t, srv, otlpgrpc.WithHeaders(map[string]string{ + "Authorization": "Bearer abc123", + }))) + err = sendOTLPTrace(context.Background(), tracerProvider) + require.NoError(t, err) + + srv.Close() + + var foundGRPC, foundJaeger bool + for _, entry := range srv.Logs.All() { + if entry.Logger == "beater.grpc" { + require.Equal(t, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", entry.Fields["grpc.request.method"]) + require.Equal(t, "OK", entry.Fields["grpc.response.status_code"]) + foundGRPC = true + } + if entry.Logger == "beater.jaeger" { + require.Equal(t, 
"/jaeger.api_v2.CollectorService/PostSpans", entry.Fields["grpc.request.method"]) + require.Equal(t, "OK", entry.Fields["grpc.response.status_code"]) + foundJaeger = true + } + } + require.True(t, foundGRPC) + require.True(t, foundJaeger) +} + func TestAPMServerRequestLoggingValid(t *testing.T) { srv := apmservertest.NewServer(t) eventsURL := srv.URL + "/intake/v2/events" @@ -69,8 +119,8 @@ func TestAPMServerRequestLoggingValid(t *testing.T) { srv.Close() for _, entry := range srv.Logs.All() { - if entry.Logger == "request" && entry.Fields["URL"] == "/intake/v2/events" { - statusCode, _ := entry.Fields["response_code"].(float64) + if entry.Logger == "request" && entry.Fields["url.original"] == "/intake/v2/events" { + statusCode, _ := entry.Fields["http.response.status_code"].(float64) logEntries = append(logEntries, entry) requestEntries = append(requestEntries, requestEntry{ level: entry.Level, @@ -95,8 +145,8 @@ func TestAPMServerRequestLoggingValid(t *testing.T) { }}, requestEntries) assert.NotContains(t, logEntries[0].Fields, "error") - assert.Regexp(t, "failed to validate transaction: .*", logEntries[1].Fields["error"]) - assert.Equal(t, "event exceeded the permitted size.", logEntries[2].Fields["error"]) + assert.Regexp(t, "validation error: 'transaction' required", logEntries[1].Fields["error.message"]) + assert.Equal(t, "event exceeded the permitted size.", logEntries[2].Fields["error.message"]) } // validMetadataJSON returns valid JSON-encoded metadata, diff --git a/systemtest/main_test.go b/systemtest/main_test.go index 3f167a182b2..d16fd8d7a69 100644 --- a/systemtest/main_test.go +++ b/systemtest/main_test.go @@ -27,5 +27,8 @@ func TestMain(m *testing.M) { if err := StartStackContainers(); err != nil { log.Fatal(err) } + if err := Fleet.Setup(); err != nil { + log.Fatal(err) + } os.Exit(m.Run()) } diff --git a/systemtest/metrics_test.go b/systemtest/metrics_test.go new file mode 100644 index 00000000000..91ecc42bff4 --- /dev/null +++ b/systemtest/metrics_test.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package systemtest_test
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tidwall/gjson"
+	"go.elastic.co/apm"
+
+	"github.com/elastic/apm-server/systemtest"
+	"github.com/elastic/apm-server/systemtest/apmservertest"
+	"github.com/elastic/apm-server/systemtest/estest"
+	"github.com/elastic/go-elasticsearch/v7/esapi"
+)
+
+func TestApprovedMetrics(t *testing.T) {
+	withDataStreams(t, testApprovedMetrics)
+}
+
+func testApprovedMetrics(t *testing.T, srv *apmservertest.Server) {
+	err := srv.Start()
+	require.NoError(t, err)
+
+	eventsPayload, err := ioutil.ReadFile("../testdata/intake-v2/metricsets.ndjson")
+	require.NoError(t, err)
+
+	req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/events?verbose=true", bytes.NewReader(eventsPayload))
+	req.Header.Set("Content-Type", "application/x-ndjson")
+	resp, err := http.DefaultClient.Do(req)
+	require.NoError(t, err)
+	defer resp.Body.Close()
+	assert.Equal(t, http.StatusAccepted, resp.StatusCode)
+	var ingestResult struct {
+		Accepted int
+	}
+	err = json.NewDecoder(resp.Body).Decode(&ingestResult)
+	assert.NoError(t, err)
+
+	// Check the metrics documents are exactly as we expect.
+	indices := []string{"apm-*", "metrics-apm.*"}
+	result := systemtest.Elasticsearch.ExpectMinDocs(t, ingestResult.Accepted, strings.Join(indices, ","), estest.TermQuery{
+		Field: "processor.event",
+		Value: "metric",
+	})
+	systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits)
+
+	// Check dynamic mapping of histograms.
+	mappings := getFieldMappings(t, indices, []string{"latency_distribution"})
+	assert.Equal(t, map[string]interface{}{
+		"latency_distribution": map[string]interface{}{
+			"full_name": "latency_distribution",
+			"mapping": map[string]interface{}{
+				"latency_distribution": map[string]interface{}{
+					"type": "histogram",
+				},
+			},
+		},
+	}, mappings)
+}
+
+func TestBreakdownMetrics(t *testing.T) {
+	systemtest.CleanupElasticsearch(t)
+	srv := apmservertest.NewServer(t)
+
+	tracer := srv.Tracer()
+	tx := tracer.StartTransaction("tx_name", "tx_type")
+	span := tx.StartSpan("span_name", "span_type", nil)
+	span.Duration = 500 * time.Millisecond
+	span.End()
+	tx.Duration = time.Second
+	tx.End()
+	tracer.SendMetrics(nil)
+	tracer.Flush(nil)
+
+	result := systemtest.Elasticsearch.ExpectMinDocs(t, 3, "apm-*", estest.BoolQuery{
+		Filter: []interface{}{
+			estest.TermQuery{
+				Field: "processor.event",
+				Value: "metric",
+			},
+			estest.TermQuery{
+				Field: "transaction.type",
+				Value: "tx_type",
+			},
+		},
+	})
+
+	docs := unmarshalMetricsetDocs(t, result.Hits.Hits)
+	assert.ElementsMatch(t, []metricsetDoc{{
+		Transaction:   metricsetTransaction{Type: "tx_type"},
+		MetricsetName: "transaction_breakdown",
+	}, {
+		Transaction:   metricsetTransaction{Type: "tx_type"},
+		Span:          metricsetSpan{Type: "span_type"},
+		MetricsetName: "span_breakdown",
+	}, {
+		Transaction:   metricsetTransaction{Type: "tx_type"},
+		Span:          metricsetSpan{Type: "app"},
+		MetricsetName: "span_breakdown",
+	}}, docs)
+}
+
+func TestApplicationMetrics(t *testing.T) {
+	systemtest.CleanupElasticsearch(t)
+	srv := apmservertest.NewServer(t)
+
+	tracer := srv.Tracer()
+	tracer.RegisterMetricsGatherer(apm.GatherMetricsFunc(func(ctx context.Context, metrics *apm.Metrics) error {
+		metrics.Add("a.b.c", nil, 123)
+		metrics.Add("x.y.z", nil, 123.456)
+		return nil
+	}))
+	tracer.SendMetrics(nil)
+	tracer.Flush(nil)
+
+	result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{
+		Field: "metricset.name",
+		Value: "app",
+	})
+
+	// The Go agent sends all metrics with the same set of labels in one metricset.
+	// This includes custom metrics, Go runtime metrics, system and process metrics.
+	expectedFields := []string{
+		"golang.goroutines",
+		"system.memory.total",
+		"a.b.c",
+		"x.y.z",
+	}
+	for _, fieldName := range expectedFields {
+		var found bool
+		for _, hit := range result.Hits.Hits {
+			// Metrics are written with dotted field names rather than
+			// as hierarchical objects, so escape dots in the gjson path.
+			path := strings.Replace(fieldName, ".", "\\.", -1)
+			if gjson.GetBytes(hit.RawSource, path).Exists() {
+				found = true
+				break
+			}
+		}
+		assert.True(t, found, "field %q not found in 'app' metricset docs", fieldName)
+	}
+
+	// Check that the index mapping has been updated for the custom
+	// metrics, with the expected dynamically mapped field types.
+	mappings := getFieldMappings(t, []string{"apm-*"}, []string{"a.b.c", "x.y.z"})
+	assert.Equal(t, map[string]interface{}{
+		"a.b.c": map[string]interface{}{
+			"full_name": "a.b.c",
+			"mapping": map[string]interface{}{
+				"c": map[string]interface{}{
+					"type": "long",
+				},
+			},
+		},
+		"x.y.z": map[string]interface{}{
+			"full_name": "x.y.z",
+			"mapping": map[string]interface{}{
+				"z": map[string]interface{}{
+					"type": "float",
+				},
+			},
+		},
+	}, mappings)
+}
+
+func getFieldMappings(t testing.TB, index []string, fields []string) map[string]interface{} {
+	var allMappings map[string]struct {
+		Mappings map[string]interface{}
+	}
+	_, err := systemtest.Elasticsearch.Do(context.Background(), &esapi.IndicesGetFieldMappingRequest{
+		Index:  index,
+		Fields: fields,
+	}, &allMappings)
+	require.NoError(t, err)
+
+	mappings := make(map[string]interface{})
+	for _, index := range allMappings {
+		for k, v := range index.Mappings {
+			assert.NotContains(t, mappings, k, "field %q exists in multiple indices", k)
+			mappings[k] = v
+		}
+	}
+	return mappings
+}
+
+type metricsetTransaction struct {
+	Type string `json:"type"`
+}
+
+type metricsetSpan struct {
+	Type string `json:"type"`
+}
+
+type metricsetSample struct {
+	Value float64 `json:"value"`
+}
+
+type metricsetDoc struct {
+	Transaction   metricsetTransaction `json:"transaction"`
+	Span          metricsetSpan        `json:"span"`
+	MetricsetName string               `json:"metricset.name"`
+}
+
+func unmarshalMetricsetDocs(t testing.TB, hits []estest.SearchHit) []metricsetDoc {
+	var docs []metricsetDoc
+	for _, hit := range hits {
+		docs = append(docs, unmarshalMetricsetDoc(t, &hit))
+	}
+	return docs
+}
+
+func unmarshalMetricsetDoc(t testing.TB, hit *estest.SearchHit) metricsetDoc {
+	var doc metricsetDoc
+	if err := hit.UnmarshalSource(&doc); err != nil {
+		t.Fatal(err)
+	}
+	return doc
+}
diff --git a/systemtest/monitoring_test.go b/systemtest/monitoring_test.go
index 6e22d65d5cc..fc77d0be583 100644
--- a/systemtest/monitoring_test.go
+++ b/systemtest/monitoring_test.go
@@ -34,11 +34,7 @@ import (
 
 func TestAPMServerMonitoring(t *testing.T) {
 	srv := apmservertest.NewUnstartedServer(t)
-	srv.Config.Monitoring = &apmservertest.MonitoringConfig{
-		Enabled:       true,
-		MetricsPeriod: time.Duration(time.Second),
-		StatePeriod:   time.Duration(time.Second),
-	}
+	srv.Config.Monitoring = newFastMonitoringConfig()
 	err := srv.Start()
 	require.NoError(t, err)
 
@@ -66,6 +62,7 @@ func TestAPMServerMonitoringBuiltinUser(t *testing.T) {
 		Enabled:     true,
 		StatePeriod: time.Duration(time.Second),
 		Elasticsearch:
&apmservertest.ElasticsearchOutputConfig{ + Enabled: true, Username: username, Password: password, }, @@ -85,21 +82,16 @@ func getBeatsMonitoringStats(t testing.TB, srv *apmservertest.Server, out interf func getBeatsMonitoring(t testing.TB, srv *apmservertest.Server, type_ string, out interface{}) *beatsMonitoringDoc { var result estest.SearchResult - _, err := systemtest.Elasticsearch.Search(".monitoring-beats-*").WithQuery(estest.BoolQuery{ - Filter: []interface{}{ - estest.TermQuery{ - Field: type_ + ".beat.uuid", - Value: srv.BeatUUID, - }, - }, - }).Do(context.Background(), &result, - estest.WithTimeout(10*time.Second), - estest.WithCondition(result.Hits.NonEmptyCondition()), - ) - require.NoError(t, err) + req := systemtest.Elasticsearch.Search(".monitoring-beats-*").WithQuery( + estest.TermQuery{Field: type_ + ".beat.uuid", Value: srv.BeatUUID}, + ).WithSort("timestamp:desc") + if _, err := req.Do(context.Background(), &result, estest.WithCondition(result.Hits.MinHitsCondition(1))); err != nil { + t.Error(err) + } var doc beatsMonitoringDoc - err = json.Unmarshal([]byte(result.Hits.Hits[0].RawSource), &doc) + doc.RawSource = []byte(result.Hits.Hits[0].RawSource) + err := json.Unmarshal(doc.RawSource, &doc) require.NoError(t, err) if out != nil { switch doc.Type { @@ -113,6 +105,7 @@ func getBeatsMonitoring(t testing.TB, srv *apmservertest.Server, type_ string, o } type beatsMonitoringDoc struct { + RawSource []byte `json:"-"` Timestamp time.Time `json:"timestamp"` Type string `json:"type"` BeatsState `json:"beats_state,omitempty"` @@ -126,3 +119,11 @@ type BeatsState struct { type BeatsStats struct { Metrics map[string]interface{} `json:"metrics"` } + +func newFastMonitoringConfig() *apmservertest.MonitoringConfig { + return &apmservertest.MonitoringConfig{ + Enabled: true, + MetricsPeriod: 100 * time.Millisecond, + StatePeriod: 100 * time.Millisecond, + } +} diff --git a/systemtest/onboarding_test.go b/systemtest/onboarding_test.go index a358c57d055..5d88b168018 100644 --- a/systemtest/onboarding_test.go +++ b/systemtest/onboarding_test.go @@ -18,7 +18,6 @@ package systemtest_test import ( - "context" "testing" "github.com/stretchr/testify/assert" @@ -33,16 +32,13 @@ func TestAPMServerOnboarding(t *testing.T) { systemtest.CleanupElasticsearch(t) srv := apmservertest.NewServer(t) - var result estest.SearchResult - _, err := systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.TermQuery{ + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{ Field: "processor.event", Value: "onboarding", - }).Do(context.Background(), &result, estest.WithCondition(result.Hits.NonEmptyCondition())) - require.NoError(t, err) + }) require.Len(t, result.Hits.Hits, 1) expvar := srv.GetExpvar() - require.NoError(t, err) observer := result.Hits.Hits[0].Source["observer"].(map[string]interface{}) assert.Equal(t, expvar.Vars["beat.info.ephemeral_id"], observer["ephemeral_id"]) } diff --git a/systemtest/otlp_test.go b/systemtest/otlp_test.go new file mode 100644 index 00000000000..fda8c090e79 --- /dev/null +++ b/systemtest/otlp_test.go @@ -0,0 +1,426 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package systemtest_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tidwall/gjson"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/otlp"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
+	"go.opentelemetry.io/otel/metric"
+	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
+	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
+	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
+	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdkresource "go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/elastic/apm-server/systemtest"
+	"github.com/elastic/apm-server/systemtest/apmservertest"
+	"github.com/elastic/apm-server/systemtest/estest"
+)
+
+var otelErrors = make(chan error, 1)
+
+func init() {
+	// otel.SetErrorHandler can only be called once per process.
+	otel.SetErrorHandler(otelErrorHandlerFunc(func(err error) {
+		if err == nil {
+			return
+		}
+		select {
+		case otelErrors <- err:
+		default:
+		}
+	}))
+}
+
+func TestOTLPGRPCTraces(t *testing.T) {
+	withDataStreams(t, testOTLPGRPCTraces)
+}
+
+func testOTLPGRPCTraces(t *testing.T, srv *apmservertest.Server) {
+	require.NoError(t, srv.Start())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	err := withOTLPTracer(newOTLPTracerProvider(newOTLPExporter(t, srv), sdktrace.WithResource(
+		resource.Merge(resource.Default(), sdkresource.NewWithAttributes(
+			attribute.Array("resource_attribute_array", []string{"a", "b"}),
+		)),
+	)), func(tracer trace.Tracer) {
+		startTime := time.Unix(123, 456)
+		endTime := startTime.Add(time.Second)
+		_, span := tracer.Start(ctx, "operation_name", trace.WithTimestamp(startTime), trace.WithAttributes(
+			attribute.Array("span_attribute_array", []string{"a", "b", "c"}),
+		))
+		span.AddEvent("a_span_event", trace.WithTimestamp(startTime.Add(time.Millisecond)))
+		span.End(trace.WithTimestamp(endTime))
+	})
+	require.NoError(t, err)
+
+	expectMin := 1
+	if srv.Config.DataStreams != nil && srv.Config.DataStreams.Enabled {
+		expectMin++ // span events only indexed into data streams
+	}
+
+	indices := "apm-*,traces-apm*,logs-apm*"
+	result := systemtest.Elasticsearch.ExpectMinDocs(t, expectMin, indices, estest.BoolQuery{
+		Should: []interface{}{
+			estest.TermQuery{Field: "processor.event", Value: "transaction"},
+			estest.TermQuery{Field: "processor.event", Value: "log"},
+		},
+		MinimumShouldMatch: 1,
+	})
+	systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits)
+}
+
+func TestOTLPGRPCMetrics(t *testing.T) {
+	systemtest.CleanupElasticsearch(t)
+	srv := apmservertest.NewUnstartedServer(t)
+	srv.Config.Monitoring = newFastMonitoringConfig()
+	err := srv.Start()
+
require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + aggregator := simple.NewWithHistogramDistribution(histogram.WithExplicitBoundaries([]float64{1, 100, 1000, 10000})) + err = sendOTLPMetrics(t, ctx, srv, aggregator, func(meter metric.MeterMust) { + float64Counter := meter.NewFloat64Counter("float64_counter") + float64Counter.Add(context.Background(), 1) + + int64Recorder := meter.NewInt64ValueRecorder("int64_recorder") + int64Recorder.Record(context.Background(), 1) + int64Recorder.Record(context.Background(), 123) + int64Recorder.Record(context.Background(), 1024) + int64Recorder.Record(context.Background(), 20000) + }) + require.NoError(t, err) + + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{Filter: []interface{}{ + estest.TermQuery{Field: "processor.event", Value: "metric"}, + }}) + systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits, "@timestamp") + + // Make sure we report monitoring for the metrics consumer. Metric values are unit tested. + doc := getBeatsMonitoringStats(t, srv, nil) + assert.True(t, gjson.GetBytes(doc.RawSource, "beats_stats.metrics.apm-server.otlp.grpc.metrics.consumer").Exists()) +} + +func TestOTLPGRPCAuth(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "abc123" + err := srv.Start() + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err = sendOTLPTrace(ctx, newOTLPTracerProvider(newOTLPExporter(t, srv))) + assert.Error(t, err) + assert.Equal(t, codes.Unauthenticated, status.Code(err)) + + err = sendOTLPTrace(ctx, newOTLPTracerProvider(newOTLPExporter(t, srv, otlpgrpc.WithHeaders(map[string]string{ + "Authorization": "Bearer abc123", + })))) + require.NoError(t, err) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.BoolQuery{Filter: []interface{}{ + estest.TermQuery{Field: "processor.event", Value: "transaction"}, + }}) +} + +func TestOTLPClientIP(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + exporter := newOTLPExporter(t, srv) + err := sendOTLPTrace(ctx, newOTLPTracerProvider(exporter)) + assert.NoError(t, err) + + err = sendOTLPTrace(ctx, newOTLPTracerProvider(exporter, sdktrace.WithResource( + sdkresource.NewWithAttributes(attribute.String("service.name", "service1")), + ))) + require.NoError(t, err) + + err = sendOTLPTrace(ctx, newOTLPTracerProvider(exporter, sdktrace.WithResource( + sdkresource.NewWithAttributes( + attribute.String("service.name", "service2"), + attribute.String("telemetry.sdk.name", "iOS"), + attribute.String("telemetry.sdk.language", "swift"), + ), + ))) + require.NoError(t, err) + + // Non-iOS agent documents should have no client.ip field set. + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{ + Field: "service.name", Value: "service1", + }) + assert.False(t, gjson.GetBytes(result.Hits.Hits[0].RawSource, "client.ip").Exists()) + + // iOS agent documents should have a client.ip field set. 
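+ // (The telemetry.sdk.name/language attributes above identify the sender as
+ // an "iOS/swift" agent; for such mobile agents the server captures the
+ // client IP, much as it does for RUM agents.)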
+ result = systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{ + Field: "service.name", Value: "service2", + }) + assert.True(t, gjson.GetBytes(result.Hits.Hits[0].RawSource, "client.ip").Exists()) +} + +func TestOTLPAnonymous(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "abc123" // enable auth & rate limiting + srv.Config.AgentAuth.Anonymous = &apmservertest.AnonymousAuthConfig{ + Enabled: true, + AllowAgent: []string{"iOS/swift"}, + AllowService: []string{"allowed_service"}, + } + err := srv.Start() + require.NoError(t, err) + + sendEvent := func(telemetrySDKName, telemetrySDKLanguage, serviceName string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var attributes []attribute.KeyValue + if serviceName != "" { + attributes = append(attributes, attribute.String("service.name", serviceName)) + } + if telemetrySDKName != "" { + attributes = append(attributes, attribute.String("telemetry.sdk.name", telemetrySDKName)) + } + if telemetrySDKLanguage != "" { + attributes = append(attributes, attribute.String("telemetry.sdk.language", telemetrySDKLanguage)) + } + exporter := newOTLPExporter(t, srv) + resource := sdkresource.NewWithAttributes(attributes...) + return sendOTLPTrace(ctx, newOTLPTracerProvider(exporter, sdktrace.WithResource(resource))) + } + + err = sendEvent("iOS", "swift", "allowed_service") + assert.NoError(t, err) + + err = sendEvent("open-telemetry", "go", "allowed_service") + assert.Error(t, err) + errStatus, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.PermissionDenied, errStatus.Code()) + assert.Equal(t, `unauthorized: anonymous access not permitted for agent "open-telemetry/go"`, errStatus.Message()) + + err = sendEvent("iOS", "swift", "unallowed_service") + assert.Error(t, err) + errStatus, ok = status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.PermissionDenied, errStatus.Code()) + assert.Equal(t, `unauthorized: anonymous access not permitted for service "unallowed_service"`, errStatus.Message()) + + // If the client does not send telemetry.sdk.*, we default agent name "otlp". + // This means it is not possible to bypass the allowed agents list. + err = sendEvent("", "", "allowed_service") + assert.Error(t, err) + errStatus, ok = status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.PermissionDenied, errStatus.Code()) + assert.Equal(t, `unauthorized: anonymous access not permitted for agent "otlp"`, errStatus.Message()) + + // If the client does not send a service name, we default to "unknown". + // This means it is not possible to bypass the allowed services list. + err = sendEvent("iOS", "swift", "") + assert.Error(t, err) + errStatus, ok = status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.PermissionDenied, errStatus.Code()) + assert.Equal(t, `unauthorized: anonymous access not permitted for service "unknown"`, errStatus.Message()) +} + +func TestOTLPRateLimit(t *testing.T) { + // The configured rate limit. + const eventRateLimit = 10 + + // The actual rate limit: a 3x "burst multiplier" is applied, + // and each gRPC method call is counted towards the event rate + // limit as well. 
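+ //
+ // For example: with eventRateLimit = 10, the burst allows 3*10 = 30 units,
+ // and each sendEvent call below consumes two units (one for the gRPC
+ // method call, one for the event), so (3*10)/2 = 15 calls fit within the
+ // burst for each IP.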
+ const sendEventLimit = (3 * eventRateLimit) / 2 + + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "abc123" // enable auth & rate limiting + srv.Config.AgentAuth.Anonymous = &apmservertest.AnonymousAuthConfig{ + Enabled: true, + AllowAgent: []string{"iOS/swift"}, + RateLimit: &apmservertest.RateLimitConfig{ + IPLimit: 2, + EventLimit: eventRateLimit, + }, + } + err := srv.Start() + require.NoError(t, err) + + sendEvent := func(ip string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + exporter := newOTLPExporter(t, srv, otlpgrpc.WithHeaders(map[string]string{"x-real-ip": ip})) + resource := sdkresource.NewWithAttributes( + attribute.String("service.name", "service2"), + attribute.String("telemetry.sdk.name", "iOS"), + attribute.String("telemetry.sdk.language", "swift"), + ) + return sendOTLPTrace(ctx, newOTLPTracerProvider(exporter, sdktrace.WithResource(resource))) + } + + // Check that for the configured IP limit (2), we can handle 3*event_limit without being rate limited. + var g errgroup.Group + for i := 0; i < sendEventLimit; i++ { + g.Go(func() error { return sendEvent("10.11.12.13") }) + g.Go(func() error { return sendEvent("10.11.12.14") }) + } + err = g.Wait() + assert.NoError(t, err) + + // The rate limiter cache only has space for 2 IPs, so the 3rd one reuses an existing + // limiter, which will have already been exhausted. + err = sendEvent("10.11.12.15") + require.Error(t, err) + errStatus, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.ResourceExhausted, errStatus.Code()) + assert.Equal(t, "rate limit exceeded", errStatus.Message()) +} + +func newOTLPExporter(t testing.TB, srv *apmservertest.Server, options ...otlpgrpc.Option) *otlp.Exporter { + options = append(options, otlpgrpc.WithEndpoint(serverAddr(srv)), otlpgrpc.WithInsecure()) + driver := otlpgrpc.NewDriver(options...) + exporter, err := otlp.NewExporter(context.Background(), driver) + require.NoError(t, err) + t.Cleanup(func() { + exporter.Shutdown(context.Background()) + }) + return exporter +} + +func newOTLPTracerProvider(exporter *otlp.Exporter, options ...sdktrace.TracerProviderOption) *sdktrace.TracerProvider { + return sdktrace.NewTracerProvider(append([]sdktrace.TracerProviderOption{ + sdktrace.WithSyncer(exporter), + sdktrace.WithIDGenerator(&idGeneratorFuncs{ + newIDs: func(context.Context) (trace.TraceID, trace.SpanID) { + traceID, err := trace.TraceIDFromHex("d2acbef8b37655e48548fd9d61ad6114") + if err != nil { + panic(err) + } + spanID, err := trace.SpanIDFromHex("b3ee9be3b687a611") + if err != nil { + panic(err) + } + return traceID, spanID + }, + }), + }, options...)...) 
+}
+
+func sendOTLPTrace(ctx context.Context, tracerProvider *sdktrace.TracerProvider) error {
+ // withOTLPTracer flushes the tracer provider and returns any error.
+ return withOTLPTracer(tracerProvider, func(tracer trace.Tracer) {
+ startTime := time.Unix(123, 456)
+ endTime := startTime.Add(time.Second)
+ _, span := tracer.Start(ctx, "operation_name", trace.WithTimestamp(startTime), trace.WithAttributes(
+ attribute.Array("span_attribute_array", []string{"a", "b", "c"}),
+ ))
+ span.End(trace.WithTimestamp(endTime))
+ })
+}
+
+func withOTLPTracer(tracerProvider *sdktrace.TracerProvider, f func(trace.Tracer)) error {
+ tracer := tracerProvider.Tracer("systemtest")
+ f(tracer)
+ return flushTracerProvider(context.Background(), tracerProvider)
+}
+
+func flushTracerProvider(ctx context.Context, tracerProvider *sdktrace.TracerProvider) error {
+ if err := tracerProvider.ForceFlush(ctx); err != nil {
+ return err
+ }
+ select {
+ case err := <-otelErrors:
+ return err
+ default:
+ return nil
+ }
+}
+
+func sendOTLPMetrics(
+ t testing.TB,
+ ctx context.Context,
+ srv *apmservertest.Server,
+ aggregator export.AggregatorSelector,
+ recordMetrics func(metric.MeterMust),
+) error {
+ exporter := newOTLPExporter(t, srv)
+ controller := controller.New(
+ processor.New(aggregator, exporter),
+ controller.WithExporter(exporter),
+ controller.WithCollectPeriod(time.Minute),
+ )
+ if err := controller.Start(context.Background()); err != nil {
+ return err
+ }
+ meterProvider := controller.MeterProvider()
+ meter := metric.Must(meterProvider.Meter("test-meter"))
+ recordMetrics(meter)
+
+ // Stopping the controller will collect and export metrics.
+ if err := controller.Stop(context.Background()); err != nil {
+ return err
+ }
+ select {
+ case err := <-otelErrors:
+ return err
+ default:
+ return nil
+ }
+}
+
+type idGeneratorFuncs struct {
+ newIDs func(context context.Context) (trace.TraceID, trace.SpanID)
+ newSpanID func(ctx context.Context, traceID trace.TraceID) trace.SpanID
+}
+
+func (m *idGeneratorFuncs) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+ return m.newIDs(ctx)
+}
+
+func (m *idGeneratorFuncs) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+ return m.newSpanID(ctx, traceID)
+}
+
+type otelErrorHandlerFunc func(error)
+
+func (f otelErrorHandlerFunc) Handle(err error) {
+ f(err)
+}
diff --git a/systemtest/rum_test.go b/systemtest/rum_test.go
new file mode 100644
index 00000000000..9dd939831c1
--- /dev/null
+++ b/systemtest/rum_test.go
@@ -0,0 +1,179 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package systemtest_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/estest" +) + +func TestRUMXForwardedFor(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + serverURL, err := url.Parse(srv.URL) + require.NoError(t, err) + serverURL.Path = "/intake/v2/rum/events" + + const body = `{"metadata":{"service":{"name":"rum-js-test","agent":{"name":"rum-js","version":"5.5.0"}}}} +{"transaction":{"trace_id":"611f4fa950f04631aaaaaaaaaaaaaaaa","id":"611f4fa950f04631","type":"page-load","duration":643,"span_count":{"started":0}}}` + + req, _ := http.NewRequest("POST", serverURL.String(), strings.NewReader(body)) + req.Header.Set("Content-Type", "application/x-ndjson") + req.Header.Set("X-Forwarded-For", "220.244.41.16") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusAccepted, resp.StatusCode) + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*", estest.TermQuery{Field: "processor.event", Value: "transaction"}) + systemtest.ApproveEvents( + t, t.Name(), result.Hits.Hits, + // RUM timestamps are set by the server based on the time the payload is received. + "@timestamp", "timestamp.us", + // RUM events have the source port recorded, and in the tests it will be dynamic + "source.port", + ) +} + +func TestRUMAllowServiceNames(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "abc123" + srv.Config.RUM = &apmservertest.RUMConfig{ + Enabled: true, + AllowServiceNames: []string{"allowed"}, + } + err := srv.Start() + require.NoError(t, err) + + // Send a RUM transaction where the service name in metadata is allowed, + // but is overridden in the transaction event's context with a disallowed + // service name. 
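+ // The server must authorize each event against its effective (overridden)
+ // service name rather than just the metadata, so this request is expected
+ // to be rejected with 403 Forbidden.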
+ reqBody := strings.NewReader(` +{"metadata":{"service":{"name":"allowed","version":"1.0.0","agent":{"name":"rum-js","version":"0.0.0"}}}} +{"transaction":{"trace_id":"x","id":"y","type":"z","duration":0,"span_count":{"started":1},"context":{"service":{"name":"disallowed"}}}} +`[1:]) + req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/rum/events", reqBody) + req.Header.Add("Content-Type", "application/x-ndjson") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + respBody, _ := ioutil.ReadAll(resp.Body) + assert.Equal(t, http.StatusForbidden, resp.StatusCode, string(respBody)) + assert.Equal(t, `{"accepted":0,"errors":[{"message":"unauthorized: anonymous access not permitted for service \"disallowed\""}]}`+"\n", string(respBody)) +} + +func TestRUMRateLimit(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + srv.Config.AgentAuth.SecretToken = "abc123" // enable auth & rate limiting + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + srv.Config.AgentAuth.Anonymous = &apmservertest.AnonymousAuthConfig{ + Enabled: true, + RateLimit: &apmservertest.RateLimitConfig{ + IPLimit: 2, + + // Set the event limit to less than 10 (the batch size) + // to immediately return 429 rather than waiting until + // another batch can be processed. + EventLimit: 5, + }, + } + err := srv.Start() + require.NoError(t, err) + + sendEvents := func(ip string, n int) error { + body := bytes.NewBufferString(`{"metadata":{"service":{"name":"allowed","version":"1.0.0","agent":{"name":"rum-js","version":"0.0.0"}}}}` + "\n") + for i := 0; i < n; i++ { + body.WriteString(`{"transaction":{"trace_id":"x","id":"y","type":"z","duration":0,"span_count":{"started":1},"context":{"service":{"name":"foo"}}}}` + "\n") + } + + req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/rum/events?verbose=true", body) + req.Header.Add("Content-Type", "application/x-ndjson") + req.Header.Add("X-Forwarded-For", ip) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + respBody, _ := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusAccepted { + return fmt.Errorf("%s (%s)", resp.Status, strings.TrimSpace(string(respBody))) + } + return nil + } + + // The configured event rate limit is multiplied by 3 for the initial burst. Check that + // for the configured IP limit (2), we can handle 3*event_limit without being rate limited. + err = sendEvents("10.11.12.13", 3*srv.Config.AgentAuth.Anonymous.RateLimit.EventLimit) + assert.NoError(t, err) + + // Sending the events over multiple requests should have the same outcome. + for i := 0; i < 3; i++ { + err = sendEvents("10.11.12.14", srv.Config.AgentAuth.Anonymous.RateLimit.EventLimit) + assert.NoError(t, err) + } + + // The rate limiter cache only has space for 2 IPs, so the 3rd one reuses an existing + // limiter, which will have already been exhausted. + err = sendEvents("10.11.12.15", 10) + require.Error(t, err) + + // The exact error differs, depending on whether rate limiting was applied at the request + // level, or at the event stream level. Either could occur. + assert.Regexp(t, `429 Too Many Requests .*`, err.Error()) +} + +func TestRUMCORS(t *testing.T) { + // Check that CORS configuration is effective. More specific behaviour is unit tested. 
+ srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{ + Enabled: true, + AllowOrigins: []string{"blue"}, + AllowHeaders: []string{"stick", "door"}, + } + err := srv.Start() + require.NoError(t, err) + + req, _ := http.NewRequest("OPTIONS", srv.URL+"/intake/v2/rum/events", nil) + req.Header.Set("Origin", "blue") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "blue", resp.Header.Get("Access-Control-Allow-Origin")) + assert.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods")) + assert.Equal(t, "stick, door, Content-Type, Content-Encoding, Accept", resp.Header.Get("Access-Control-Allow-Headers")) +} diff --git a/systemtest/sampling_test.go b/systemtest/sampling_test.go index 98c10bdf5f2..a3e657274a9 100644 --- a/systemtest/sampling_test.go +++ b/systemtest/sampling_test.go @@ -19,16 +19,22 @@ package systemtest_test import ( "context" + "errors" "fmt" + "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" "go.elastic.co/apm" + "golang.org/x/sync/errgroup" "github.com/elastic/apm-server/systemtest" "github.com/elastic/apm-server/systemtest/apmservertest" "github.com/elastic/apm-server/systemtest/estest" + "github.com/elastic/go-elasticsearch/v7/esapi" ) func TestKeepUnsampled(t *testing.T) { @@ -50,24 +56,209 @@ func TestKeepUnsampled(t *testing.T) { tracer.StartTransaction("unsampled", transactionType).End() tracer.Flush(nil) - var result estest.SearchResult - _, err = systemtest.Elasticsearch.Search("apm-*").WithQuery(estest.BoolQuery{ - Filter: []interface{}{ - estest.TermQuery{ - Field: "transaction.type", - Value: transactionType, - }, - }, - }).Do(context.Background(), &result, - estest.WithCondition(result.Hits.NonEmptyCondition()), - ) - require.NoError(t, err) - expectedTransactionDocs := 1 if keepUnsampled { expectedTransactionDocs++ } + + result := systemtest.Elasticsearch.ExpectMinDocs(t, expectedTransactionDocs, "apm-*", estest.TermQuery{ + Field: "transaction.type", + Value: transactionType, + }) assert.Len(t, result.Hits.Hits, expectedTransactionDocs) }) } } + +func TestKeepUnsampledWarning(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.Sampling = &apmservertest.SamplingConfig{KeepUnsampled: false} + srv.Config.Aggregation = &apmservertest.AggregationConfig{ + Transactions: &apmservertest.TransactionAggregationConfig{Enabled: false}, + } + require.NoError(t, srv.Start()) + require.NoError(t, srv.Close()) + + var messages []string + for _, log := range srv.Logs.All() { + messages = append(messages, log.Message) + } + assert.Contains(t, messages, ""+ + "apm-server.sampling.keep_unsampled and apm-server.aggregation.transactions.enabled are both false, "+ + "which will lead to incorrect metrics being reported in the APM UI", + ) +} + +func TestTailSampling(t *testing.T) { + systemtest.CleanupElasticsearch(t) + cleanupFleet(t, systemtest.Fleet) + integrationPackage := getAPMIntegrationPackage(t, systemtest.Fleet) + err := systemtest.Fleet.InstallPackage(integrationPackage.Name, integrationPackage.Version) + require.NoError(t, err) + + srv1 := apmservertest.NewUnstartedServer(t) + srv1.Config.DataStreams = &apmservertest.DataStreamsConfig{Enabled: true} + srv1.Config.Sampling = &apmservertest.SamplingConfig{ + Tail: 
&apmservertest.TailSamplingConfig{ + Enabled: true, + Interval: time.Second, + Policies: []apmservertest.TailSamplingPolicy{{SampleRate: 0.5}}, + }, + } + srv1.Config.Monitoring = newFastMonitoringConfig() + require.NoError(t, srv1.Start()) + + srv2 := apmservertest.NewUnstartedServer(t) + srv2.Config.DataStreams = &apmservertest.DataStreamsConfig{Enabled: true} + srv2.Config.Sampling = srv1.Config.Sampling + require.NoError(t, srv2.Start()) + + const total = 200 + const expected = 100 // 50% + + tracer1 := srv1.Tracer() + tracer2 := srv2.Tracer() + for i := 0; i < total; i++ { + parent := tracer1.StartTransaction("GET /", "parent") + parent.Duration = time.Second * time.Duration(i+1) + child := tracer2.StartTransactionOptions("GET /", "child", apm.TransactionOptions{ + TraceContext: parent.TraceContext(), + }) + child.Duration = 500 * time.Millisecond * time.Duration(i+1) + child.End() + parent.End() + } + tracer1.Flush(nil) + tracer2.Flush(nil) + + // Flush the data stream while the test is running, as we have no + // control over the settings for the sampled traces index template. + refreshPeriodically(t, 250*time.Millisecond, "traces-apm.sampled-*") + + for _, transactionType := range []string{"parent", "child"} { + var result estest.SearchResult + t.Logf("waiting for %d %q transactions", expected, transactionType) + _, err := systemtest.Elasticsearch.Search("traces-*").WithQuery(estest.TermQuery{ + Field: "transaction.type", + Value: transactionType, + }).WithSize(total).Do(context.Background(), &result, + estest.WithCondition(result.Hits.MinHitsCondition(expected)), + ) + require.NoError(t, err) + assert.Len(t, result.Hits.Hits, expected) + } + + // Make sure apm-server.sampling.tail metrics are published. Metric values are unit tested. + doc := getBeatsMonitoringStats(t, srv1, nil) + assert.True(t, gjson.GetBytes(doc.RawSource, "beats_stats.metrics.apm-server.sampling.tail").Exists()) + + // Check tail-sampling config is reported in telemetry. + var state struct { + APMServer struct { + Sampling struct { + Tail struct { + Enabled bool + Policies int + } + } + } `mapstructure:"apm-server"` + } + getBeatsMonitoringState(t, srv1, &state) + assert.True(t, state.APMServer.Sampling.Tail.Enabled) + assert.Equal(t, 1, state.APMServer.Sampling.Tail.Policies) +} + +func TestTailSamplingUnlicensed(t *testing.T) { + // Start an ephemeral Elasticsearch container with a Basic license to + // test that tail-based sampling requires a platinum or trial license. + es, err := systemtest.NewUnstartedElasticsearchContainer() + require.NoError(t, err) + es.Env["xpack.license.self_generated.type"] = "basic" + require.NoError(t, es.Start()) + defer es.Close() + + // Data streams are required for tail-based sampling, but since we're using + // an ephemeral Elasticsearch container it's not straightforward to install + // the integration package. We won't be indexing anything, so just don't wait + // for the integration package to be installed in this test. 
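+ // (WaitForIntegration is a *bool so that leaving it unset falls back to
+ // the server default; the test sets an explicit false to skip the wait.)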
+ waitForIntegration := false
+ srv := apmservertest.NewUnstartedServer(t)
+ srv.Config.Output.Elasticsearch.Hosts = []string{es.Addr}
+ srv.Config.DataStreams = &apmservertest.DataStreamsConfig{
+ Enabled: true,
+ WaitForIntegration: &waitForIntegration,
+ }
+ srv.Config.Sampling = &apmservertest.SamplingConfig{
+ Tail: &apmservertest.TailSamplingConfig{
+ Enabled: true,
+ Interval: time.Second,
+ Policies: []apmservertest.TailSamplingPolicy{{SampleRate: 0.5}},
+ },
+ }
+ require.NoError(t, srv.Start())
+
+ // Send some transactions to trigger an indexing attempt.
+ tracer := srv.Tracer()
+ for i := 0; i < 100; i++ {
+ tx := tracer.StartTransaction("GET /", "parent")
+ tx.Duration = time.Second * time.Duration(i+1)
+ tx.End()
+ }
+ tracer.Flush(nil)
+
+ timeout := time.After(time.Minute)
+ logs := srv.Logs.Iterator()
+ var done bool
+ for !done {
+ select {
+ case entry := <-logs.C():
+ done = strings.Contains(entry.Message, "invalid license")
+ case <-timeout:
+ t.Fatal("timed out waiting for log message")
+ }
+ }
+
+ // Due to the failing license check, APM Server will refuse to index anything.
+ var result estest.SearchResult
+ _, err = es.Client.Search("traces-apm*").Do(context.Background(), &result)
+ assert.NoError(t, err)
+ assert.Empty(t, result.Hits.Hits)
+
+ // The server will wait for the enqueued events to be published before
+ // shutting down gracefully, so shutdown forcefully.
+ srv.Kill()
+}
+
+func refreshPeriodically(t *testing.T, interval time.Duration, index ...string) {
+ g, ctx := errgroup.WithContext(context.Background())
+ ctx, cancel := context.WithCancel(ctx)
+ t.Cleanup(func() {
+ cancel()
+ assert.NoError(t, g.Wait())
+ })
+ g.Go(func() error {
+ // Tick at the interval requested by the caller.
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ allowNoIndices := true
+ ignoreUnavailable := true
+ request := esapi.IndicesRefreshRequest{
+ Index: index,
+ AllowNoIndices: &allowNoIndices,
+ IgnoreUnavailable: &ignoreUnavailable,
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-ticker.C:
+ }
+ if _, err := systemtest.Elasticsearch.Do(ctx, &request, nil); err != nil {
+ if errors.Is(err, context.Canceled) {
+ return nil
+ }
+ return err
+ }
+ }
+ })
+}
diff --git a/systemtest/sourcemap_test.go b/systemtest/sourcemap_test.go
new file mode 100644
index 00000000000..84bdb4f9751
--- /dev/null
+++ b/systemtest/sourcemap_test.go
@@ -0,0 +1,291 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package systemtest_test + +import ( + "bytes" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/estest" +) + +func TestRUMErrorSourcemapping(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "1.0.1", + ) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil) + + systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/errors_rum.ndjson") + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*-error", nil) + + systemtest.ApproveEvents( + t, t.Name(), result.Hits.Hits, + // RUM timestamps are set by the server based on the time the payload is received. + "@timestamp", "timestamp.us", + // RUM events have the source port recorded, and in the tests it will be dynamic + "source.port", + ) +} + +func TestRUMSpanSourcemapping(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "1.0.0", + ) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil) + systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/transactions_spans_rum_2.ndjson") + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*-span", nil) + + systemtest.ApproveEvents( + t, t.Name(), result.Hits.Hits, + // RUM timestamps are set by the server based on the time the payload is received. 
+ "@timestamp", "timestamp.us", + // RUM events have the source port recorded, and in the tests it will be dynamic + "source.port", + ) +} + +func TestDuplicateSourcemapWarning(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "1.0.0", + ) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil) + + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "1.0.0", + ) + systemtest.Elasticsearch.ExpectMinDocs(t, 2, "apm-*-sourcemap", nil) + + require.NoError(t, srv.Close()) + var messages []string + for _, entry := range srv.Logs.All() { + messages = append(messages, entry.Message) + } + assert.Contains(t, messages, + `Overriding sourcemap for service apm-agent-js version 1.0.0 and `+ + `file http://localhost:8000/test/e2e/general-usecase/bundle.js.map`, + ) +} + +func TestNoMatchingSourcemap(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + // upload sourcemap with a wrong service version + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "2.0", + ) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil) + + systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/transactions_spans_rum_2.ndjson") + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*-span", nil) + + systemtest.ApproveEvents( + t, t.Name(), result.Hits.Hits, + // RUM timestamps are set by the server based on the time the payload is received. + "@timestamp", "timestamp.us", + // RUM events have the source port recorded, and in the tests it will be dynamic + "source.port", + ) +} + +func TestFetchLatestSourcemap(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewUnstartedServer(t) + srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true} + err := srv.Start() + require.NoError(t, err) + + // upload sourcemap file that finds no matchings + uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map", + "http://localhost:8000/test/e2e/general-usecase/bundle.js.map", + "apm-agent-js", + "2.0", + ) + systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil) + + systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/errors_rum.ndjson") + result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*-error", nil) + assertSourcemapUpdated(t, result, false) + deleteIndex(t, "apm-*-error*") + + // upload second sourcemap file with same key, + // that actually leads to proper matchings + // this also tests that the cache gets invalidated, + // as otherwise the former sourcemap would be taken from the cache. 
+ uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map",
+ "http://localhost:8000/test/e2e/general-usecase/bundle.js.map",
+ "apm-agent-js",
+ "1.0.1",
+ )
+ systemtest.Elasticsearch.ExpectMinDocs(t, 2, "apm-*-sourcemap", nil)
+
+ systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/errors_rum.ndjson")
+ result = systemtest.Elasticsearch.ExpectDocs(t, "apm-*-error", nil)
+ assertSourcemapUpdated(t, result, true)
+}
+
+func TestSourcemapCaching(t *testing.T) {
+ systemtest.CleanupElasticsearch(t)
+ srv := apmservertest.NewUnstartedServer(t)
+ srv.Config.RUM = &apmservertest.RUMConfig{Enabled: true}
+ err := srv.Start()
+ require.NoError(t, err)
+
+ uploadSourcemap(t, srv, "../testdata/sourcemap/bundle.js.map",
+ "http://localhost:8000/test/e2e/general-usecase/bundle.js.map",
+ "apm-agent-js",
+ "1.0.1",
+ )
+ systemtest.Elasticsearch.ExpectDocs(t, "apm-*-sourcemap", nil)
+
+ // Index an error, applying source mapping and caching the source map in the process.
+ systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/errors_rum.ndjson")
+ result := systemtest.Elasticsearch.ExpectDocs(t, "apm-*-error", nil)
+ assertSourcemapUpdated(t, result, true)
+
+ // Delete the source map and error, and try again.
+ deleteIndex(t, "apm-*-sourcemap*")
+ deleteIndex(t, "apm-*-error*")
+ systemtest.SendRUMEventsPayload(t, srv, "../testdata/intake-v2/errors_rum.ndjson")
+ result = systemtest.Elasticsearch.ExpectMinDocs(t, 1, "apm-*-error", nil)
+ assertSourcemapUpdated(t, result, true)
+}
+
+func uploadSourcemap(t *testing.T, srv *apmservertest.Server, sourcemapFile, bundleFilepath, serviceName, serviceVersion string) {
+ t.Helper()
+
+ req := newUploadSourcemapRequest(t, srv, sourcemapFile, bundleFilepath, serviceName, serviceVersion)
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ respBody, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusAccepted, resp.StatusCode, string(respBody))
+}
+
+func newUploadSourcemapRequest(t *testing.T, srv *apmservertest.Server, sourcemapFile, bundleFilepath, serviceName, serviceVersion string) *http.Request {
+ t.Helper()
+
+ var data bytes.Buffer
+ mw := multipart.NewWriter(&data)
+ require.NoError(t, mw.WriteField("service_name", serviceName))
+ require.NoError(t, mw.WriteField("service_version", serviceVersion))
+ require.NoError(t, mw.WriteField("bundle_filepath", bundleFilepath))
+
+ f, err := os.Open(sourcemapFile)
+ require.NoError(t, err)
+ defer f.Close()
+ sourcemapFileWriter, err := mw.CreateFormFile("sourcemap", filepath.Base(sourcemapFile))
+ require.NoError(t, err)
+ _, err = io.Copy(sourcemapFileWriter, f)
+ require.NoError(t, err)
+ require.NoError(t, mw.Close())
+
+ req, _ := http.NewRequest("POST", srv.URL+"/assets/v1/sourcemaps", &data)
+ req.Header.Add("Content-Type", mw.FormDataContentType())
+ return req
+}
+
+func deleteIndex(t *testing.T, name string) {
+ resp, err := systemtest.Elasticsearch.Indices.Delete([]string{name})
+ require.NoError(t, err)
+ resp.Body.Close()
+ resp, err = systemtest.Elasticsearch.Indices.Flush()
+ require.NoError(t, err)
+ resp.Body.Close()
+}
+
+func assertSourcemapUpdated(t *testing.T, result estest.SearchResult, updated bool) {
+ t.Helper()
+
+ type StacktraceFrame struct {
+ Sourcemap struct {
+ Updated bool
+ }
+ }
+ type Error struct {
+ Exception []struct {
+ Stacktrace []StacktraceFrame
+ }
+ Log struct {
+ Stacktrace []StacktraceFrame
+ }
+ }
+
+ for _, 
hit := range result.Hits.Hits { + var source struct { + Error Error + } + err := hit.UnmarshalSource(&source) + require.NoError(t, err) + + for _, exception := range source.Error.Exception { + for _, stacktrace := range exception.Stacktrace { + assert.Equal(t, updated, stacktrace.Sourcemap.Updated) + } + } + + for _, stacktrace := range source.Error.Log.Stacktrace { + assert.Equal(t, updated, stacktrace.Sourcemap.Updated) + } + } +} diff --git a/systemtest/template_test.go b/systemtest/template_test.go new file mode 100644 index 00000000000..27d6ee524ed --- /dev/null +++ b/systemtest/template_test.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/systemtest" + "github.com/elastic/apm-server/systemtest/apmservertest" + "github.com/elastic/apm-server/systemtest/estest" + "github.com/elastic/go-elasticsearch/v7/esapi" +) + +func TestIndexTemplateCoverage(t *testing.T) { + systemtest.CleanupElasticsearch(t) + srv := apmservertest.NewServer(t) + + // Index each supported event type. + var totalEvents int + for _, payloadFile := range []string{ + "../testdata/intake-v2/errors.ndjson", + "../testdata/intake-v2/metricsets.ndjson", + "../testdata/intake-v2/spans.ndjson", + "../testdata/intake-v2/transactions.ndjson", + } { + data, err := ioutil.ReadFile(payloadFile) + require.NoError(t, err) + req, _ := http.NewRequest("POST", srv.URL+"/intake/v2/events?verbose=true", bytes.NewReader(data)) + req.Header.Set("Content-Type", "application/x-ndjson") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusAccepted, resp.StatusCode) + + var result struct { + Accepted int + } + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + assert.NotZero(t, result.Accepted) + totalEvents += result.Accepted + } + + // Wait for events to be indexed. + systemtest.Elasticsearch.ExpectMinDocs(t, totalEvents, "apm-*", estest.BoolQuery{ + MustNot: []interface{}{estest.TermQuery{Field: "processor.event", Value: "onboarding"}}, + }) + + // Check index mappings are covered by the template with the exception of known dynamic fields (e.g. labels). 
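+ // The check flattens both the concrete index mappings and the template
+ // mappings into dotted field paths (getFlattenedFields below), and then
+ // verifies that every indexed field is declared in the template.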
+ var indexMappings map[string]struct { + Mappings map[string]interface{} + } + _, err := systemtest.Elasticsearch.Do(context.Background(), + &esapi.IndicesGetMappingRequest{Index: []string{"apm-*"}}, + &indexMappings, + ) + require.NoError(t, err) + + indexTemplate := getIndexTemplate(t, srv.Version) + indexTemplateFlattenedFields := make(map[string]interface{}) + indexTemplateMappings := indexTemplate["mappings"].(map[string]interface{}) + getFlattenedFields(indexTemplateMappings["properties"].(map[string]interface{}), "", indexTemplateFlattenedFields) + + knownMetrics := []string{ + "negative", // negative.d.o.t.t.e.d + "dotted", // dotted.float.gauge + "go", // go.memstats.heap.sys + "short_gauge", + "integer_gauge", + "long_gauge", + "float_gauge", + "double_gauge", + "byte_counter", + "short_counter", + "latency_distribution", + } + + for index, indexMappings := range indexMappings { + metricIndex := strings.Contains(index, "-metric-") + indexFlattenedFields := make(map[string]interface{}) + getFlattenedFields(indexMappings.Mappings["properties"].(map[string]interface{}), "", indexFlattenedFields) + for field := range indexFlattenedFields { + if strings.HasPrefix(field, "labels.") || strings.HasPrefix(field, "transaction.marks.") { + // Labels and RUM page marks are dynamically indexed. + continue + } + _, ok := indexTemplateFlattenedFields[field] + if !ok && metricIndex { + var isKnownMetric bool + for _, knownMetric := range knownMetrics { + if strings.HasPrefix(field, knownMetric) { + isKnownMetric = true + break + } + } + if isKnownMetric { + continue + } + } + assert.True(t, ok, "%s: field %s not defined in index template", index, field) + } + } +} + +func getIndexTemplate(t testing.TB, serverVersion string) map[string]interface{} { + indexTemplateName := "apm-" + serverVersion + indexTemplates := make(map[string]interface{}) + + // Wait for the index template to be created. + timeout := time.NewTimer(10 * time.Second) + defer timeout.Stop() + for { + select { + case <-timeout.C: + t.Fatal("timed out waiting for index template") + default: + } + if _, err := systemtest.Elasticsearch.Do(context.Background(), + &esapi.IndicesGetTemplateRequest{Name: []string{indexTemplateName}}, + &indexTemplates, + ); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + require.Len(t, indexTemplates, 1) + require.Contains(t, indexTemplates, indexTemplateName) + indexTemplate := indexTemplates[indexTemplateName].(map[string]interface{}) + return indexTemplate +} + +func getFlattenedFields(properties map[string]interface{}, prefix string, out map[string]interface{}) { + for field, mapping := range properties { + mapping := mapping.(map[string]interface{}) + out[prefix+field] = mapping + if properties, ok := mapping["properties"].(map[string]interface{}); ok { + getFlattenedFields(properties, prefix+field+".", out) + } + } +} diff --git a/systemtest/tls_test.go b/systemtest/tls_test.go new file mode 100644 index 00000000000..c360b37d5ba --- /dev/null +++ b/systemtest/tls_test.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemtest_test + +import ( + "crypto/tls" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/elastic/apm-server/systemtest/apmservertest" +) + +func TestTLSConfig(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + srv.Config.TLS = &apmservertest.TLSConfig{ + SupportedProtocols: []string{"TLSv1.2"}, + CipherSuites: []string{"ECDHE-RSA-AES-128-GCM-SHA256"}, + } + require.NoError(t, srv.StartTLS()) + + attemptRequest := func(t *testing.T, minVersion, maxVersion uint16, cipherSuites ...uint16) error { + tlsConfig := &tls.Config{RootCAs: srv.TLS.RootCAs} + tlsConfig.MinVersion = minVersion + tlsConfig.MaxVersion = maxVersion + tlsConfig.CipherSuites = cipherSuites + httpClient := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + resp, err := httpClient.Get(srv.URL) + if err != nil { + return err + } + defer resp.Body.Close() + return nil + } + + t.Run("compatible_cipher_suite", func(t *testing.T) { + err := attemptRequest(t, 0, 0, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) + require.NoError(t, err) + }) + + t.Run("compatible_protocol", func(t *testing.T) { + err := attemptRequest(t, tls.VersionTLS12, tls.VersionTLS12) + require.NoError(t, err) + }) + + t.Run("incompatible_cipher_suite", func(t *testing.T) { + err := attemptRequest(t, 0, 0, tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384) + require.Error(t, err) + assert.Regexp(t, ".*tls: handshake failure", err.Error()) + }) + + t.Run("incompatible_protocol", func(t *testing.T) { + for _, version := range []uint16{tls.VersionTLS10, tls.VersionTLS13} { + err := attemptRequest(t, version, version) + require.Error(t, err) + assert.Regexp(t, ".*tls: protocol version not supported", err.Error()) + } + }) +} + +func TestTLSClientAuth(t *testing.T) { + srv := apmservertest.NewUnstartedServer(t) + srv.Config.TLS = &apmservertest.TLSConfig{ClientAuthentication: "required"} + require.NoError(t, srv.StartTLS()) + + attemptRequest := func(t *testing.T, tlsConfig *tls.Config) error { + httpClient := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + resp, err := httpClient.Get(srv.URL) + if err != nil { + return err + } + defer resp.Body.Close() + return nil + } + + err := attemptRequest(t, &tls.Config{InsecureSkipVerify: true}) + require.Error(t, err) + assert.Regexp(t, "tls: bad certificate", err.Error()) + logs := srv.Logs.Iterator() + defer logs.Close() + for entry := range logs.C() { + if entry.Logger != "beater.http" || entry.Level != zapcore.ErrorLevel { + continue + } + assert.Equal(t, "http/server.go", entry.File) + assert.Regexp(t, "tls: client didn't provide a certificate", entry.Message) + break + } + + err = attemptRequest(t, srv.TLS) + require.NoError(t, err) +} diff --git a/testdata/intake-v2/errors.ndjson b/testdata/intake-v2/errors.ndjson index 75c2892fc60..f4aacee646e 100644 --- a/testdata/intake-v2/errors.ndjson +++ b/testdata/intake-v2/errors.ndjson @@ -1,5 +1,5 @@ -{"metadata": {"process": {"ppid": 6789, "pid": 1234, "argv": ["node", 
"server.js"], "title": "node"}, "user": { "id": 123, "username": "bar", "email": "bar@example.com"}, "system": {"platform": "darwin", "hostname": "prod1.example.com", "configured_hostname": "prod.example", "detected_hostname": "myhostname", "architecture": "x64", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}}, "service": {"name": "1234_service-12a3","node": {"configured_name": "node-abc"},"language": {"version": "8", "name": "ecmascript"}, "agent": {"version": "3.14.0", "name": "elastic-node", "ephemeral_id":"abcdef123"}, "environment": "staging", "framework": {"version": "1.2.3", "name": "Express"}, "version": "5.1.3", "runtime": {"version": "8.0.0", "name": "node"}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region"}}} -{"error": {"id": "0123456789012345", "timestamp": 1494342245999999, "culprit": "my.module.function_name","log": { "message": "My service could not talk to the database named foobar", "param_message": "My service could not talk to the database named %s", "logger_name": "my.logger.name", "level": "warning", "stacktrace": [{"classname": "User::Common"}, {"abs_path": "/real/file/name.py", "filename": "/webpack/file/name.py", "classname": "Webpack::File::Name", "function": "foo", "vars": { "key": "value" }, "pre_context": ["line1", "line2"], "context_line": "line3","library_frame": false,"lineno": 3,"module": "App::MyModule","colno": 4,"post_context": ["line4","line5" ]},{"filename": "lib/instrumentation/index.js","lineno": 102,"function": "instrumented","abs_path": "/Users/watson/code/node_modules/elastic/lib/instrumentation/index.js","vars": {"key": "value"},"pre_context": [" var trans = this.currentTransaction",""," return instrumented",""," function instrumented () {"," var prev = ins.currentTransaction", " ins.currentTransaction = trans"],"context_line": " var result = original.apply(this, arguments)","post_context": [" ins.currentTransaction = prev"," return result","}","}","","Instrumentation.prototype._recoverTransaction = function (trans) {"," if (this.currentTransaction === trans) return"]}]},"exception": {"message": "The username root is unknown","type": "DbError","module": "__builtins__","code": 42,"handled": false,"attributes": {"foo": "bar" }, "cause":[{"type":"InternalDbError", "message":"something wrong writing a file", "cause":[{"type":"VeryInternalDbError", "message":"disk spinning way too fast"}, {"type":"ConnectionError", "message":"on top of it, internet doesn't work", "parent": 0}]}], "stacktrace": [{"classname": "BaseClass"},{ "abs_path": "/real/file/name.py","filename": "file/name.py","classname": "RName","function": "foo","vars": {"key": "value"},"pre_context": ["line1","line2"],"context_line": "line3", "library_frame": true,"lineno": 3,"module": "App::MyModule","colno": 4,"post_context": ["line4","line5"]},{"filename": "lib/instrumentation/index.js","lineno": 102,"function": "instrumented","abs_path": "/Users/watson/code/node_modules/elastic/lib/instrumentation/index.js","vars": {"key": "value"},"pre_context": [" var trans = this.currentTransaction",""," return instrumented",""," function instrumented () {", " var prev = ins.currentTransaction"," ins.currentTransaction = trans"],"context_line": " 
var result = original.apply(this, arguments)","post_context": [" ins.currentTransaction = prev"," return result","}","}","","Instrumentation.prototype._recoverTransaction = function (trans) {"," if (this.currentTransaction === trans) return"]}]},"context": {"page":{"referer":"http://localhost:8000/test/e2e/","url":"http://localhost:8000/test/e2e/general-usecase/"}, "request": {"socket": {"remote_address": "12.53.12.1","encrypted": true},"http_version": "1.1","method": "POST","url": {"protocol": "https:","full": "https://www.example.com/p/a/t/h?query=string#hash","hostname": "www.example.com","port": 8080,"pathname": "/p/a/t/h","search": "?query=string", "hash": "#hash","raw": "/p/a/t/h?query=string#hash"},"headers": {"user-agent": "Mozilla Chrome Edge","content-type": "text/html","cookie": "c1=v1,c2=v2","some-other-header": "foo","array": ["foo","bar","baz"]}, "cookies": {"c1": "v1", "c2": "v2" },"env": {"SERVER_SOFTWARE": "nginx", "GATEWAY_INTERFACE": "CGI/1.1"},"body": "Hello World"},"response": { "status_code": 200, "headers": { "content-type": "application/json" },"headers_sent": true, "finished": true }, "user": { "id": 99, "username": "foo"},"tags": {"organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8"}, "custom": {"my_key": 1,"some_other_value": "foo bar","and_objects": {"foo": ["bar","baz" ] }},"service": {"name": "service1", "node": {"configured_name": "node-xyz"}, "language": {"version": "1.2"}, "framework": {"version": "1", "name": "Node"}}}}} +{"metadata": {"process": {"ppid": 6789, "pid": 1234, "argv": ["node", "server.js"], "title": "node"}, "user": { "domain": "ldap://abc", "id": 123, "username": "bar", "email": "bar@example.com"}, "system": {"platform": "darwin", "hostname": "prod1.example.com", "configured_hostname": "prod.example", "detected_hostname": "myhostname", "architecture": "x64", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}}, "service": {"name": "1234_service-12a3","node": {"configured_name": "node-abc"},"language": {"version": "8", "name": "ecmascript"}, "agent": {"version": "3.14.0", "name": "elastic-node", "ephemeral_id":"abcdef123"}, "environment": "staging", "framework": {"version": "1.2.3", "name": "Express"}, "version": "5.1.3", "runtime": {"version": "8.0.0", "name": "node"}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region","service":{"name":"lambda"}}}} +{"error": {"id": "0123456789012345", "timestamp": 1494342245999999, "culprit": "my.module.function_name","log": { "message": "My service could not talk to the database named foobar", "param_message": "My service could not talk to the database named %s", "logger_name": "my.logger.name", "level": "warning", "stacktrace": [{"classname": "User::Common"}, {"abs_path": "/real/file/name.py", "filename": "/webpack/file/name.py", "classname": "Webpack::File::Name", "function": "foo", "vars": { "key": "value" }, "pre_context": ["line1", "line2"], "context_line": "line3","library_frame": false,"lineno": 3,"module": "App::MyModule","colno": 4,"post_context": ["line4","line5" ]},{"filename": "lib/instrumentation/index.js","lineno": 102,"function": "instrumented","abs_path": "/Users/watson/code/node_modules/elastic/lib/instrumentation/index.js","vars": 
{"key": "value"},"pre_context": [" var trans = this.currentTransaction",""," return instrumented",""," function instrumented () {"," var prev = ins.currentTransaction", " ins.currentTransaction = trans"],"context_line": " var result = original.apply(this, arguments)","post_context": [" ins.currentTransaction = prev"," return result","}","}","","Instrumentation.prototype._recoverTransaction = function (trans) {"," if (this.currentTransaction === trans) return"]}]},"exception": {"message": "The username root is unknown","type": "DbError","module": "__builtins__","code": 42,"handled": false,"attributes": {"foo": "bar" }, "cause":[{"type":"InternalDbError", "message":"something wrong writing a file", "cause":[{"type":"VeryInternalDbError", "message":"disk spinning way too fast"}, {"type":"ConnectionError", "message":"on top of it, internet doesn't work", "parent": 0}]}], "stacktrace": [{"classname": "BaseClass"},{ "abs_path": "/real/file/name.py","filename": "file/name.py","classname": "RName","function": "foo","vars": {"key": "value"},"pre_context": ["line1","line2"],"context_line": "line3", "library_frame": true,"lineno": 3,"module": "App::MyModule","colno": 4,"post_context": ["line4","line5"]},{"filename": "lib/instrumentation/index.js","lineno": 102,"function": "instrumented","abs_path": "/Users/watson/code/node_modules/elastic/lib/instrumentation/index.js","vars": {"key": "value"},"pre_context": [" var trans = this.currentTransaction",""," return instrumented",""," function instrumented () {", " var prev = ins.currentTransaction"," ins.currentTransaction = trans"],"context_line": " var result = original.apply(this, arguments)","post_context": [" ins.currentTransaction = prev"," return result","}","}","","Instrumentation.prototype._recoverTransaction = function (trans) {"," if (this.currentTransaction === trans) return"]}]},"context": {"page":{"referer":"http://localhost:8000/test/e2e/","url":"http://localhost:8000/test/e2e/general-usecase/"}, "request": {"socket": {"remote_address": "12.53.12.1","encrypted": true},"http_version": "1.1","method": "POST","url": {"protocol": "https:","full": "https://www.example.com/p/a/t/h?query=string#hash","hostname": "www.example.com","port": 8080,"pathname": "/p/a/t/h","search": "?query=string", "hash": "#hash","raw": "/p/a/t/h?query=string#hash"},"headers": {"user-agent": "Mozilla Chrome Edge","content-type": "text/html","cookie": "c1=v1,c2=v2","some-other-header": "foo","array": ["foo","bar","baz"]}, "cookies": {"c1": "v1", "c2": "v2" },"env": {"SERVER_SOFTWARE": "nginx", "GATEWAY_INTERFACE": "CGI/1.1"},"body": "Hello World"},"response": { "status_code": 200, "headers": { "content-type": "application/json" },"headers_sent": true, "finished": true }, "user": { "domain": "ldap://abc", "id": 99, "username": "foo"},"tags": {"organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8"}, "custom": {"my_key": 1,"some_other_value": "foo bar","and_objects": {"foo": ["bar","baz" ] }},"service": {"name": "service1", "node": {"configured_name": "node-xyz"}, "language": {"version": "1.2"}, "framework": {"version": "1", "name": "Node"}}}}} {"error": {"id": "xFoaabb123FFFFFF", "timestamp": 1533826745999000,"log": {"message": "no user found", "stacktrace": [{"classname": "User::Special"}]}}} {"error": {"id": "cdefab0123456789", "trace_id": null, "timestamp": 1533826745999000,"exception": {"message": "Cannot read property 'baz' no defined"}}} {"error": {"id": "cdefab0123456780", "trace_id": "0123456789abcdeffedcba0123456789", "parent_id": "9632587410abcdef", 
"exception": {"type": "DbError"}, "context":{"service": {"name": "service1", "environment":"testing","language": {"version": "2.5", "name": "ruby"}, "agent": {"version": "2.1.3", "name": "elastic-ruby", "ephemeral_id":"justanid"}, "framework": {"version": "5.0", "name": "Rails"}, "version": "2", "runtime": {"version": "2.5", "name": "cruby"}}}}} diff --git a/testdata/intake-v2/experimental.ndjson b/testdata/intake-v2/experimental.ndjson deleted file mode 100644 index 60b897d26f4..00000000000 --- a/testdata/intake-v2/experimental.ndjson +++ /dev/null @@ -1,4 +0,0 @@ -{"metadata": { "service": {"name": "1234_service-12a3", "agent": {"version": "3.14.0", "name": "elastic-node"}}}} -{"error": {"timestamp":1494342245999999,"id": "abcdef0123456789", "log": {"message": "error log message"},"context":{"experimental":{"foo":"bar","a":{"b":1}}}}} -{"span": {"timestamp":1496170407154000,"id": "0123456a89012345", "trace_id": "0123456789abcdef0123456789abcdef", "parent_id": "ab23456a89012345", "transaction_id": "ab23456a89012345", "name": "GET /api/types", "type": "request", "start": 1.845, "duration": 3.5642981,"context":{"experimental":{"foo":"bar","a":{"b":1}}}}} -{"transaction": {"timestamp":1496170407154000,"trace_id": "01234567890123456789abcdefabcdef", "id": "abcdef1478523690", "type": "request", "duration": 32.592981, "span_count": {"started": 0},"context":{"experimental":{"foo":"bar","a":{"b":1}}}}} diff --git a/testdata/intake-v2/unrecognized-event.ndjson b/testdata/intake-v2/invalid-event-type.ndjson similarity index 100% rename from testdata/intake-v2/unrecognized-event.ndjson rename to testdata/intake-v2/invalid-event-type.ndjson diff --git a/testdata/intake-v2/metadata.ndjson b/testdata/intake-v2/metadata.ndjson index bccc533ed42..be1d2104409 100644 --- a/testdata/intake-v2/metadata.ndjson +++ b/testdata/intake-v2/metadata.ndjson @@ -1 +1 @@ -{"metadata":{"service":{"name":"1234_service-12a3","node":{"configured_name":"node-123"},"version":"5.1.3","environment":"staging","language":{"name":"ecmascript","version":"8"},"runtime":{"name":"node","version":"8.0.0"},"framework":{"name":"Express","version":"1.2.3"},"agent":{"name":"elastic-node","version":"3.14.0","ephemeral_id":"e71be9ac-93b0-44b9-a997-5638f6ccfc36"}},"user":{"id":"123user","username":"bar","email":"bar@user.com"},"system":{"hostname":"prod1.example.com","configured_hostname":"prod1.example.com","detected_hostname":"prod1.example.com","architecture":"x64","ip":"127.0.0.1","platform":"darwin","container":{"id":"container-id"},"kubernetes":{"namespace":"namespace1","pod":{"uid":"pod-uid","name":"pod-name"},"node":{"name":"node-name"}}},"process":{"pid":1234,"ppid":6789,"title":"node","argv":["node","server.js"]},"labels":{"tag0":null,"tag1":"one","tag2":2},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region"}}} 
+{"metadata":{"service":{"name":"1234_service-12a3","node":{"configured_name":"node-123"},"version":"5.1.3","environment":"staging","language":{"name":"ecmascript","version":"8"},"runtime":{"name":"node","version":"8.0.0"},"framework":{"name":"Express","version":"1.2.3"},"agent":{"name":"elastic-node","version":"3.14.0","ephemeral_id":"e71be9ac-93b0-44b9-a997-5638f6ccfc36"}},"user":{"domain":"ldap://abc","id":"123user","username":"bar","email":"bar@user.com"},"system":{"hostname":"prod1.example.com","configured_hostname":"prod1.example.com","detected_hostname":"prod1.example.com","architecture":"x64","ip":"127.0.0.1","platform":"darwin","container":{"id":"container-id"},"kubernetes":{"namespace":"namespace1","pod":{"uid":"pod-uid","name":"pod-name"},"node":{"name":"node-name"}}},"process":{"pid":1234,"ppid":6789,"title":"node","argv":["node","server.js"]},"labels":{"tag0":null,"tag1":"one","tag2":2},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region","service":{"name":"lambda"}}}} diff --git a/testdata/intake-v2/metricsets.ndjson b/testdata/intake-v2/metricsets.ndjson index 0efe5c5d093..5644c618358 100644 --- a/testdata/intake-v2/metricsets.ndjson +++ b/testdata/intake-v2/metricsets.ndjson @@ -1,4 +1,6 @@ {"metadata": {"user": {"username": "logged-in-user", "id": "axb123hg", "email": "user@mail.com"}, "labels": {"tag0": null, "tag1": "one", "tag2": 2}, "process": {"ppid": null, "pid": 1234, "argv": null, "title": null}, "system": null, "service": {"name": "1234_service-12a3", "node": {"configured_name": "node-1"},"language": {"version": null, "name":"ecmascript"}, "agent": {"version": "3.14.0", "name": "elastic-node"}, "environment": null, "framework": null,"version": null, "runtime": null}}} -{"metricset": { "samples": { "transaction.breakdown.count":{"value":12}, "transaction.duration.sum.us":{"value":12}, "transaction.duration.count":{"value":2}, "transaction.self_time.sum.us":{"value":10}, "transaction.self_time.count":{"value":2}, "span.self_time.count":{"value":1},"span.self_time.sum.us":{"value":633.288}, "byte_counter": { "value": 1 }, "short_counter": { "value": 227 }, "integer_gauge": { "value": 42767 }, "long_gauge": { "value": 3147483648 }, "float_gauge": { "value": 9.16 }, "double_gauge": { "value": 3.141592653589793 }, "dotted.float.gauge": { "value": 6.12 }, "negative.d.o.t.t.e.d": { "value": -1022 } }, "tags": { "some": "abc", "code": 200, "success": true }, "transaction":{"type":"request","name":"GET /"},"span":{"type":"db","subtype":"mysql"},"timestamp": 1496170422281000 }} +{"metricset": { "samples": { "transaction.breakdown.count":{"value":12}, "transaction.duration.sum.us":{"value":12}, "transaction.duration.count":{"value":2}, "transaction.self_time.sum.us":{"value":10}, "transaction.self_time.count":{"value":2}, "span.self_time.count":{"value":1},"span.self_time.sum.us":{"value":633.288}, "extraneous_metrics_ignored": { "value": -1022 } }, "tags": { "some": "abc", "code": 200, "success": true }, "transaction":{"type":"request","name":"GET /"},"span":{"type":"db","subtype":"mysql"},"timestamp": 1496170422281000 }} {"metricset": { "samples": { "go.memstats.heap.sys.bytes": { "value": 6.520832e+06 }}, "timestamp": 1496170421364000}} -{"metricset": { "samples": { 
"system.process.cgroup.memory.mem.limit.bytes":{"value":2048},"system.process.cgroup.memory.mem.usage.bytes":{"value":1024},"system.process.cgroup.memory.stats.inactive_file.bytes":{"value":48}},"timestamp": 1496170421366000}} \ No newline at end of file +{"metricset": { "samples": { "system.process.cgroup.memory.mem.limit.bytes":{"value":2048},"system.process.cgroup.memory.mem.usage.bytes":{"value":1024}},"timestamp": 1496170421366000}} +{"metricset": { "samples": { "system.process.cgroup.cpu.id": { "value": 2048 }, "system.process.cgroup.cpu.cfs.quota.us": { "value": 2048 }, "system.process.cgroup.cpu.stats.periods": { "value": 2048 }, "system.process.cgroup.cpu.stats.throttled.periods": { "value": 2048 }, "system.process.cgroup.cpu.stats.throttled.ns": { "value": 2048 }, "system.process.cgroup.cpuacct.id": { "value": 2048 }, "system.process.cgroup.cpuacct.total.ns": { "value": 2048 }, "system.process.cgroup.cpu.cfs.period.us": { "value": 1024 } }, "timestamp": 1496170421366000 }} +{"metricset": { "samples": { "latency_distribution": { "type": "histogram", "unit": "s", "counts": [1,2,3], "values": [1.1,2.2,3.3] } }, "timestamp": 1496170421366000 }} diff --git a/testdata/intake-v2/minimal.ndjson b/testdata/intake-v2/minimal.ndjson index 1e2b5c5a540..77c5bf78ef0 100644 --- a/testdata/intake-v2/minimal.ndjson +++ b/testdata/intake-v2/minimal.ndjson @@ -5,4 +5,4 @@ {"span": {"id": "0123456a89012345", "trace_id": "0123456789abcdef0123456789abcdef", "parent_id": "ab23456a89012345", "name": "GET /api/types", "type": "request", "start": 1.845, "duration": 3.5642981}} {"span": {"id": "0123456a89012345", "trace_id": "0123456789abcdef0123456789abcdef", "parent_id": "ab23456a89012345", "name": "GET /api/types", "type": "request", "timestamp": 1535655207154000, "duration": 3.5642981}} {"transaction": {"trace_id": "01234567890123456789abcdefabcdef", "id": "abcdef1478523690", "type": "request", "duration": 32.592981, "span_count": {"started": 0}}} -{"metricset": {"samples": {}, "timestamp": 1496170422281000}} +{"metricset": {"samples":{"a":{"value":3.2}}, "timestamp": 1496170422281000}} diff --git a/testdata/intake-v2/spans.ndjson b/testdata/intake-v2/spans.ndjson index a8f9fd00976..bdd94f33d17 100644 --- a/testdata/intake-v2/spans.ndjson +++ b/testdata/intake-v2/spans.ndjson @@ -1,7 +1,8 @@ -{"metadata": {"user": {"id": "123", "email": "s@test.com", "username": "john"}, "process": {"ppid": 6789, "pid": 1234,"argv": ["node", "server.js"], "title": "node"}, "system": {"platform": "darwin", "hostname": "prod1.example.com", "architecture": "x64", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}}, "labels": {"tag1": "label1"}, "service": {"name": "backendspans", "language": {"version": "8", "name": "ecmascript"}, "agent": {"version": "3.14.0", "name": "elastic-node"}, "environment": "staging", "framework": {"version": "1.2.3", "name": "Express"}, "version": "5.1.3", "runtime": {"version": "8.0.0", "name": "node"}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region"}}} +{"metadata": {"user": {"domain": "ldap://abc", "id": "123", "email": "s@test.com", "username": "john"}, "process": {"ppid": 6789, "pid": 1234,"argv": ["node", "server.js"], "title": "node"}, 
"system": {"platform": "darwin", "hostname": "prod1.example.com", "architecture": "x64", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}}, "labels": {"tag1": "label1"}, "service": {"name": "backendspans", "language": {"version": "8", "name": "ecmascript"}, "agent": {"version": "3.14.0", "name": "elastic-node"}, "environment": "staging", "framework": {"version": "1.2.3", "name": "Express"}, "version": "5.1.3", "runtime": {"version": "8.0.0", "name": "node"}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region","service":{"name":"lambda"}}}} {"span": {"trace_id": "fdedef0123456789abcdef9876543210", "parent_id": "abcdef0123456789", "id": "abcdef01234567", "child_ids": ["51234abcdef56789"], "transaction_id": "01af25874dec69dd", "name": "GET /api/types", "type": "db.postgresql.query.custom","start": null, "duration": 141.581, "timestamp": 1532976822281000, "outcome": "success"}} {"span": {"trace_id": "abcdef0123456789abcdef9876543210", "parent_id": "0000000011111111", "id": "1234abcdef567895", "transaction_id": "ab45781d265894fe", "name": "GET /api/types", "type": "request", "start": 22, "duration": 32.592981, "timestamp": 1532976822281000,"context":{"service":{"environment":"prod","agent":{}}}}} {"span": {"trace_id": "abcdef0123456789abcdef9876543210", "parent_id": "abcdefabcdef7890", "id": "0123456a89012345", "transaction_id": "ab23456a89012345", "name": "GET /api/types", "type": "request.http", "start": 1.845, "duration": 3.5642981, "stacktrace": [], "context":{"tags": {"tag1": "value1", "tag2": 123, "tag3": 12.34, "tag4": true, "tag5": null},"service":{}}}} {"span": {"trace_id": "abcdef0123456789abcdef9876543210", "parent_id": "ababcdcdefefabde", "id": "abcde56a89012345", "transaction_id": null, "name": "get /api/types", "sync": false, "type": "request", "subtype": "http", "action": "call", "start": 0, "duration": 13.9802981, "stacktrace": null, "context": null }} {"span": {"trace_id": "abcdef0123456789abcdef9876543210", "parent_id": "abcdef0123456789", "id": "1234567890aaaade", "sync": true, "name": "SELECT FROM product_types", "type": "db.postgresql.query", "start": 2.83092, "duration": 3.781912, "stacktrace": [{ "filename": "net.js", "classname": "Core.js", "lineno": 547},{"filename": "file2.js", "lineno": 12, "post_context": [ " ins.currentTransaction = prev", "}"]}, { "function": "onread", "abs_path": "net.js", "filename": "net.js", "lineno": 547, "library_frame": true, "vars": { "key": "value" }, "module": "some module", "colno": 4, "context_line": "line3", "pre_context": [ " var trans = this.currentTransaction", "" ], "post_context": [ " ins.currentTransaction = prev", " return result"] }], "context": { "db": { "instance": "customers", "statement": "SELECT * FROM product_types WHERE user_id=?", "type": "sql", "user": "readonly_user", "link": "other.db.com", "rows_affected": 2}, "http": { "url": "http://localhost:8000", "status_code":200, "response":{"headers": { "content-type": null }, "status_code":200,"transfer_size":300.12,"encoded_body_size":356,"decoded_body_size":401}, "method": "GET" }, "destination": {"address": "0:0::0:1", "port": 5432, "service": {"type": "db", "name": "postgresql", "resource": "postgresql"}}, 
"service":{"name":"service1","agent":{"version":"2.2","name":"elastic-ruby", "ephemeral_id": "justanid"}}}}} {"span": {"trace_id": "fdedef0123456789abcdef9876543210", "parent_id": "abcdef0123456789", "id": "00xxx12312312312", "transaction_id": "01af25874dec69dd", "name": "Rabbitmq receive", "type": "messaging", "subtype": "JMS", "action": "receive", "duration": 141.581, "timestamp": 1532976822281000, "context": {"destination": {"address": "0:0::0:1"}, "message": {"queue": { "name": "new_users"}, "age":{"ms": 1577958057123}}}}} +{"span": {"trace_id": "edcbaf0123456789abcdef9876543210", "parent_id": "abcdef0123456789", "id": "abcdef01234567", "transaction_id": "01af25874dec69dd", "name": "SELECT FROM p_details", "type": "db.postgresql.query","start": 2.83092, "duration": 378.1912, "timestamp": 1625572685682272,"composite":{"count":10, "sum": 359.2981,"compression_strategy": "exact_match"},"outcome": "success"}} diff --git a/testdata/intake-v2/transactions.ndjson b/testdata/intake-v2/transactions.ndjson index e2d5942a3c6..8bd20b05991 100644 --- a/testdata/intake-v2/transactions.ndjson +++ b/testdata/intake-v2/transactions.ndjson @@ -1,5 +1,5 @@ -{"metadata": {"service": {"name": "1234_service-12a3","node": {"configured_name": "node-123"},"version": "5.1.3","environment": "staging","language": {"name": "ecmascript","version": "8"},"runtime": {"name": "node","version": "8.0.0"},"framework": {"name": "Express","version": "1.2.3"},"agent": {"name": "elastic-node","version": "3.14.0"}},"user": {"id": "123user", "username": "bar", "email": "bar@user.com"}, "labels": {"tag0": null, "tag1": "one", "tag2": 2}, "process": {"pid": 1234,"ppid": 6789,"title": "node","argv": ["node","server.js"]},"system": {"hostname": "prod1.example.com","architecture": "x64","platform": "darwin", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region"}}} +{"metadata": {"service": {"name": "1234_service-12a3","node": {"configured_name": "node-123"},"version": "5.1.3","environment": "staging","language": {"name": "ecmascript","version": "8"},"runtime": {"name": "node","version": "8.0.0"},"framework": {"name": "Express","version": "1.2.3"},"agent": {"name": "elastic-node","version": "3.14.0"}},"user": {"id": "123user", "username": "bar", "email": "bar@user.com"}, "labels": {"tag0": null, "tag1": "one", "tag2": 2}, "process": {"pid": 1234,"ppid": 6789,"title": "node","argv": ["node","server.js"]},"system": {"hostname": "prod1.example.com","architecture": "x64","platform": "darwin", "container": {"id": "container-id"}, "kubernetes": {"namespace": "namespace1", "pod": {"uid": "pod-uid", "name": "pod-name"}, "node": {"name": "node-name"}}},"cloud":{"account":{"id":"account_id","name":"account_name"},"availability_zone":"cloud_availability_zone","instance":{"id":"instance_id","name":"instance_name"},"machine":{"type":"machine_type"},"project":{"id":"project_id","name":"project_name"},"provider":"cloud_provider","region":"cloud_region","service":{"name":"lambda"}}}} {"transaction": { "id": "945254c567a5417e", "trace_id": "0123456789abcdef0123456789abcdef", "parent_id": "abcdefabcdef01234567", "type": "request", "duration": 32.592981, 
"span_count": { "started": 43 }}} -{"transaction": {"id": "4340a8e0df1906ecbfa9", "trace_id": "0acd456789abcdef0123456789abcdef", "name": "GET /api/types","type": "request","duration": 32.592981,"outcome":"success", "result": "success", "timestamp": 1496170407154000, "sampled": true, "span_count": {"started": 17},"context": {"service": {"runtime": {"version": "7.0"}},"page":{"referer":"http://localhost:8000/test/e2e/","url":"http://localhost:8000/test/e2e/general-usecase/"}, "request": {"socket": {"remote_address": "12.53.12.1","encrypted": true},"http_version": "1.1","method": "POST","url": {"protocol": "https:","full": "https://www.example.com/p/a/t/h?query=string#hash","hostname": "www.example.com","port": "8080","pathname": "/p/a/t/h","search": "?query=string","hash": "#hash","raw": "/p/a/t/h?query=string#hash"},"headers": {"user-agent":["Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36","Mozilla Chrome Edge"],"content-type": "text/html","cookie": "c1=v1, c2=v2","some-other-header": "foo","array": ["foo","bar","baz"]},"cookies": {"c1": "v1","c2": "v2"},"env": {"SERVER_SOFTWARE": "nginx","GATEWAY_INTERFACE": "CGI/1.1"},"body": {"str": "hello world","additional": { "foo": {},"bar": 123,"req": "additional information"}}},"response": {"status_code": 200,"headers": {"content-type": "application/json"},"headers_sent": true,"finished": true,"transfer_size":25.8,"encoded_body_size":26.90,"decoded_body_size":29.90}, "user": {"id": "99","username": "foo"},"tags": {"organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", "tag2": 12, "tag3": 12.45, "tag4": false, "tag5": null },"custom": {"my_key": 1,"some_other_value": "foo bar","and_objects": {"foo": ["bar","baz"]},"(": "not a valid regex and that is fine"}}}} -{"transaction": { "id": "cdef4340a8e0df19", "trace_id": "0acd456789abcdef0123456789abcdef", "type": "request", "duration": 13.980558, "timestamp": 1532976822281000, "sampled": null, "span_count": { "dropped": 55, "started": 436 }, "marks": {"navigationTiming": {"appBeforeBootstrap": 608.9300000000001,"navigationStart": -21},"another_mark": {"some_long": 10,"some_float": 10.0}, "performance": {}}, "context": { "request": { "socket": { "remote_address": "192.0.1", "encrypted": null }, "method": "POST", "headers": { "user-agent": null, "content-type": null, "cookie": null }, "url": { "protocol": null, "full": null, "hostname": null, "port": null, "pathname": null, "search": null, "hash": null, "raw": null } }, "response": { "headers": { "content-type": null } }, "service": {"environment":"testing","name": "service1","node": {"configured_name": "node-ABC"}, "language": {"version": "2.5", "name": "ruby"}, "agent": {"version": "2.2", "name": "elastic-ruby", "ephemeral_id": "justanid"}, "framework": {"version": "5.0", "name": "Rails"}, "version": "2", "runtime": {"version": "2.5", "name": "cruby"}}},"experience":{"cls":1,"fid":2.0,"tbt":3.4}}} -{"transaction": { "id": "00xxxxFFaaaa1234", "trace_id": "0123456789abcdef0123456789abcdef", "name": "amqp receive", "parent_id": "abcdefabcdef01234567", "type": "messaging", "duration": 3, "span_count": { "started": 1 }, "context": {"message": {"queue": { "name": "new_users"}, "age":{ "ms": 1577958057123}, "headers": {"user_id": "1ax3", "involved_services": ["user", "auth"]}, "body": "user created"}}}} +{"transaction": {"id": "4340a8e0df1906ecbfa9", "trace_id": "0acd456789abcdef0123456789abcdef", "name": "GET /api/types","type": "request","duration": 
32.592981,"outcome":"success", "result": "success", "timestamp": 1496170407154000, "sampled": true, "span_count": {"started": 17},"context": {"service": {"runtime": {"version": "7.0"}},"page":{"referer":"http://localhost:8000/test/e2e/","url":"http://localhost:8000/test/e2e/general-usecase/"}, "request": {"socket": {"remote_address": "12.53.12.1","encrypted": true},"http_version": "1.1","method": "POST","url": {"protocol": "https:","full": "https://www.example.com/p/a/t/h?query=string#hash","hostname": "www.example.com","port": "8080","pathname": "/p/a/t/h","search": "?query=string","hash": "#hash","raw": "/p/a/t/h?query=string#hash"},"headers": {"user-agent":["Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36","Mozilla Chrome Edge"],"content-type": "text/html","cookie": "c1=v1, c2=v2","some-other-header": "foo","array": ["foo","bar","baz"]},"cookies": {"c1": "v1","c2": "v2"},"env": {"SERVER_SOFTWARE": "nginx","GATEWAY_INTERFACE": "CGI/1.1"},"body": {"str": "hello world","additional": { "foo": {},"bar": 123,"req": "additional information"}}},"response": {"status_code": 200,"headers": {"content-type": "application/json"},"headers_sent": true,"finished": true,"transfer_size":25.8,"encoded_body_size":26.90,"decoded_body_size":29.90}, "user": {"domain": "ldap://abc","id": "99","username": "foo"},"tags": {"organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8", "tag2": 12, "tag3": 12.45, "tag4": false, "tag5": null },"custom": {"my_key": 1,"some_other_value": "foo bar","and_objects": {"foo": ["bar","baz"]},"(": "not a valid regex and that is fine"}}}} +{"transaction": { "id": "cdef4340a8e0df19", "trace_id": "0acd456789abcdef0123456789abcdef", "type": "request", "duration": 13.980558, "timestamp": 1532976822281000, "sampled": null, "span_count": { "dropped": 55, "started": 436 }, "marks": {"navigationTiming": {"appBeforeBootstrap": 608.9300000000001,"navigationStart": -21},"another_mark": {"some_long": 10,"some_float": 10.0}, "performance": {}}, "context": { "request": { "socket": { "remote_address": "192.0.1", "encrypted": null }, "method": "POST", "headers": { "user-agent": null, "content-type": null, "cookie": null }, "url": { "protocol": null, "full": null, "hostname": null, "port": null, "pathname": null, "search": null, "hash": null, "raw": null } }, "response": { "headers": { "content-type": null } }, "service": {"environment":"testing","name": "service1","node": {"configured_name": "node-ABC"}, "language": {"version": "2.5", "name": "ruby"}, "agent": {"version": "2.2", "name": "elastic-ruby", "ephemeral_id": "justanid"}, "framework": {"version": "5.0", "name": "Rails"}, "version": "2", "runtime": {"version": "2.5", "name": "cruby"}}},"experience":{"cls":1,"fid":2.0,"tbt":3.4,"longtask":{"count":3,"sum":2.5,"max":1}}}} +{"transaction": { "id": "00xxxxFFaaaa1234", "trace_id": "0123456789abcdef0123456789abcdef", "name": "amqp receive", "parent_id": "abcdefabcdef01234567", "type": "messaging", "duration": 3, "span_count": { "started": 1 }, "context": {"message": {"queue": { "name": "new_users"}, "age":{ "ms": 1577958057123}, "headers": {"user_id": "1ax3", "involved_services": ["user", "auth"]}, "body": "user created"}},"session":{"id":"sunday","sequence":123}}} diff --git a/testdata/intake-v2/transactions_spans_rum.ndjson b/testdata/intake-v2/transactions_spans_rum.ndjson index c9bfb45c963..bd55fa8ca06 100644 --- a/testdata/intake-v2/transactions_spans_rum.ndjson +++ b/testdata/intake-v2/transactions_spans_rum.ndjson 
@@ -1,3 +1,3 @@ -{"metadata":{"service":{"name":"apm-agent-js","version":"1.0.0","agent":{"name":"rum-js","version":"0.0.0"}}}} +{"metadata":{"service":{"name":"apm-agent-js","version":"1.0.0","agent":{"name":"rum-js","version":"0.0.0"}},"network":{"connection":{"type":"5G"}}}} {"transaction":{"id":"611f4fa950f04631","type":"page-load","duration":643,"context":{"page":{"referer":"http://localhost:8000/test/e2e/","url":"http://localhost:8000/test/e2e/general-usecase/"}},"trace_id":"611f4fa950f04631aaaaaaaaaaaaaaaa","span_count":{"started":1},"experience":{"cls":1,"fid":2.0,"tbt":3.4,"ignored":5,"also":"ignored"}}} {"span":{"name":"transaction","type":"transaction","start":0,"duration":643,"stacktrace":[{"abs_path":"http://localhost:8000/test/e2e/general-usecase/bundle.js.map","filename":"test/e2e/general-usecase/bundle.js.map","function":"","lineno":1,"colno":18},{"abs_path":"http://localhost:8000/test/e2e/general-usecase/bundle.js.map","filename":"~/test/e2e/general-usecase/bundle.js.map","function":"","lineno":1,"colno":18}],"context":{"http":{"url":"http://localhost:8000/test/e2e/general-usecase/span"}},"transaction_id":"611f4fa950f04631","parent_id":"611f4fa950f04631","trace_id":"611f4fa950f04631aaaaaaaaaaaaaaaa","id":"aaaaaaaaaaaaaaaa"}} diff --git a/testdata/intake-v3/rum_events.ndjson b/testdata/intake-v3/rum_events.ndjson index 88bcbe51df8..9bca2a7d11c 100644 --- a/testdata/intake-v3/rum_events.ndjson +++ b/testdata/intake-v3/rum_events.ndjson @@ -1,3 +1,2 @@ -{"m": {"se": {"n": "apm-a-rum-test-e2e-general-usecase","ve": "0.0.1","en": "prod","a": {"n": "js-base","ve": "4.8.1"},"ru": {"n": "v8","ve": "8.0"},"la": {"n": "javascript","ve": "6"},"fw": {"n": "angular","ve": "2"}},"u": {"id": 123,"em": "user@email.com","un": "John Doe"},"l": {"testTagKey": "testTagValue"}}} -{"x": {"id": "ec2e280be8345240","tid": "286ac3ad697892c406528f13c82e0ce1","pid": "1ef08ac234fca23b455d9e27c660f1ab","n": "general-usecase-initial-p-load","t": "p-load","d": 295,"me": [{"sa": {"xdc": {"v": 1},"xds": {"v": 295},"xbc": {"v": 1}}},{"y": {"t": "Request"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}},{"y": {"t": "Response"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}}],"y": [{"id": "bbd8bcc3be14d814","n": "Requesting and receiving the document","t": "hard-navigation","su": "browser-timing","s": 4,"d": 2},{"id": "fc546e87a90a774f","n": "Parsing the document, executing sy. 
scripts","t": "hard-navigation","su": "browser-timing","s": 14,"d": 106},{"id": "fb8f717930697299","n": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js","t": "rc","su": "script","s": 22.53499999642372,"d": 35.060000023804605,"c": {"h": {"url": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED","r": {"ts": 677175,"ebs": 676864,"dbs": 676864}},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "rc"},"ad": "localhost","po": 8000}}},{"id": "9b80535c4403c9fb","n": "OpenTracing y","t": "cu","s": 96.92999999970198,"d": 198.07000000029802},{"id": "5ecb8ee030749715","n": "GET /test/e2e/common/data.json","t": "external","su": "h","sy": true,"s": 98.94000005442649,"d": 6.72499998472631,"c": {"h": {"mt": "GET","url": "http://localhost:8000/test/e2e/common/data.json?test=hamid","sc": 200},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "external"},"ad": "localhost","po": 8000}}},{"id": "27f45fd274f976d4","n": "POST http://localhost:8003/data","t": "external","su": "h","sy": true,"s": 106.52000003028661,"d": 11.584999971091747,"c": {"h": {"mt": "POST","url": "http://localhost:8003/data","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "a3c043330bc2015e","pi": 0,"n": "POST http://localhost:8003/fetch","t": "external","su": "h","ac": "action","sy": false,"s": 119.93500008247793,"d": 15.949999913573265,"c": {"h": {"mt": "POST","url": "http://localhost:8003/fetch","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "bc7665dc25629379","st": [{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "generateError","li": 7662,"co": 9},{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "","li": 7666,"co": 3}],"n": "Fire \"DOMContentLoaded\" event","t": "hard-navigation","su": "browser-timing","s": 120,"d": 2,"o":"success"}],"c": {"p": {"rf": "http://localhost:8000/test/e2e/","url": "http://localhost:8000/test/e2e/general-usecase/"},"r": {"sc": 200,"ts": 983,"ebs": 690,"dbs": 690,"he": {"Content-Type": "application/json"}},"q": {"he": {"Accept": "application/json"},"hve": "1.1","mt": "GET"},"u": {"id": "uId","un": "un","em": "em"},"cu": {"testContext": "testContext"},"g": {"testTagKey": "testTagValue"}},"k": {"a": {"lp": 131.03000004775822,"fb": 5,"di": 120,"dc": 138,"ds": 100,"de": 110,"fp": 70.82500003930181},"nt": {"fs": 0,"ls": 0,"le": 0,"cs": 0,"ce": 0,"qs": 4,"rs": 5,"re": 6,"dl": 14,"di": 120,"ds": 120,"de": 122,"dc": 138,"es": 138,"ee": 138}},"yc": {"sd": 8,"dd": 1},"sm": true,"exp":{"cls":1,"fid":2.0,"tbt":3.4,"ignored":5,"also":"ignored"}}} -{"me": {"y": {"t": "Processing","su": "subtype"},"sa": {"ysc": {"v": 1},"yss": {"v": 124}},"g": {"tag1": "value1"}}} +{"m": {"se": {"n": "apm-a-rum-test-e2e-general-usecase","ve": "0.0.1","en": "prod","a": {"n": "js-base","ve": "4.8.1"},"ru": {"n": "v8","ve": "8.0"},"la": {"n": "javascript","ve": "6"},"fw": {"n": "angular","ve": "2"}},"u": {"id": 123,"em": "user@email.com","un": "John Doe"},"l": {"testTagKey": "testTagValue"},"n":{"c":{"t":"5G"}}}} +{"x": {"id": "ec2e280be8345240","tid": "286ac3ad697892c406528f13c82e0ce1","pid": "1ef08ac234fca23b455d9e27c660f1ab","n": 
"general-usecase-initial-p-load","t": "p-load","d": 295,"me": [{"sa": {"xdc": {"v": 1},"xds": {"v": 295},"xbc": {"v": 1}}},{"y": {"t": "Request"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}},{"y": {"t": "Response"},"sa": {"ysc": {"v": 1},"yss": {"v": 1}}}],"y": [{"id": "bbd8bcc3be14d814","n": "Requesting and receiving the document","t": "hard-navigation","su": "browser-timing","s": 4,"d": 2},{"id": "fc546e87a90a774f","n": "Parsing the document, executing sy. scripts","t": "hard-navigation","su": "browser-timing","s": 14,"d": 106},{"id": "fb8f717930697299","n": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js","t": "rc","su": "script","s": 22.53499999642372,"d": 35.060000023804605,"c": {"h": {"url": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=REDACTED","r": {"ts": 677175,"ebs": 676864,"dbs": 676864}},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "rc"},"ad": "localhost","po": 8000}}},{"id": "9b80535c4403c9fb","n": "OpenTracing y","t": "cu","s": 96.92999999970198,"d": 198.07000000029802},{"id": "5ecb8ee030749715","n": "GET /test/e2e/common/data.json","t": "external","su": "h","sy": true,"s": 98.94000005442649,"d": 6.72499998472631,"c": {"h": {"mt": "GET","url": "http://localhost:8000/test/e2e/common/data.json?test=hamid","sc": 200},"dt": {"se": {"n": "http://localhost:8000","rc": "localhost:8000","t": "external"},"ad": "localhost","po": 8000}}},{"id": "27f45fd274f976d4","n": "POST http://localhost:8003/data","t": "external","su": "h","sy": true,"s": 106.52000003028661,"d": 11.584999971091747,"c": {"h": {"mt": "POST","url": "http://localhost:8003/data","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "a3c043330bc2015e","pi": 0,"n": "POST http://localhost:8003/fetch","t": "external","su": "h","ac": "action","sy": false,"s": 119.93500008247793,"d": 15.949999913573265,"c": {"h": {"mt": "POST","url": "http://localhost:8003/fetch","sc": 200},"dt": {"se": {"n": "http://localhost:8003","rc": "localhost:8003","t": "external"},"ad": "localhost","po": 8003}}},{"id": "bc7665dc25629379","st": [{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "generateError","li": 7662,"co": 9},{"ap": "http://localhost:8000/test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","f": "test/e2e/general-usecase/app.e2e-bundle.min.js?token=secret","fn": "","li": 7666,"co": 3}],"n": "Fire \"DOMContentLoaded\" event","t": "hard-navigation","su": "browser-timing","s": 120,"d": 2,"o":"success"}],"c": {"p": {"rf": "http://localhost:8000/test/e2e/","url": "http://localhost:8000/test/e2e/general-usecase/"},"r": {"sc": 200,"ts": 983,"ebs": 690,"dbs": 690,"he": {"Content-Type": "application/json"}},"q": {"he": {"Accept": "application/json"},"hve": "1.1","mt": "GET"},"u": {"id": "uId","un": "un","em": "em"},"cu": {"testContext": "testContext"},"g": {"testTagKey": "testTagValue"}},"k": {"a": {"lp": 131.03000004775822,"fb": 5,"di": 120,"dc": 138,"ds": 100,"de": 110,"fp": 70.82500003930181},"nt": {"fs": 0,"ls": 0,"le": 0,"cs": 0,"ce": 0,"qs": 4,"rs": 5,"re": 6,"dl": 14,"di": 120,"ds": 120,"de": 122,"dc": 138,"es": 138,"ee": 138}},"yc": {"sd": 8,"dd": 1},"sm": true,"exp":{"cls":1,"fid":2.0,"tbt":3.4,"ignored":5,"also":"ignored","lt":{"count":3,"sum":2.5,"max":1}}}} diff --git a/testdata/jaeger/batch_0.approved.json b/testdata/jaeger/batch_0.approved.json index 
e30bc33d761..b102649bc54 100644 --- a/testdata/jaeger/batch_0.approved.json +++ b/testdata/jaeger/batch_0.approved.json @@ -8,12 +8,11 @@ "version": "2.20.1" }, "event": { - "outcome": "success" + "outcome": "unknown" }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "as": "thrift", @@ -31,16 +30,13 @@ "language": { "name": "Go" }, - "name": "driver", - "node": { - "name": "host01" - } + "name": "driver" }, "timestamp": { "us": 1576827704953864 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" }, "transaction": { "duration": { @@ -48,11 +44,72 @@ }, "id": "7be2fd98d0973be3", "name": "Driver::findNearest", - "result": "Success", "sampled": true, "type": "custom" } }, + { + "@timestamp": "2019-12-20T07:41:44.954Z", + "agent": { + "ephemeral_id": "624386e9c81d2980", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "event": "baggage", + "key": "customer", + "value": "Japanese Desserts" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "driver" + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" + } + }, + { + "@timestamp": "2019-12-20T07:41:44.954Z", + "agent": { + "ephemeral_id": "624386e9c81d2980", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "event": "Searching for nearby drivers", + "level": "info", + "location": "728,326" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "driver" + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" + } + }, { "@timestamp": "2019-12-20T07:41:45.007Z", "agent": { @@ -66,15 +123,13 @@ "message": "redis timeout" } ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", "log": { "message": "Retrying GetDriver after error" } }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "parent": { "id": "7be2fd98d0973be3" @@ -87,19 +142,17 @@ "language": { "name": "Go" }, - "name": "driver", - "node": { - "name": "host01" - } + "name": "driver" }, "timestamp": { "us": 1576827705007552 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" }, "transaction": { "id": "7be2fd98d0973be3", + "sampled": true, "type": "custom" } }, @@ -116,15 +169,13 @@ "message": "redis timeout" } ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", "log": { "message": "Retrying GetDriver after error" } }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "parent": { "id": "7be2fd98d0973be3" @@ -137,19 +188,17 @@ "language": { "name": "Go" }, - "name": "driver", - "node": { - "name": "host01" - } + "name": "driver" }, "timestamp": { "us": 1576827705089431 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" }, "transaction": { "id": "7be2fd98d0973be3", + "sampled": true, "type": "custom" } }, @@ -166,15 +215,13 @@ "message": "redis timeout" } ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", "log": { "message": "Retrying GetDriver after error" } }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "parent": { "id": "7be2fd98d0973be3" @@ -187,21 +234,50 @@ "language": { "name": "Go" }, - "name": "driver", - "node": { - 
"name": "host01" - } + "name": "driver" }, "timestamp": { "us": 1576827705172530 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" }, "transaction": { "id": "7be2fd98d0973be3", + "sampled": true, "type": "custom" } + }, + { + "@timestamp": "2019-12-20T07:41:45.197Z", + "agent": { + "ephemeral_id": "624386e9c81d2980", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "event": "Search successful", + "level": "info", + "num_drivers": 10 + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "driver" + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" + } } ] } diff --git a/testdata/jaeger/batch_1.approved.json b/testdata/jaeger/batch_1.approved.json index 336fc199743..8020fb7285b 100644 --- a/testdata/jaeger/batch_1.approved.json +++ b/testdata/jaeger/batch_1.approved.json @@ -12,8 +12,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_location": "728,326" @@ -29,10 +28,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -40,13 +36,46 @@ }, "id": "6e09e8bcefd6b828", "name": "FindDriverIDs", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827704954062 }, "trace": { + "id": "00000000000000007be2fd98d0973be3" + } + }, + { + "@timestamp": "2019-12-20T07:41:44.973Z", + "agent": { + "ephemeral_id": "2e3f8db3eb77fae0", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "data_stream.type": "logs", + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "event": "Found drivers", + "level": "info" + }, + "parent": { "id": "7be2fd98d0973be3" + }, + "processor": { + "event": "log", + "name": "log" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "redis" + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -57,15 +86,13 @@ "version": "2.20.1" }, "event": { - "outcome": "unknown" + "outcome": "failure" }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { - "error": true, "param_driverID": "T762465C" }, "parent": { @@ -79,10 +106,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -90,13 +114,57 @@ }, "id": "333295bfb438ea03", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827704973809 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" + } + }, + { + "@timestamp": "2019-12-20T07:41:45.006Z", + "agent": { + "ephemeral_id": "2e3f8db3eb77fae0", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "error": { + "exception": [ + { + "message": "redis timeout" + } + ], + "log": { + "message": "redis timeout" + } + }, + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "driver_id": "T762465C" + }, + "parent": { + "id": "333295bfb438ea03" + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "redis" + }, + "timestamp": { + "us": 1576827705006847 + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -111,8 +179,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T762465C" @@ 
-128,10 +195,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -139,13 +203,13 @@ }, "id": "627c37a97e475c2f", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705007578 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -160,8 +224,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T712515C" @@ -177,10 +240,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -188,13 +248,13 @@ }, "id": "7bd7663d39c5a847", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705016845 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -209,8 +269,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T752110C" @@ -226,10 +285,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -237,13 +293,13 @@ }, "id": "6b4051dd2a5e2366", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705029415 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -258,8 +314,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T757670C" @@ -275,10 +330,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -286,13 +338,13 @@ }, "id": "6df97a86b9b3451b", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705040082 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -303,15 +355,13 @@ "version": "2.20.1" }, "event": { - "outcome": "unknown" + "outcome": "failure" }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { - "error": true, "param_driverID": "T781861C" }, "parent": { @@ -325,10 +375,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -336,13 +383,57 @@ }, "id": "614811d6c498bfb0", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705054046 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" + } + }, + { + "@timestamp": "2019-12-20T07:41:45.089Z", + "agent": { + "ephemeral_id": "2e3f8db3eb77fae0", + "name": "Jaeger/Go", + "version": "2.20.1" + }, + "error": { + "exception": [ + { + "message": "redis timeout" + } + ], + "log": { + "message": "redis timeout" + } + }, + "host": { + "hostname": "host01", + "ip": "10.0.0.13" + }, + "labels": { + "driver_id": "T781861C" + }, + "parent": { + "id": "614811d6c498bfb0" + }, + "processor": { + "event": "error", + "name": "error" + }, + "service": { + "language": { + "name": "Go" + }, + "name": "redis" + }, + "timestamp": { + "us": 1576827705089372 + }, + "trace": { + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -357,8 +448,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T781861C" @@ -374,10 +464,7 @@ "language": { 
"name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -385,13 +472,13 @@ }, "id": "231604559da84d61", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705089459 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -406,8 +493,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T705860C" @@ -423,10 +509,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -434,13 +517,13 @@ }, "id": "61f7ecf24d13c36a", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705101278 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -455,8 +538,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T708771C" @@ -472,10 +554,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -483,13 +562,13 @@ }, "id": "2ef335bad24accc2", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705113531 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -504,8 +583,7 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { "param_driverID": "T710624C" @@ -521,10 +599,7 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { @@ -532,13 +607,13 @@ }, "id": "38ec645e7201224d", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705125567 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -549,15 +624,13 @@ "version": "2.20.1" }, "event": { - "outcome": "unknown" + "outcome": "failure" }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { - "error": true, "param_driverID": "T752547C" }, "parent": { @@ -571,24 +644,21 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { "us": 39602 }, - "id": "242ee3774d9eab1", + "id": "0242ee3774d9eab1", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { "us": 1576827705132896 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { @@ -598,50 +668,45 @@ "name": "Jaeger/Go", "version": "2.20.1" }, - "event": { - "outcome": "unknown" + "error": { + "exception": [ + { + "message": "redis timeout" + } + ], + "log": { + "message": "redis timeout" + } }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { - "param_driverID": "T752547C" + "driver_id": "T752547C" }, "parent": { - "id": "7be2fd98d0973be3" + "id": "0242ee3774d9eab1" }, "processor": { - "event": "span", - "name": "transaction" + "event": "error", + "name": "error" }, "service": { "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } - }, - "span": { - "duration": { - "us": 14029 - }, - "id": "6a63d1e81cfc7d95", - "name": "GetDriver", - "type": "custom" + "name": "redis" }, "timestamp": { - "us": 1576827705172618 + "us": 1576827705172347 }, "trace": { 
- "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { - "@timestamp": "2019-12-20T07:41:45.186Z", + "@timestamp": "2019-12-20T07:41:45.172Z", "agent": { "ephemeral_id": "2e3f8db3eb77fae0", "name": "Jaeger/Go", @@ -652,11 +717,10 @@ }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "ip": "10.0.0.13" }, "labels": { - "param_driverID": "T757338C" + "param_driverID": "T752547C" }, "parent": { "id": "7be2fd98d0973be3" @@ -669,162 +733,66 @@ "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } + "name": "redis" }, "span": { "duration": { - "us": 10431 + "us": 14029 }, - "id": "2b4c28f02b272f17", + "id": "6a63d1e81cfc7d95", "name": "GetDriver", - "type": "custom" + "type": "app" }, "timestamp": { - "us": 1576827705186670 + "us": 1576827705172618 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } }, { - "@timestamp": "2019-12-20T07:41:45.006Z", + "@timestamp": "2019-12-20T07:41:45.186Z", "agent": { "ephemeral_id": "2e3f8db3eb77fae0", "name": "Jaeger/Go", "version": "2.20.1" }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "redis timeout" - } + "event": { + "outcome": "unknown" }, "host": { "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "parent": { - "id": "333295bfb438ea03" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "redis", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705006847 - }, - "trace": { - "id": "7be2fd98d0973be3" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.089Z", - "agent": { - "ephemeral_id": "2e3f8db3eb77fae0", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "redis timeout" - } + "ip": "10.0.0.13" }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" + "labels": { + "param_driverID": "T757338C" }, "parent": { - "id": "614811d6c498bfb0" + "id": "7be2fd98d0973be3" }, "processor": { - "event": "error", - "name": "error" + "event": "span", + "name": "transaction" }, "service": { "language": { "name": "Go" }, - "name": "redis", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705089372 + "name": "redis" }, - "trace": { - "id": "7be2fd98d0973be3" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.172Z", - "agent": { - "ephemeral_id": "2e3f8db3eb77fae0", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "redis timeout" - } - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "parent": { - "id": "242ee3774d9eab1" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" + "span": { + "duration": { + "us": 10431 }, - "name": "redis", - "node": { - "name": "host01" - } + "id": "2b4c28f02b272f17", + "name": "GetDriver", + "type": "app" }, "timestamp": { - "us": 1576827705172347 + "us": 1576827705186670 }, "trace": { - "id": "7be2fd98d0973be3" + "id": "00000000000000007be2fd98d0973be3" } } ] diff --git a/testdata/sourcemap/minimal_payload.json b/testdata/sourcemap/minimal_payload.json deleted file mode 100644 index 
afaa31d9d00..00000000000 --- a/testdata/sourcemap/minimal_payload.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "service_name": "service", - "service_version": "1", - "bundle_filepath": "js/bundle.js", - "sourcemap": "{\"mappings\":\"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\"names\":[\"modules\",\"__webpack_require__\",\"moduleId\",\"installedModules\",\"exports\",\"module\",\"id\",\"loaded\",\"call\",\"m\",\"c\",\"p\",\"foo\",\"console\",\"log\",\"foobar\"],\"sources\":[\"webpack:///bundle.js\",\"webpack:///webpack/bootstrap 6002740481c9666b0d38\",\"webpack:///./scripts/index.js\",\"webpack:///./index.html\",\"webpack:///./scripts/app.js\"],\"version\":3}" -} diff --git a/testdata/sourcemap/payload.json b/testdata/sourcemap/payload.json deleted file mode 100644 index 3231121c7c4..00000000000 --- a/testdata/sourcemap/payload.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "service_name": "service", - "service_version": "1", - "bundle_filepath": "js/bundle.js", - "sourcemap": "{\"file\":\"bundle.js\",\"mappings\":\"CAAS,SAAUA,GCInB,QAAAC,GAAAC,GAGA,GAAAC,EAAAD,GACA,MAAAC,GAAAD,GAAAE,OAGA,IAAAC,GAAAF,EAAAD,IACAE,WACAE,GAAAJ,EACAK,QAAA,EAUA,OANAP,GAAAE,GAAAM,KAAAH,EAAAD,QAAAC,IAAAD,QAAAH,GAGAI,EAAAE,QAAA,EAGAF,EAAAD,QAvBA,GAAAD,KAqCA,OATAF,GAAAQ,EAAAT,EAGAC,EAAAS,EAAAP,EAGAF,EAAAU,EAAA,GAGAV,EAAA,KDMM,SAASI,EAAQD,EAASH,GE3ChCA,EAAA,GAEAA,EAAA,GAEAW,OFmDM,SAASP,EAAQD,EAASH,GGxDhCI,EAAAD,QAAAH,EAAAU,EAAA,cH8DM,SAASN,EAAQD,GI9DvB,QAAAQ,KACAC,QAAAC,IAAAC,QAGAH\",\"names\":[\"modules\",\"__webpack_require__\",\"moduleId\",\"installedModules\",\"exports\",\"module\",\"id\",\"loaded\",\"call\",\"m\",\"c\",\"p\",\"foo\",\"console\",\"log\",\"foobar\"],\"sourceRoot\":\"\",\"sources\":[\"webpack:///bundle.js\",\"webpack:///webpack/bootstrap 6002740481c9666b0d38\",\"webpack:///./scripts/index.js\",\"webpack:///./index.html\",\"webpack:///./scripts/app.js\"],\"sourcesContent\":[\"/******/ (function(modules) { // webpackBootstrap\\n/******/ \\t// The module cache\\n/******/ \\tvar installedModules = {};\\n/******/\\n/******/ \\t// The require function\\n/******/ \\tfunction __webpack_require__(moduleId) {\\n/******/\\n/******/ \\t\\t// Check if module is in cache\\n/******/ \\t\\tif(installedModules[moduleId])\\n/******/ \\t\\t\\treturn installedModules[moduleId].exports;\\n/******/\\n/******/ \\t\\t// Create a new module (and put it into the cache)\\n/******/ \\t\\tvar module = installedModules[moduleId] = {\\n/******/ \\t\\t\\texports: {},\\n/******/ \\t\\t\\tid: moduleId,\\n/******/ \\t\\t\\tloaded: false\\n/******/ \\t\\t};\\n/******/\\n/******/ \\t\\t// Execute the module function\\n/******/ \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n/******/\\n/******/ \\t\\t// Flag the module as loaded\\n/******/ \\t\\tmodule.loaded = true;\\n/******/\\n/******/ \\t\\t// Return the exports of the module\\n/******/ \\t\\treturn module.exports;\\n/******/ \\t}\\n/******/\\n/******/\\n/******/ \\t// expose the modules object (__webpack_modules__)\\n/******/ \\t__webpack_require__.m = modules;\\n/******/\\n/******/ \\t// expose the module cache\\n/******/ \\t__webpack_require__.c = 
installedModules;\\n/******/\\n/******/ \\t// __webpack_public_path__\\n/******/ \\t__webpack_require__.p = \\\"\\\";\\n/******/\\n/******/ \\t// Load entry module and return exports\\n/******/ \\treturn __webpack_require__(0);\\n/******/ })\\n/************************************************************************/\\n/******/ ([\\n/* 0 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\t// Webpack\\n\\t__webpack_require__(1)\\n\\t\\n\\t__webpack_require__(2)\\n\\t\\n\\tfoo()\\n\\n\\n/***/ },\\n/* 1 */\\n/***/ function(module, exports, __webpack_require__) {\\n\\n\\tmodule.exports = __webpack_require__.p + \\\"index.html\\\"\\n\\n/***/ },\\n/* 2 */\\n/***/ function(module, exports) {\\n\\n\\tfunction foo() {\\n\\t console.log(foobar)\\n\\t}\\n\\t\\n\\tfoo()\\n\\n\\n/***/ }\\n/******/ ]);\\n\\n\\n/** WEBPACK FOOTER **\\n ** bundle.js\\n **/\",\" \\t// The module cache\\n \\tvar installedModules = {};\\n\\n \\t// The require function\\n \\tfunction __webpack_require__(moduleId) {\\n\\n \\t\\t// Check if module is in cache\\n \\t\\tif(installedModules[moduleId])\\n \\t\\t\\treturn installedModules[moduleId].exports;\\n\\n \\t\\t// Create a new module (and put it into the cache)\\n \\t\\tvar module = installedModules[moduleId] = {\\n \\t\\t\\texports: {},\\n \\t\\t\\tid: moduleId,\\n \\t\\t\\tloaded: false\\n \\t\\t};\\n\\n \\t\\t// Execute the module function\\n \\t\\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\\n\\n \\t\\t// Flag the module as loaded\\n \\t\\tmodule.loaded = true;\\n\\n \\t\\t// Return the exports of the module\\n \\t\\treturn module.exports;\\n \\t}\\n\\n\\n \\t// expose the modules object (__webpack_modules__)\\n \\t__webpack_require__.m = modules;\\n\\n \\t// expose the module cache\\n \\t__webpack_require__.c = installedModules;\\n\\n \\t// __webpack_public_path__\\n \\t__webpack_require__.p = \\\"\\\";\\n\\n \\t// Load entry module and return exports\\n \\treturn __webpack_require__(0);\\n\\n\\n\\n/** WEBPACK FOOTER **\\n ** webpack/bootstrap 6002740481c9666b0d38\\n **/\",\"// Webpack\\nrequire('../index.html')\\n\\nrequire('./app')\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/index.js\\n ** module id = 0\\n ** module chunks = 0\\n **/\",\"module.exports = __webpack_public_path__ + \\\"index.html\\\"\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./index.html\\n ** module id = 1\\n ** module chunks = 0\\n **/\",\"function foo() {\\n console.log(foobar)\\n}\\n\\nfoo()\\n\\n\\n\\n/*****************\\n ** WEBPACK FOOTER\\n ** ./scripts/app.js\\n ** module id = 2\\n ** module chunks = 0\\n **/\"],\"version\":3}" -} diff --git a/testing/docker/elasticsearch/roles.yml b/testing/docker/elasticsearch/roles.yml index 38df16e9982..72f8fbbbe69 100644 --- a/testing/docker/elasticsearch/roles.yml +++ b/testing/docker/elasticsearch/roles.yml @@ -1,7 +1,7 @@ apm_server: cluster: ['manage_ilm','manage_security','manage_api_key'] indices: - - names: ['apm-*'] + - names: ['apm-*', 'traces-apm*', 'logs-apm*', 'metrics-apm*'] privileges: ['write','create_index','manage','manage_ilm'] applications: - application: 'apm' diff --git a/testing/docker/fleet-server/ca.pem b/testing/docker/fleet-server/ca.pem new file mode 100644 index 00000000000..f7c1df6737f --- /dev/null +++ b/testing/docker/fleet-server/ca.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEuTCCAyGgAwIBAgIQP5xwBoeDe2Fa7GyUeDrLfzANBgkqhkiG9w0BAQsFADB1 +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExJTAjBgNVBAsMHGFuZHJl 
+d0Bnb2F0IChBbmRyZXcgV2lsa2lucykxLDAqBgNVBAMMI21rY2VydCBhbmRyZXdA +Z29hdCAoQW5kcmV3IFdpbGtpbnMpMB4XDTIxMDYxMDAyMDEzM1oXDTMxMDYxMDAy +MDEzM1owdTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMSUwIwYDVQQL +DBxhbmRyZXdAZ29hdCAoQW5kcmV3IFdpbGtpbnMpMSwwKgYDVQQDDCNta2NlcnQg +YW5kcmV3QGdvYXQgKEFuZHJldyBXaWxraW5zKTCCAaIwDQYJKoZIhvcNAQEBBQAD +ggGPADCCAYoCggGBAOaFTfeUzZ7Vgudh6KrfWryPSGXrAhngDOBjRp4tlOhypsW+ +ybUXB49YHmFvnZH6gwUl6WG1ixKVUAMc/jQEePCBsEkeUuqV1U0P734YpAqjuRJf +t9Xf/Efo4OYwBDsOD071V9YADig9lD1qIEFyuXJ99rzPWBFw8tNuA3/Xnxzh9zUa +heL3aQ03V2b8Wkhf7Q3g5btwQt0fhEFo2tTuY4H5iSn/84QvgHAj7pyWvDFMPOo4 +LTnRC0hEtxCbmcviEpN/rbswOuLdTpie+Q4jIPm/WBkOr8ckTRAC7njtXCijxFEY +WywBCZPT40FleeI3h74Md5wnffchZvr6yNN00K6trguTFPBKC4c4XG7NBlHxSIEo +oF2b7y+X4iV9JNoMaYLvi12GYhHddnv8Cr1OdVSVlXqM1DInXk+g4fDeKmZy+Ffv +QwnEtX5CKCCbJC/sJphrRsrgM6tq5uxFA5rxwBk1hmizqkL8jCWKY6LKDvfUXPMF +JHTH+o3a6ewuwPV78QIDAQABo0UwQzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/ +BAgwBgEB/wIBADAdBgNVHQ4EFgQUxzrrY8H5mamcn5Yx7eKno5atZ8YwDQYJKoZI +hvcNAQELBQADggGBAI1JNxY8KBHNi3yg8sW6jlsMo/lqS0ghSJiPWbLAIE9CpNRJ +lyhKc0J4TQKHKav/PY5AmoEC8pOluncz7jeaCsk5l9vv1iYJp1KU7EQwEsafRMS9 +TupwnJ4GvW2je085RGByNPnEBUfa99g5fF9l0SBLcqYZo2EJeXTzOoUmoOSI45wU +0A2r3X4ucDs5eTw/6mA3GHsm92JDODxZWVRlZ5OQZVhHo6fm2J0Gy+tOslypZiI3 +mV0XhF9MVASLwC7GBsQ2yOt3AXRi7gBzxezhzsrxh2iCxOo3A5FlDp88gXzsgI4m +k5YFhnDAm6aYYIJnjotTVNG6BN9e/YH91vKuNax7h9oaSIjxolMe9TR+tnG7DZ6K +5A18hXi9lnnHXvRJrHuVFBMxaU16lLNM+bh1l00HEs0owG8x713zGY90hq9EW8Js +X/5fO5U4u6C6QBOtV1+U11MyTyM62j+6yWnUaPELWD0R0LmzvnwQCrHWSQ2o+2IY +mhU21Ex5j9nxU4tFuQ== +-----END CERTIFICATE----- diff --git a/testing/docker/fleet-server/certificate.pem b/testing/docker/fleet-server/certificate.pem new file mode 100644 index 00000000000..8945714cf0a --- /dev/null +++ b/testing/docker/fleet-server/certificate.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEOzCCAqOgAwIBAgIQMbTNJXcilk0w4lgVeOqmyjANBgkqhkiG9w0BAQsFADB1 +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExJTAjBgNVBAsMHGFuZHJl +d0Bnb2F0IChBbmRyZXcgV2lsa2lucykxLDAqBgNVBAMMI21rY2VydCBhbmRyZXdA +Z29hdCAoQW5kcmV3IFdpbGtpbnMpMB4XDTIxMDYxMDAyMDE0NloXDTIzMDkxMDAy +MDE0NlowUDEnMCUGA1UEChMebWtjZXJ0IGRldmVsb3BtZW50IGNlcnRpZmljYXRl +MSUwIwYDVQQLDBxhbmRyZXdAZ29hdCAoQW5kcmV3IFdpbGtpbnMpMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8+ZMTi2V878ZKLVTNbllVGOO8RLzLDMF +oBxK6oOAu4H8B4gIefsmacuruEd58iBbl0mAWPS+ii8YqsfkVAtVvkVXvnNZGObh +XTRfI1ytQ9w2ADobQY08z0uw7wUsv4bk1evoBpWedMomwmCJQ693scNLAfHoOds2 +0yrg0UOiuwZGS6clID1Fn+Aiwit8hwqvEAC3nT5jq9vIxgCQQoyKzmA/prGyUqkp +MvMP4E77Jtm1L0wPffsC0/69J2ZNBfwT2cR0R+5C3sglarIK3QAZFPK90HGFVWOz +GweU1kscL744myuj55wugchSkm3JzDgv1hitngy5fd294q9CGPIQiwIDAQABo2ww +ajAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwHwYDVR0jBBgw +FoAUxzrrY8H5mamcn5Yx7eKno5atZ8YwIgYDVR0RBBswGYIMZmxlZXQtc2VydmVy +gglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggGBAKKq2OTw4+qpn86dU2njn44/ +DCmQet1UTtgNuLEiM18noaEOChEt/yjBmtgiLWtDBUb5JSz92Qgrk4inKXSg0MuV +RyvVej10A/rkKrS3zLozukEZQAAjNlS5nkRExT/ZAjFUcBQiDYTieSVgN2kKmMQJ +kQteqP+UdcS4KFkJYUz3Iijdmxq3m9NWnGJShUacp5jKv/Bhcw4MMbL5EWr6Wt6t +1qGXY7O96IFSTQtnWcHy2IVUSwom+Fkk3Oy24qWoxVvC0l7jsBLwvhtbMaHO5Adw +ORVRzk0Imk2faC7r8/Lv/el7g558TLPvo99K7YhWkDUYhVGNv3Wf3eT4JszRI4J0 +jHJ31EN+OIWg37gKYKBPqFMtykYjtJChvnPxzncGA1RauT03dIZAc3Y2b65o5MjG +C8FPbsCWWHbHWJA3hGzv5C3klBJpX/OLM56gT5RPBMOLAcIKq802WIahfaQqkSKl +8uOasEBXQYWzQtAj/h8kAicI7z1gcl4PUM6uZimE6A== +-----END CERTIFICATE----- diff --git a/testing/docker/fleet-server/key.pem b/testing/docker/fleet-server/key.pem new file mode 100644 index 00000000000..982fad70aa5 --- /dev/null +++ b/testing/docker/fleet-server/key.pem @@ 
-0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDz5kxOLZXzvxko +tVM1uWVUY47xEvMsMwWgHErqg4C7gfwHiAh5+yZpy6u4R3nyIFuXSYBY9L6KLxiq +x+RUC1W+RVe+c1kY5uFdNF8jXK1D3DYAOhtBjTzPS7DvBSy/huTV6+gGlZ50yibC +YIlDr3exw0sB8eg52zbTKuDRQ6K7BkZLpyUgPUWf4CLCK3yHCq8QALedPmOr28jG +AJBCjIrOYD+msbJSqSky8w/gTvsm2bUvTA99+wLT/r0nZk0F/BPZxHRH7kLeyCVq +sgrdABkU8r3QcYVVY7MbB5TWSxwvvjibK6PnnC6ByFKSbcnMOC/WGK2eDLl93b3i +r0IY8hCLAgMBAAECggEBANbdMKXCpRKpbDmfnCF9JVZ1qqyYHB/5BuCpbBozJUqK +1YOxBH6pkYqsQahDV5vFg8rAltBHNEC6AsoY9P5RSgUoQ4dlSL2WUD1y8MlPUNiy +e+QxTGewTDz2mnXHIkfMR3Zpr+t1DbYnjIO61dIKF7FDsaWR/hpSE3duk8XnBsoo +oaWoYZY/cUhNT8fhbPbVSf+2ExrIsA228qkUfgasVcf8CNHVtvyxvX73ipuM8CyH +97p9gDL2YpRgVvs2BfQ0fzf54KRXuwcUGlKP+wzsgSoPoBHDJW1hRfTzlnXqKBe1 +V21kIFgW3wshBanX35pOOZxYvEC3u3Z1nf8RFAKov3kCgYEA93hrl5HR5HzEF9UV +gHlNhwVoO4t83tvecUWgbhXtHdvJADPQCriPI4hTbcYCFDtPmULAMS7QdxmcTyMR +kVnx6M8r+tgfEXZyO3T091A6OAPsf+TNfXTHczWMPVrY3btVKFOhzUiLRwBABX0g +tbjfjzoUg+4wEAsBIYOLAc8XIRcCgYEA/E5e3VppjgCEUWMEd2GZV+Olh8sqr7XL +ER19tUlbtg4K9/arAXuZ3ATMeORqViRKULIxdsEcINim1RD3z/c2b5Jur2Oq0Z6c +vLvr7+jO4UExQq5RHPfELBxVQqBrSXKFAsUjrdF4rIGpJSIL7mzckmQuX1LcukVH +g4iSs/ldbK0CgYAS6v++nIUhJHCRKdb09VD5623mb2liWAiPPDVhdQelarHY9B0J +VMaMftVx5Nsv1MDnBHVQzTVehXSvkAy9wdR+aagBCxiE6zscVHqNlXJ96b7goAsd +dhnxMry/y/wcJ0ABTzNlUBBlox1Bzij7+2ALwPLkiwbdkxnJCBdOUhiAjQKBgB1q +ocLbHL1yr/qxOb8VgQRvRUhs1qA/6Noo/xQY5nl2b67zcoKsv4aYhKJ/tyot9wAr +lnrLDxWBTQpAfTQhFZaykvcd/reL76hNnLePBDfdGvo0Sr6+4H9oGkS3YWrh0EI/ +a+aDKreqMmdi7dMxnBHxXjq89YE+PJVIYhpbs5nNAoGAE+XPHxu2d+Mke8MewkwS +fIb0NyPvuDQzBIFGmD9Ycqtyfu1KUESQF2xVZm0UEhRltVjbnGopnng2cYDjyO3V +2q4L0z1vw02wYZ7Unzb7zX4hfa914g8XwkL1xwCowmiTw5p7UJkWRL7Nv/JXyW5t +/o22TWgyQO0gAm1KTFkTqkM= +-----END PRIVATE KEY----- diff --git a/testing/docker/package-registry/config.yml b/testing/docker/package-registry/config.yml new file mode 100644 index 00000000000..82fd5881241 --- /dev/null +++ b/testing/docker/package-registry/config.yml @@ -0,0 +1,9 @@ +package_paths: + - /packages/production + - /packages/staging + - /packages/snapshot + - /packages/local + +cache_time.search: 10s +cache_time.categories: 10s +cache_time.catch_all: 10s diff --git a/testing/docker/package-registry/entrypoint.sh b/testing/docker/package-registry/entrypoint.sh new file mode 100755 index 00000000000..a04457b054e --- /dev/null +++ b/testing/docker/package-registry/entrypoint.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +# Copy the package into the expected directory structure, using the version +# defined in manifest.yml. Packages must be stored in "/", +# and the version directory must match the version defined in manifest.yml. + +VERSION=$(grep '^version:' /apmpackage/apm/manifest.yml | cut -d ' ' -f 2) +PACKAGES_LOCAL_APM=/packages/local/apm + +rm -fr $PACKAGES_LOCAL_APM +mkdir -p $PACKAGES_LOCAL_APM +cp -r /apmpackage/apm/ $PACKAGES_LOCAL_APM/$VERSION + +exec ./package-registry --address=0.0.0.0:8080 diff --git a/testing/environments/5.x.yml b/testing/environments/5.x.yml deleted file mode 100644 index 543a5632534..00000000000 --- a/testing/environments/5.x.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This is the latest stable 5x release environment. 
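The package-registry entrypoint above derives the version directory from the package's manifest.yml and serves the result on port 8080. A minimal smoke test of that wiring from the host — a sketch, assuming the container publishes port 8080, the repository's apmpackage/apm directory is what gets mounted at /apmpackage, and the registry's /search endpoint is available:

```sh
#!/usr/bin/env bash
# Derive the version the same way the entrypoint does.
VERSION=$(grep '^version:' apmpackage/apm/manifest.yml | cut -d ' ' -f 2)

# Ask the registry whether the locally built apm package is being served.
# Grepping JSON is brittle; this is only a quick sanity check.
curl -s 'http://localhost:8080/search?package=apm' | grep -q "\"version\": \"${VERSION}\"" \
  && echo "apm package ${VERSION} is served" \
  || echo "apm package ${VERSION} not found"
```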
- -version: '2.3' -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:5.6.9 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9200"] - retries: 300 - interval: 1s - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - - logstash: - image: docker.elastic.co/logstash/logstash:5.6.9 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] - retries: 300 - interval: 1s - volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro - - ./docker/logstash/pki:/etc/pki:ro - - kibana: - image: docker.elastic.co/kibana/kibana:5.6.9 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:5601"] - retries: 300 - interval: 1s diff --git a/testing/environments/6.0.yml b/testing/environments/6.0.yml deleted file mode 100644 index 220ea12fddf..00000000000 --- a/testing/environments/6.0.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This is the latest 6.0 environment. - -version: '2.3' -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-platinum:6.0.1 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9200"] - retries: 300 - interval: 1s - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - - logstash: - image: docker.elastic.co/logstash/logstash:6.0.1 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] - retries: 300 - interval: 1s - volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro - - ./docker/logstash/pki:/etc/pki:ro - - kibana: - image: docker.elastic.co/kibana/kibana:6.0.1 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:5601"] - retries: 300 - interval: 1s diff --git a/testing/environments/Dockerfile b/testing/environments/Dockerfile deleted file mode 100644 index 0543985dadd..00000000000 --- a/testing/environments/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -# Basic debian file with curl, wget and nano installed to fetch files -# and update config files -FROM debian:latest -MAINTAINER Nicolas Ruflin - -RUN apt-get update && \ - apt-get install -y curl nano wget zip && \ - apt-get clean diff --git a/testing/environments/Makefile b/testing/environments/Makefile deleted file mode 100644 index bef47686095..00000000000 --- a/testing/environments/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -ENV?=snapshot.yml -BASE_COMMAND=docker-compose -f ${ENV} -f local.yml - -start: - # This is run every time to make sure the environment is up-to-date - ${BASE_COMMAND} build --pull --force-rm - ${BASE_COMMAND} run beat bash - -stop: - ${BASE_COMMAND} down -v - - -up: - ${BASE_COMMAND} build - ${BASE_COMMAND} up - -up-%: - ${BASE_COMMAND} build $* - ${BASE_COMMAND} up $* - -# Be careful using this command, as it will remove all containers and volumes of your docker-machine -clean: - docker stop $(shell docker ps -a -q) - docker rm -v $(shell docker ps -a -q) - -# Tails the environment log files -logs: - ${BASE_COMMAND} logs -f diff --git a/testing/environments/README.md b/testing/environments/README.md deleted file mode 100644 index 332e52bfe2a..00000000000 --- a/testing/environments/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Testing environments - -These environments are intended for manual and automated testing. The docker-compose files can be combined to create the different environments.
- - -# Manual testing - -The different environments can be started with the following commands for manual testing. These environments expose the ports of Elasticsearch, Logstash and Kibana on the Docker-Machine IP. - -Running the environment chains the following docker-compose files together: - -* local.yml: Definition of ports which have to be exposed for local testing, including Kibana -* latest.yml: Latest version of elasticsearch, logstash, kibana -* snapshot.yml: Snapshot version of elasticsearch, logstash, kibana - - -## Start / Stop environment - -``` -make start ENV=es17-ls15-kb41.yml -``` - -This will start the environment and log you into the debian machine. This machine is intended for manual testing of the beats. Download the beats package or snapshot you want to test. Elasticsearch can be reached under the host `elasticsearch`, logstash under `logstash`. Make sure to update the configuration file of the beat with the specific host. - -To stop and clean up the environment afterwards, make sure to run: - -``` -make stop ENV=es17-ls15-kb41.yml -``` - - -## Update containers - -As some default installation settings must be changed for testing, access to the containers is needed. Each container has a unique name which corresponds with the service name. To access a running container of elasticsearch, run: - -``` -docker exec -it elasticsearch bash -``` - -## Access machines from outside - -It is sometimes useful to access the containers from a browser, especially for Kibana. Elasticsearch exposes port 9200 and Kibana 5601. Make sure no other services on your machine are already assigned to these ports. To access Kibana for example, go to the following URL: - -``` -http://docker-machine-ip:5601/ -``` - -Often the default address is `localhost`. - - -## Cleanup -In case your environment is messed up because of multiple instances still running and conflicting with each other, use the following commands to clean up. Please be aware that this will stop ALL docker containers on your docker-machine. - -``` -make clean -``` - - -## Notes - -Every container has a name corresponding with the service. This requires shutting down an environment and cleaning it up before starting another environment. This is intentional to prevent conflicts. - - -# Automated Testing - -These environments are also used for integration testing in the different beats. For this, `make testsuite` by default uses the snapshot environment. To select a different environment during testing, run the following command to use the latest environment: - -``` -TESTING_ENVIRONMENT=latest make testsuite -``` - -This will run the full testsuite, but with the latest environments instead of snapshot. - - -## Defaults - -By default, elasticsearch, logstash and kibana are started. These are available at all times when these environments are used. Running the environment chains the following docker-compose files together: - -* snapshot.yml: Snapshot version of elasticsearch, logstash, kibana - -* docker-compose.yml: Local beat docker-compose file - - -## Updating environments - -If the snapshot environment is updated with a new build, all beats will automatically build with the most recent version.
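The Makefile removed above is a thin wrapper around chained docker-compose files. For reference, a sketch of the equivalent raw invocations, assuming the snapshot environment:

```sh
# Equivalent of `make start ENV=snapshot.yml`: chain the environment file with
# local.yml (exposed ports), then open a shell in the beat container.
docker-compose -f snapshot.yml -f local.yml build --pull --force-rm
docker-compose -f snapshot.yml -f local.yml run beat bash

# Equivalent of `make stop ENV=snapshot.yml`: tear down containers and volumes.
docker-compose -f snapshot.yml -f local.yml down -v
```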
diff --git a/testing/environments/docker/elasticsearch/kerberos/init.sh b/testing/environments/docker/elasticsearch/kerberos/init.sh deleted file mode 100644 index ac7fe70fa69..00000000000 --- a/testing/environments/docker/elasticsearch/kerberos/init.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# set up Kerberos -echo elasticsearch_kerberos.elastic > /etc/hostname && echo "127.0.0.1 elasticsearch_kerberos.elastic" >> /etc/hosts - -/scripts/installkdc.sh -/scripts/addprincs.sh - -# add test user -bin/elasticsearch-users useradd beats -r superuser -p testing | /usr/local/bin/docker-entrypoint.sh eswrapper diff --git a/testing/environments/docker/elasticsearch/kerberos/installkdc.sh b/testing/environments/docker/elasticsearch/kerberos/installkdc.sh deleted file mode 100644 index f35848d004c..00000000000 --- a/testing/environments/docker/elasticsearch/kerberos/installkdc.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -# KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html -# and helpful input from https://help.ubuntu.com/community/Kerberos - -LOCALSTATEDIR=/etc -LOGDIR=/var/log/krb5 - -#MARKER_FILE=/etc/marker - -# Transfer and interpolate krb5.conf -cp /config/krb5.conf.template $LOCALSTATEDIR/krb5.conf -sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf - - -# Transfer and interpolate the kdc.conf -mkdir -p $LOCALSTATEDIR/krb5kdc -cp /config/kdc.conf.template $LOCALSTATEDIR/krb5kdc/kdc.conf -sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf -sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf -sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf -sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf - -# Touch logging locations -mkdir -p $LOGDIR -touch $LOGDIR/kadmin.log -touch $LOGDIR/krb5kdc.log -touch $LOGDIR/krb5lib.log - -# Update package manager -yum update -qqy - -# Install krb5 packages -yum install -qqy krb5-{server,libs,workstation} - -# Create kerberos database with stash file and garbage password -kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876 - -# Set up admin acls -cat << EOF > /etc/krb5kdc/kadm5.acl -*/admin@$REALM_NAME * -*@$REALM_NAME * -*/*@$REALM_NAME i -EOF - -# Create admin principal -kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME" -kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME" - -# Create a link so addprinc.sh is on path -ln -s /scripts/addprinc.sh /usr/bin/ diff --git 
a/testing/environments/docker/elasticsearch/pki/ca/ca.crt b/testing/environments/docker/elasticsearch/pki/ca/ca.crt deleted file mode 100644 index a18a84fd7b6..00000000000 --- a/testing/environments/docker/elasticsearch/pki/ca/ca.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIVAOshUH7Va8Kh1QeA4KgLw8dI29M4MA0GCSqGSIb3DQEB -CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu -ZXJhdGVkIENBMB4XDTIwMDIwNzE2MzUzMFoXDTIzMDIwNjE2MzUzMFowNDEyMDAG -A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCv7Oq3uE0kO5Ij41U9M7ee -xOtprdA3joR68B32/SQ1FW9Igk3f/DTn8MqlAFexwzGAONPNIcj44W9KhVeT9aFA -qCKfS2no+V9aKds6wyEHY3sAmYICEHBMDor9KhPnIc8m/gl3TcGMKmouoHqFPNKE -irilBDUO7rs5w46lcbxJrHTlEA6xyQLT7+sJ4DswO/xeoemPTBa7vzkoVUyZ50/D -VSUulY4XtmQvmbe4Aa0p8sgLNzFAJRl3XqZMECwO2iJ9jFwKCUT4EbFW4aTQtylI -CBax+Cn79vKpp3gO1WVu1cdcQW3+ciAJyUydTsCA2zjGYZyzL84z7eCHW946WQWD -AgMBAAGjUzBRMB0GA1UdDgQWBBQZKfuW0o2yabRo9tosWldK43GDbjAfBgNVHSME -GDAWgBQZKfuW0o2yabRo9tosWldK43GDbjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBCwUAA4IBAQAHeIJPwxvHTismlbFJKcCM3kr/ZblXguiFTmhqylqa8wFI -ke1xpTUrdfTAkD0ohmtPAUMPBkHeyHKzvxK7Blh230/lxybJNVSpfp7FQvj1EsmW -7FbIsKoj9MwJ2Lg5h6rnFA4t0bL3q74HV+vqpMoJDe92uX0GaSH/iYb+BfZ2El8m -QfANac0O+TE70i0++v/BzUAkqhJB3pG/3ziPzdFWlXf4iUG0YhMG4Ig5P/SvGz/V -MNc+uq3bh9xsNrtcm2S/pVdt/gdsujg9MTaoOr+maJPB/+LBrkZWtZcbUe++1+Z7 -32exp0eKNA0i90cc/Ayr79MOFDxdgI7baBnLPPa8 ------END CERTIFICATE----- diff --git a/testing/environments/docker/elasticsearch/pki/ca/ca.key b/testing/environments/docker/elasticsearch/pki/ca/ca.key deleted file mode 100644 index 48982cea36e..00000000000 --- a/testing/environments/docker/elasticsearch/pki/ca/ca.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAr+zqt7hNJDuSI+NVPTO3nsTraa3QN46EevAd9v0kNRVvSIJN -3/w05/DKpQBXscMxgDjTzSHI+OFvSoVXk/WhQKgin0tp6PlfWinbOsMhB2N7AJmC -AhBwTA6K/SoT5yHPJv4Jd03BjCpqLqB6hTzShIq4pQQ1Du67OcOOpXG8Sax05RAO -sckC0+/rCeA7MDv8XqHpj0wWu785KFVMmedPw1UlLpWOF7ZkL5m3uAGtKfLICzcx -QCUZd16mTBAsDtoifYxcCglE+BGxVuGk0LcpSAgWsfgp+/byqad4DtVlbtXHXEFt -/nIgCclMnU7AgNs4xmGcsy/OM+3gh1veOlkFgwIDAQABAoIBAALZzfvoKqfZp0aZ -mnoBaopSGpZ90I/16UOsvG+SLpIFpOYB5o0ooxrXFhGSbdldlmHDifsa/wy5anpE -quSk6FYJ43W9XRv/XoIxh3HuU4yxGf8qfabW6VryKWJs2iG2tIqnNzQNuIMy9MGI -rDOYhrjLHq7d4JY7XCFVf+xCaZCwCb3yvZwVnrAqmPoeg2FrXmCVzqr1IpmwzJ0B -OfGWzi5THLm4/aGVUBfkvGURxsmwo3jGn0myr9oUkKczOKGEqvnlVuT9+ShURZp2 -tDU8zVRF0ksUNogUSfSNgWwpCYNBIqPOdxr7nT0+NEJ7b4R7/3LXEh/tRcuRNX+d -mjUMwbECgYEA/1MWpTdB9+9M7wyQasd29m1b0mkw+Jebdw+CuIK3XdPPGOfD17WO -sKZek3uK24DFGzRQf8tzHqzGvHncykAhb3oePVbfuhE5jt9bfgAOX8Naz6AK6Dmj -6+pJgXFTTNGL8JDojsIlabq4QH7oB02HoQ87GTr8IF4CjlJCHcyVB98CgYEAsGQO -uz0A1HdeuzbOP0+E86Ip03gcq66mVibXpy2qdMwEluxARW52XPKc8LKKI0QS4Qxk -giHHTQwPTLXJW9gM8v9/SQupQ/Vx8Zi3KjQ2ZAQoj6bGyDJ1P278GePJC4b0h/vG -F0sSUsmoEUGrLtq8Ofv3hDF6Ik247MQFi7i+Bt0CgYEAgP0kAqGw9SXzEw2g5CET -C5nh+qHj+KL3CqZOXxLCumcoSCfGe/KgPMRAIXgXhZ8/dOfwBy/sX8HfwRY7of3W -JnBmWIzMCD9tea2DlltG58BU33G2MO31z1iUfA2ZjMSMUyOSKZURu6F2Njcm15Gm -hIqiS7PN7jgwSGBsQIu7ercCgYEAh/nKJWrkbeVLgLTCD6okSpAzABLyvyJWlclB -q12Xrovr6dBbx2pdEk/wzdhEhuUeTKB6Bps1gV6PmMn2XLfTW6u8GrpDlODsIptg -b8dqOnW+MucVDBVhrzHGY8rmG93AOefMD/7ONEXCKvNdnDQAsA5eA2kExtb1fIer -4sbarn0CgYEAl1av+NOVduN1KrJXuZnNeN4KeNoYqJOS4s14Wk37GIujsrcE/m18 -BhZk0Al/oKZIDSuya5GGRhT+ndD8OWc4DEMWk2JnJdWKh20FfeM6UXVI46Sq3Won -vPDB6GYr45uRgtLvYeitLpXE5s9GmH8GyIV9om3TvDiceMXd/LbCodY= ------END RSA PRIVATE KEY----- diff --git 
a/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.crt b/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.crt deleted file mode 100644 index 4b373ea66a0..00000000000 --- a/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDKjCCAhKgAwIBAgIUZWu3nanhrFaNe6kMhtsPM4neUCYwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMjAwMjA3MTYzNjMxWhcNMjMwMjA2MTYzNjMxWjAbMRkwFwYD -VQQDExBlbGFzdGljc2VhcmNoc3NsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAl09yaI0HI3I8DmJ0UyRDedFNBFOfsFh7sYGGElj0h4H1kt8oEA3uYIH/ -oPUQ9Mkn30m+qccdQC6/pz/ZgkCfOckXtX1PVLEAK9MEqEwj6UU4uMgSIUTjXN22 -m/YedSJFtwGiQqFbCD1LijRLjlDCvHZ1W5M6XYzWoUN1y4MDZSD755TuluAp277j -6yuJIEw5SsQ/Nw4Coaqexy1ha7G/y3L+3t4SFrXaBqe+nM1xPDR0Is/p8iTdcdlu -kEFmbIqDGAPx2jvTRWYikL3MmR4u58AoIk0WqeGmLefxzV6jC6zsQGRnpmtz3jye -XHRfodf3crMZm+mw6FNPk4PJzZSsXwIDAQABo00wSzAdBgNVHQ4EFgQUmcNplxkS -+zHt5LWVM67Tzws8fBEwHwYDVR0jBBgwFoAUGSn7ltKNsmm0aPbaLFpXSuNxg24w -CQYDVR0TBAIwADANBgkqhkiG9w0BAQsFAAOCAQEAPpFdJpuwBXqBZezfQTTQFkKm -EHz3UDYKA3nHt2tcmFqAEXYx4cXaor5GG9YLThGWUp2iBXIyIzUZnpkM2wl/pIlz -8fMFxvtS6hQ2VwFDHAo2ht8ay7/vTrKcVvNL5NtPHjRlHhT94XiwYNpneiB6EMGP -+lTxWXSLpSnl0AnFdpLzPpS6DiaMHAPChAbDGK9i76D13sQBJZ/lgQiMmntEWsTr -0NNsjBk2xjMQAYs/eJXfENkAxvuzJTbQdJ1kMOvybONT4Lw8UIhoRpRY7EspwlI3 -encLBhcxYJjpzSPqdDQQRVXd4zUNFe4595LKEsm14mXaTy682HAe/HvN+yO7qw== ------END CERTIFICATE----- diff --git a/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.key b/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.key deleted file mode 100644 index f374f10fa44..00000000000 --- a/testing/environments/docker/elasticsearch/pki/elasticsearchssl/elasticsearchssl.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAl09yaI0HI3I8DmJ0UyRDedFNBFOfsFh7sYGGElj0h4H1kt8o -EA3uYIH/oPUQ9Mkn30m+qccdQC6/pz/ZgkCfOckXtX1PVLEAK9MEqEwj6UU4uMgS -IUTjXN22m/YedSJFtwGiQqFbCD1LijRLjlDCvHZ1W5M6XYzWoUN1y4MDZSD755Tu -luAp277j6yuJIEw5SsQ/Nw4Coaqexy1ha7G/y3L+3t4SFrXaBqe+nM1xPDR0Is/p -8iTdcdlukEFmbIqDGAPx2jvTRWYikL3MmR4u58AoIk0WqeGmLefxzV6jC6zsQGRn -pmtz3jyeXHRfodf3crMZm+mw6FNPk4PJzZSsXwIDAQABAoIBAB1rHu1g7hBgN3j8 -f21i0ZOvs++xaozYx0Pd0PlkPjbSd7KUnK9yZfRxkgfzXdaZ/ZyWM/HCetdtv2l/ -KoT+l3aeuHNa57+pokTjBDbMhvbltH+Itq2tPR9jJAvysD1J6pAIS0n1IUPa1wMJ -497JqPMHfQ3O9DwYE+rKuO5WjKRulUrL8K3OgHndLiHPZuUfIveSd6qux7wAebmD -OpWukVvYoC2k//Bgopdyg9VxVZtTg1SZlyFZ8wteDrbgF+eDMp9uIRddrvMUCwH4 -+GJOzkXxgkeOANjr5obMRjrr5hwoCE+RObCXAT3lx+nfCvYY5Lb72WWPQPEJ5ltP -xuxYY/ECgYEA71+DxCSUpxaK6THpJ10Z4FlTV0YAFfnMx9Jecn4CaJpQrWYFjLB7 -zkhlJWWyzPMc56+5olfcMEXHO9dT1/w3lFlJmRaS4yu/ZdPf2E6Pi6eXpeRYshj9 -NIq/pMCB1XxNogGzQA0AFBc+vw6Tx7LG+Bz/Yafi4SQN89I9v2SaeiMCgYEAodIE -epMZmVhlmrVzjPKcYtqWu464Sb3sHBwgnxvKcU1NUAUjTuzI9DwrJYgrA9NBcgHq -ckwbqiHNcej4MGFk7nN98U47eb+p6PAPNde7q42iNz2q7pKlNVml+Eg/wC2lhNah -N6K6S4wvTM6ujNIZGQ3DyKQC0tCMu+LnPxYYcpUCgYEAi9E2nfLgAVjheqR0k1GG -M8z5KRjyI+PtASqXkDiaH49DYIUe6LaNGkifC+EDN0MptwqlW3YGXwvi+8kiaB4i -OLyOiKTu11JOUaQYM7hvkBssMPHX/O8rtuz0U78+FvysO9zSXq85RILvW5mgKBz8 -qyAE632sv+TXYXuEJa8VrBECgYEAmAmh6aSh7aDPPc90NJ6R7pMgAjKy1Z4a48JN -qBBNYazWkfNx3Cq/GDIb+9R3Tc3D9KD2LNNzPqMpyvevkI1BboSWdQ0i9l3s/w3l -zJnYGvQk0DAhlKu1i22icac4NpDsreWWbZZ34Jliq5CZEXgo2pBDPhVTDc2iHLmw -uWZCLA0CgYAG99zukAD9iq2MyGiyuJ8dYU0dDvyaKcSkM3pfsgA4TpSlZljDKnAH -1VVPGB3pOHUYbcxsD2/1AJBlplvq8OVcrURuSXYl9PtwczJBgfSNNtSkHvMirWzo 
-q7eEeYCCs/VZUr9mY0nuzysq3ltiBW6tsdCn6d89ogs2WvseTlHZLg== ------END RSA PRIVATE KEY----- diff --git a/testing/environments/docker/elasticsearch/pki/generate_pki.sh b/testing/environments/docker/elasticsearch/pki/generate_pki.sh deleted file mode 100644 index beb43d294ea..00000000000 --- a/testing/environments/docker/elasticsearch/pki/generate_pki.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -# Take the CA certificate's public key, convert it to DER format, compute a SHA-256 digest of it, and base64-encode the result -# https://www.openssl.org/docs/manmaster/man1/dgst.html -openssl x509 -in ca/ca.crt -pubkey -noout | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | openssl enc -base64 diff --git a/testing/environments/docker/elasticsearch_kerberos/Dockerfile b/testing/environments/docker/elasticsearch_kerberos/Dockerfile deleted file mode 100644 index 59e5de735ad..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT - -ADD scripts /scripts -ADD config /config -ADD healthcheck.sh /healthcheck.sh -ADD start.sh /start.sh - -ENV REALM_NAME ELASTIC -ENV KDC_NAME elasticsearch_kerberos.elastic -ENV BUILD_ZONE elastic -ENV ELASTIC_ZONE $BUILD_ZONE - -USER root -RUN /scripts/installkdc.sh && /scripts/addprincs.sh -USER elasticsearch diff --git a/testing/environments/docker/elasticsearch_kerberos/config/kdc.conf.template b/testing/environments/docker/elasticsearch_kerberos/config/kdc.conf.template deleted file mode 100644 index 0d32b8d411f..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/config/kdc.conf.template +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -[kdcdefaults] - kdc_listen = 1088 - kdc_tcp_listen = 1088 - -[realms] - ${REALM_NAME} = { - kadmind_port = 1749 - max_life = 12h 0m 0s - max_renewable_life = 7d 0h 0m 0s - master_key_type = aes256-cts - supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal - } - -[logging] - kdc = FILE:/var/log/krb5/krb5kdc.log - admin_server = FILE:/var/log/krb5/kadmin.log - default = FILE:/var/log/krb5/krb5lib.log diff --git a/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf b/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf deleted file mode 100644 index 1b34299558c..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf +++ /dev/null @@ -1,25 +0,0 @@ -[libdefaults] - default_realm = ELASTIC - dns_canonicalize_hostname = false - dns_lookup_kdc = false - dns_lookup_realm = false - dns_uri_lookup = false - forwardable = true - ignore_acceptor_hostname = true - rdns = false - default_tgs_enctypes = aes128-cts-hmac-sha1-96 - default_tkt_enctypes = aes128-cts-hmac-sha1-96 - permitted_enctypes = aes128-cts-hmac-sha1-96 - kdc_timeout = 3000 - -[realms] - ELASTIC = { - kdc = elasticsearch_kerberos.elastic:88 - admin_server = elasticsearch_kerberos.elastic:749 - default_domain = elastic - } - -[domain_realm] - .elastic = ELASTIC - elastic = ELASTIC - diff --git a/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf.template b/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf.template deleted file mode 100644 index 75245ab7733..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/config/krb5.conf.template +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -[libdefaults] - default_realm = ${REALM_NAME} - dns_canonicalize_hostname = false - dns_lookup_kdc = false - dns_lookup_realm = false - dns_uri_lookup = false - forwardable = true - ignore_acceptor_hostname = true - rdns = false - default_tgs_enctypes = aes128-cts-hmac-sha1-96 - default_tkt_enctypes = aes128-cts-hmac-sha1-96 - permitted_enctypes = aes128-cts-hmac-sha1-96 - udp_preference_limit = 1 - kdc_timeout = 3000 - -[realms] - ${REALM_NAME} = { - kdc = localhost:1088 - admin_server = localhost:1749 - default_domain = ${BUILD_ZONE} - } - -[domain_realm] - .${ELASTIC_ZONE} = ${REALM_NAME} - ${ELASTIC_ZONE} = ${REALM_NAME} - diff --git a/testing/environments/docker/elasticsearch_kerberos/healthcheck.sh b/testing/environments/docker/elasticsearch_kerberos/healthcheck.sh deleted file mode 100644 index a0932afaa94..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/healthcheck.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -# check if service principal is OK -KRB5_CONFIG=/etc/krb5.conf \ - kinit -k -t /etc/HTTP_elasticsearch_kerberos.elastic.keytab HTTP/elasticsearch_kerberos.elastic@ELASTIC - - -# check if beats user can connect -echo testing | KRB5_CONFIG=/etc/krb5.conf kinit beats@ELASTIC -klist -curl --negotiate -u : -XGET http://elasticsearch_kerberos.elastic:9200/ diff --git a/testing/environments/docker/elasticsearch_kerberos/scripts/addprinc.sh b/testing/environments/docker/elasticsearch_kerberos/scripts/addprinc.sh deleted file mode 100644 index 97493df7c51..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/scripts/addprinc.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -if [[ $# -lt 1 ]]; then - echo 'Usage: addprinc.sh principalName [password]' - echo ' principalName user principal name without realm' - echo ' password If provided then will set password for user else it will provision user with keytab' - exit 1 -fi - -PRINC="$1" -PASSWD="$2" -USER=$(echo $PRINC | tr "/" "_") -REALM=ELASTIC - -VDIR=/usr/share/kerberos -BUILD_DIR=/var/build -LOCALSTATEDIR=/etc -LOGDIR=/var/log/krb5 - -ADMIN_PRIN=admin/admin@$REALM -ADMIN_KTAB=$LOCALSTATEDIR/admin.keytab - -USER_PRIN=$PRINC@$REALM -USER_KTAB=$LOCALSTATEDIR/$USER.keytab - -if [ -f $USER_KTAB ] && [ -z "$PASSWD" ]; then - echo "Principal '${PRINC}@${REALM}' already exists. Re-copying keytab..." - sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab -else - if [ -z "$PASSWD" ]; then - echo "Provisioning '${PRINC}@${REALM}' principal and keytab..." 
- sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN" - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN" - sudo chmod 777 $USER_KTAB - sudo cp $USER_KTAB /usr/share/elasticsearch/config - sudo chown elasticsearch:elasticsearch /usr/share/elasticsearch/config/$USER.keytab - else - echo "Provisioning '${PRINC}@${REALM}' principal with password..." - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" - fi -fi - -echo "Done provisioning $USER" diff --git a/testing/environments/docker/elasticsearch_kerberos/scripts/addprincs.sh b/testing/environments/docker/elasticsearch_kerberos/scripts/addprincs.sh deleted file mode 100644 index 7ee85889f0d..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/scripts/addprincs.sh +++ /dev/null @@ -1,7 +0,0 @@ -set -e - -krb5kdc -kadmind - -addprinc.sh HTTP/elasticsearch_kerberos.elastic -addprinc.sh beats testing diff --git a/testing/environments/docker/elasticsearch_kerberos/scripts/installkdc.sh b/testing/environments/docker/elasticsearch_kerberos/scripts/installkdc.sh deleted file mode 100644 index 50ab0ff0a6a..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/scripts/installkdc.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -e - -LOCALSTATEDIR=/etc -KDC_CONFIG=/var/kerberos -LOGDIR=/var/log/krb5 - -#MARKER_FILE=/etc/marker - -# Transfer and interpolate krb5.conf -cp /config/krb5.conf.template $LOCALSTATEDIR/krb5.conf -sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf -sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf - - -# Transfer and interpolate the kdc.conf -mkdir -p $KDC_CONFIG/krb5kdc -cp /config/kdc.conf.template $KDC_CONFIG/krb5kdc/kdc.conf -sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $KDC_CONFIG/krb5kdc/kdc.conf -sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $KDC_CONFIG/krb5kdc/kdc.conf -sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $KDC_CONFIG/krb5kdc/kdc.conf -sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf - -# Touch logging locations -mkdir -p $LOGDIR -touch $LOGDIR/kadmin.log -touch $LOGDIR/krb5kdc.log -touch $LOGDIR/krb5lib.log - -# Update package manager -yum update -qqy - -# Install krb5 packages -yum install -qqy krb5-{server,libs,workstation} sudo - -# Create kerberos database with stash file and garbage password -kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876 - -# Set up admin acls -cat << EOF > /var/kerberos/krb5kdc/kadm5.acl -*/admin@$REALM_NAME * -*@$REALM_NAME * -*/*@$REALM_NAME i -EOF - -# Create admin principal -kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME" -kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME" - -# set ownership for ES -chown -R elasticsearch:elasticsearch $LOGDIR -chown -R elasticsearch:elasticsearch $KDC_CONFIG -chown -R elasticsearch:elasticsearch $LOCALSTATEDIR/krb5.conf -chown -R elasticsearch:elasticsearch $LOCALSTATEDIR/admin.keytab - - -# Create a link so addprinc.sh is on path -ln -s /scripts/addprinc.sh /usr/bin/ diff --git a/testing/environments/docker/elasticsearch_kerberos/start.sh b/testing/environments/docker/elasticsearch_kerberos/start.sh deleted file mode 100644 index 522f6c20474..00000000000 --- a/testing/environments/docker/elasticsearch_kerberos/start.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# start Kerberos services -krb5kdc -kadmind - -# start ES -/usr/local/bin/docker-entrypoint.sh eswrapper diff --git a/testing/environments/docker/kafka/Dockerfile b/testing/environments/docker/kafka/Dockerfile deleted file mode 100644 index 24bf0996193..00000000000 --- a/testing/environments/docker/kafka/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM debian:stretch - -ENV KAFKA_HOME /kafka -# The advertised host is kafka. This means it will not work if container is started locally and connected from localhost to it -ENV KAFKA_ADVERTISED_HOST kafka -ENV KAFKA_LOGS_DIR="/kafka-logs" -ENV KAFKA_VERSION 2.2.2 -ENV _JAVA_OPTIONS "-Djava.net.preferIPv4Stack=true" -ENV TERM=linux - -RUN apt-get update && apt-get install -y curl openjdk-8-jre-headless netcat - -RUN mkdir -p ${KAFKA_LOGS_DIR} && mkdir -p ${KAFKA_HOME} && curl -s -o $INSTALL_DIR/kafka.tgz \ - "http://mirror.easyname.ch/apache/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz" && \ - tar xzf ${INSTALL_DIR}/kafka.tgz -C ${KAFKA_HOME} --strip-components 1 - -ADD run.sh /run.sh -ADD healthcheck.sh /healthcheck.sh - -EXPOSE 9092 -EXPOSE 2181 - -# Healthcheck creates an empty topic foo. 
As soon as a topic is created, it assumes the broker is available -HEALTHCHECK --interval=1s --retries=600 CMD /healthcheck.sh - -ENTRYPOINT ["/run.sh"] diff --git a/testing/environments/docker/kafka/healthcheck.sh b/testing/environments/docker/kafka/healthcheck.sh deleted file mode 100644 index feebbb8786d..00000000000 --- a/testing/environments/docker/kafka/healthcheck.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -TOPIC="foo-`date '+%s-%N'`" - -${KAFKA_HOME}/bin/kafka-topics.sh --zookeeper=127.0.0.1:2181 --create --partitions 1 --topic "${TOPIC}" --replication-factor 1 -rc=$? -if [[ $rc != 0 ]]; then - exit $rc -fi - -${KAFKA_HOME}/bin/kafka-topics.sh --zookeeper=127.0.0.1:2181 --delete --topic "${TOPIC}" -exit 0 diff --git a/testing/environments/docker/kafka/run.sh b/testing/environments/docker/kafka/run.sh deleted file mode 100644 index 873f6951acc..00000000000 --- a/testing/environments/docker/kafka/run.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -wait_for_port() { - count=20 - port=$1 - while ! nc -z localhost $port && [[ $count -ne 0 ]]; do - count=$(( $count - 1 )) - [[ $count -eq 0 ]] && return 1 - sleep 0.5 - done - # just in case, one more time - nc -z localhost $port -} - -echo "Starting ZooKeeper" -${KAFKA_HOME}/bin/zookeeper-server-start.sh ${KAFKA_HOME}/config/zookeeper.properties & -wait_for_port 2181 - -echo "Starting Kafka broker" -mkdir -p ${KAFKA_LOGS_DIR} -${KAFKA_HOME}/bin/kafka-server-start.sh ${KAFKA_HOME}/config/server.properties \ - --override delete.topic.enable=true --override advertised.host.name=${KAFKA_ADVERTISED_HOST} \ - --override listeners=PLAINTEXT://0.0.0.0:9092 \ - --override logs.dir=${KAFKA_LOGS_DIR} --override log.flush.interval.ms=200 \ - --override num.partitions=3 & - -wait_for_port 9092 - -echo "Kafka load status code $?" 
- -# Make sure the container keeps running -tail -f /dev/null diff --git a/testing/environments/docker/kerberos_kdc/Dockerfile b/testing/environments/docker/kerberos_kdc/Dockerfile deleted file mode 100644 index 629fbaebcd5..00000000000 --- a/testing/environments/docker/kerberos_kdc/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 -ADD scripts /scripts - -ENV REALM_NAME ELASTIC -ENV KDC_NAME kerberos_kdc -ENV BUILD_ZONE elastic -ENV ELASTIC_ZONE $BUILD_ZONE - -RUN echo kerberos_kdc.elastic > /etc/hostname && echo "127.0.0.1 kerberos_kdc.elastic" >> /etc/hosts -RUN bash /scripts/installkdc.sh - -EXPOSE 88 -EXPOSE 749 - -CMD sleep infinity diff --git a/testing/environments/docker/logstash/gencerts.sh b/testing/environments/docker/logstash/gencerts.sh deleted file mode 100644 index a04742a7672..00000000000 --- a/testing/environments/docker/logstash/gencerts.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -mkdir -p pki/tls/certs -mkdir -p pki/tls/private -openssl req -subj '/CN=logstash/' -x509 -days $((100 * 365)) -batch -nodes -newkey rsa:2048 -keyout pki/tls/private/logstash.key -out pki/tls/certs/logstash.crt diff --git a/testing/environments/docker/logstash/pipeline/default.conf b/testing/environments/docker/logstash/pipeline/default.conf deleted file mode 100644 index 08edff764bf..00000000000 --- a/testing/environments/docker/logstash/pipeline/default.conf +++ /dev/null @@ -1,24 +0,0 @@ -input { - beats { - port => 5044 - ssl => false - } - - beats { - port => 5055 - ssl => true - ssl_certificate => "/etc/pki/tls/certs/logstash.crt" - ssl_key => "/etc/pki/tls/private/logstash.key" - } -} - - -output { - elasticsearch { - hosts => ["${ES_HOST:elasticsearch}:${ES_PORT:9200}"] - index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" - } - - # Used for easier debugging - #stdout { codec => rubydebug { metadata => true } } -} diff --git a/testing/environments/docker/logstash/pki/tls/certs/logstash.crt b/testing/environments/docker/logstash/pki/tls/certs/logstash.crt deleted file mode 100644 index 1b18ba84a20..00000000000 --- a/testing/environments/docker/logstash/pki/tls/certs/logstash.crt +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC+zCCAeOgAwIBAgIJALOvd7vXvRrFMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV -BAMMCGxvZ3N0YXNoMCAXDTE2MDgyNjEyMzMyNFoYDzIxMTYwODAyMTIzMzI0WjAT -MREwDwYDVQQDDAhsb2dzdGFzaDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAKw5gLdWfqG9eraHrAAfSn2NbemYq32YZgbwJGaM9SlY3DDHB1MgBKBjpzPW -FavMO4xaDcfFhZbBJXwCVjPJe3ORQeoHgm3hG2er6JtCXlt3vto8FVbs9H4jd3+U -gH4cNdomgtYh3lBobZFKOa/+mZvjQxsK71KM2Gwk4b5gnV9iLaXzAGRWmY1dlHkE -Gki4WGNg0FlGf7aDJXZK2Yyq8MmiMfUEIZ2sDRjO3f/rCLdz3amG4gJtDllekz5l -lUTLccvtTWstJiKIx1zIAUEvTqaqInjMiJkjQtwazlc9w5ofmauxI6bb9L3L1ZJX -rrt+u5mg8Mc/w63+GuS8ZETbAacCAwEAAaNQME4wHQYDVR0OBBYEFA9Ug44w4XmN -r0z225Zt1zjjrKtoMB8GA1UdIwQYMBaAFA9Ug44w4XmNr0z225Zt1zjjrKtoMAwG -A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBABwl9KCqg69dv2XNZ95VCdk7 -mAT0LcUbd0FyrzDibNolzx9OlymWYiIxe86KdZsWzgBUcm9Q3Gg+TzAs7UyyfqSp -LR5fgGGIz9PCuuoFBdZCppPL9Y3Dryi91lPXveDUh5zIemOU9Jf6Ni0XVrRsO9C8 -aoY7SLtl1W7du3Nm+ZFH8T0wCcBFaYttmHejyu311ZDyAF0suu6Qu8NAWFrr5QGe -hA8VcImc335VQntT9EcztHhoyt1aW96BxLU9L4kdSZLJ6FVZrGij7IpZNipUQB8p -bPEL9KuQUDHKjoCx2YaNZqmuZ73m6u84TiTxgDYgChSfYASRXyCq90rQrQHVF74= ------END CERTIFICATE----- diff --git a/testing/environments/docker/logstash/pki/tls/private/logstash.key b/testing/environments/docker/logstash/pki/tls/private/logstash.key deleted file mode 100644 index 9d3234d202e..00000000000 --- a/testing/environments/docker/logstash/pki/tls/private/logstash.key +++ 
/dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCsOYC3Vn6hvXq2 -h6wAH0p9jW3pmKt9mGYG8CRmjPUpWNwwxwdTIASgY6cz1hWrzDuMWg3HxYWWwSV8 -AlYzyXtzkUHqB4Jt4Rtnq+ibQl5bd77aPBVW7PR+I3d/lIB+HDXaJoLWId5QaG2R -Sjmv/pmb40MbCu9SjNhsJOG+YJ1fYi2l8wBkVpmNXZR5BBpIuFhjYNBZRn+2gyV2 -StmMqvDJojH1BCGdrA0Yzt3/6wi3c92phuICbQ5ZXpM+ZZVEy3HL7U1rLSYiiMdc -yAFBL06mqiJ4zIiZI0LcGs5XPcOaH5mrsSOm2/S9y9WSV667fruZoPDHP8Ot/hrk -vGRE2wGnAgMBAAECggEAArbxUXJ6koATFBd1XZcgrHPzPJBce6FQUmGsoTUtlBZD -ej1Y3zWM/R40/3srYkbY1XCB8Rkq7uJifd7nju9pE7xBZrfxlVvL+8lY5EGajSSJ -DJWP3Ivlmqticc9cayB0tNiQjWGBSJEs0PJzkFOaBjwBzcZRWWLA8otuR3rsYBl8 -cb7dV3HV4Z50Qto1ABoUWH2DGz7nX9HCr/SR1ayR1hWHCwv2Q4KQ5wJkmTKmaSNZ -I2464JXvufM9XiV9Fjy4RdiCN3sVXQcUIJ1hY+qGXsR0DUc5lOmw9Eu4SbJgdExR -EWoX4BqJuHrjCeKRF6rsDf5ocAS2cxATbQr1mEbW0QKBgQDWmZMO9TtL+pmJNmoP -g+HzgopBnMLxctcjVOEysuWgZyWYz9sFbCj6Udp2Q/9hjoVYRba3IXEHSsA2mdcY -KKcWbjEOYE+xL6oDXiZRkiJ+Poix9dOnTBg+lt2SKjphuNWnLe3jqfQhZxfV40Nf -60Wx6NGC7Dzlf+pAmkOA12BX2QKBgQDNcyTwnShbVrWzNRpzRe7RSHaAq6jSu1Yi -6dY/8bWTInVhPjB3xUGL0ckiMpDMoi0mxtnBmahvK59GPj3jhz+9HZqG6dSS6Fok -eS104GM7pCWyf66Rd9k8xu5IdrMM9Sveu24s21jgOJDtZtAplP1hsDMxxuaAEDVv -c0RwoKu1fwKBgDL0SheuIMM8oIIU+n/ul5LjNwK3Pw5nby/DcqlAEwfQFfw/tkiG -UwCEuPOF17iJR54bB3RaK2VI2XTdeFYTKQFJbrp0Idf3ck3UaBLMOQZywLBIp1W8 -2rDZz4hqIGydn5VPcYGyE/ZubRlrGc9HpMfGeSC2CQuRIMTwHAEWopiZAoGAZpKm -Trsn+vI/pUlN+19e7H4RLAyILS36w1Ob9DDpRpxdnj8+U43YO8ZxdPFp+cC+ai29 -ajsdLOPKkXdhzscnu3OcQt9bkj0PREZ7u26MHKrHZ2b38Qi1HPL05JjerAl77agG -Sb75kHitYtmB9EC+gJdH+AIl8qolA4+5C8Ir+GECgYBJgkoapPrGgSzCWHH6WpVZ -wrpZJ/rj6685J9K/ji3nHXj7gS4MzEzrtVK2K3aFjIjQe/zXJDI+dxrSCNfYFuQD -YJh+siGPF7Gosyht3ec0vUg7gY7Do1FzwL2H/OxvnaNEO+PPidXGOu/wHV5fMZJ1 -O2aLO5ZIygL6YcVr+vdm1A== ------END PRIVATE KEY----- diff --git a/testing/environments/docker/mosquitto/Dockerfile b/testing/environments/docker/mosquitto/Dockerfile deleted file mode 100644 index eac5d1e0d6c..00000000000 --- a/testing/environments/docker/mosquitto/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM eclipse-mosquitto:1.6.8 -HEALTHCHECK --interval=1s --retries=600 CMD nc -z localhost 1883 diff --git a/testing/environments/docker/redis/Dockerfile b/testing/environments/docker/redis/Dockerfile deleted file mode 100644 index 6f777f0e630..00000000000 --- a/testing/environments/docker/redis/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM redis:5.0.8-alpine -HEALTHCHECK --interval=1s --retries=600 CMD nc -z localhost 6379 diff --git a/testing/environments/docker/sredis/Dockerfile b/testing/environments/docker/sredis/Dockerfile deleted file mode 100644 index 5abbc29468b..00000000000 --- a/testing/environments/docker/sredis/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM alpine:edge - -RUN apk add --no-cache stunnel - -COPY stunnel.conf /etc/stunnel/stunnel.conf -COPY pki /etc/pki - -RUN chmod 600 /etc/stunnel/stunnel.conf; \ - chmod 600 /etc/pki/tls/certs/*; \ - chmod 600 /etc/pki/tls/private/*; - -HEALTHCHECK --interval=1s --retries=600 CMD nc -z localhost 6380 -EXPOSE 6380 - -CMD ["stunnel"] - diff --git a/testing/environments/docker/sredis/gencerts.sh b/testing/environments/docker/sredis/gencerts.sh deleted file mode 100644 index 8617695a156..00000000000 --- a/testing/environments/docker/sredis/gencerts.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -mkdir -p pki/tls/certs -mkdir -p pki/tls/private -openssl req -subj '/CN=sredis/' -x509 -days $((100 * 365)) -batch -nodes -newkey rsa:2048 -keyout pki/tls/private/sredis.key -out pki/tls/certs/sredis.crt diff --git 
a/testing/environments/docker/sredis/pki/tls/certs/sredis.crt b/testing/environments/docker/sredis/pki/tls/certs/sredis.crt deleted file mode 100644 index e1fbc7211e2..00000000000 --- a/testing/environments/docker/sredis/pki/tls/certs/sredis.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDGTCCAgGgAwIBAgIJAPEol/xrYMpMMA0GCSqGSIb3DQEBBQUAMBExDzANBgNV -BAMTBnNyZWRpczAgFw0xNjA0MDgyMjQxMzBaGA8yMTE2MDMxNTIyNDEzMFowETEP -MA0GA1UEAxMGc3JlZGlzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -ygmNOjJ6r77/r/7FVykKynMTmUElFdspdVKYPB+iPbBU+Xwn3FdlzVvYBGAX+5kD -XLP9OX0qR18EUrBf0xsux0sEjvoydD7e9ZPoRn/PpD8T3mObtDOKe4UCY0j67Qj4 -uKT9dhqO6Gw/gVmktydhi3OHeCEHEb7sf3ydwBvradegVNFQZyjxD3x8N6b82roN -fZmVpYPoJaRI2UR+A8EbS95Rl7HjpxXrtchl/Fw+k3wS5OXHu36HdoAzkC6Hw3nV -MoKaqCSAuyVwGzd+csbp1+6S0TNZgfpHKjuSI9maYZqetJLA4rqg+hM1PbKkB605 -VjeKgNtLScjbrcXYfUBGQwIDAQABo3IwcDAdBgNVHQ4EFgQUYHI4wROpICq6l/sz -p8iNVUE8ySIwQQYDVR0jBDowOIAUYHI4wROpICq6l/szp8iNVUE8ySKhFaQTMBEx -DzANBgNVBAMTBnNyZWRpc4IJAPEol/xrYMpMMAwGA1UdEwQFMAMBAf8wDQYJKoZI -hvcNAQEFBQADggEBAFOc8jV5VKGIFt09ianhYYpr/1kld9o7zlzjHfyQbRTBvvYt -Ni22j3fshECZC9dBMuQObLpxtDAcJcncgl2LRCLcJSab/aa8jjH8qb+An4mbwdYA -bCNyItHVmPteDFWJgwSo/YHb6xpZ26fN8bi65RoUbsLtx14/wFiiEIO+rQ/20Pzi -3lOgGM7LXmYWYRhUd+LfBpNGWihZ3QL+ZkpsT6R4aFLwuWGEmGAPsgHyiOeMoR78 -0eYVnoY2oqTYARC/o+e2pCk6GWTycgSygwNRojH3ago1k5FMDk3rLWDOX0RNl9xj -A9qPE3tfnN1/Do5WsunIKuQNXmb16yQwz/AeHCM= ------END CERTIFICATE----- diff --git a/testing/environments/docker/sredis/pki/tls/private/sredis.key b/testing/environments/docker/sredis/pki/tls/private/sredis.key deleted file mode 100644 index 5f0e7c877ce..00000000000 --- a/testing/environments/docker/sredis/pki/tls/private/sredis.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAygmNOjJ6r77/r/7FVykKynMTmUElFdspdVKYPB+iPbBU+Xwn -3FdlzVvYBGAX+5kDXLP9OX0qR18EUrBf0xsux0sEjvoydD7e9ZPoRn/PpD8T3mOb -tDOKe4UCY0j67Qj4uKT9dhqO6Gw/gVmktydhi3OHeCEHEb7sf3ydwBvradegVNFQ -ZyjxD3x8N6b82roNfZmVpYPoJaRI2UR+A8EbS95Rl7HjpxXrtchl/Fw+k3wS5OXH -u36HdoAzkC6Hw3nVMoKaqCSAuyVwGzd+csbp1+6S0TNZgfpHKjuSI9maYZqetJLA -4rqg+hM1PbKkB605VjeKgNtLScjbrcXYfUBGQwIDAQABAoIBAG4ZWm72h0kyqp+8 -FMp0wT6mC24exBju/97Bjdhl3MIFT6lNcWNv9Tg97rAjta4UKnLgWwRzIxEVxINT -PkUKsSlFxkwsKEaU5GernI6epAb7oNY2Lem7lKHPUAfPA38cvn3Q25b6zhn2s3zH -3y04Nr4JzS83wGR5SOQIgubn0BgywvfuoeCUKj8BJ36vy6Dy/yIcquERwXnRzMHL -Q9oCrVAE86Dm1tQYHi5ougjSt3oKHIaUNC+PNlswiVEu+9MWhpdzBHbiEJkj3Y6H -XAkFFXqaf7Cc4RYVlGKvonjdSNk4k0zZIWmbGSOydCYlIuAhomvJ2Ar9xPZtAv/y -7zuUoBECgYEA8pLp2q82m+OEHmZOv573efBxpJxqTiIK61DMchljlhJNm6j1fwqJ -IqWoM+HTlrnA9rSA0lg900ASpp7H7IMiIr1MMBUr0JPTbqtENp3pHOOBe7gWuW5M -JtH/UyNFj/o7ewKpnDioDTW8CC2sKxKymwOOooX/hPN6PCKUTXl1yX0CgYEA1ThE -Fjs43EgAZ8v88c5I9mfBKKlb36IeYtN2A2tbnlnggg0cXGPmsxT6rMfwJVuXIwIR -tAfzSh9az0OLahP0mAWx2dK0vRcpA3+2piy9IAGSkA7qLb1M4ZCef14qixEZ3RRT -NtoCRY7IohWBvc3heAyymT1vz09ltV7o5Ln9Wr8CgYBWLJ7rU2eBBdh/vDSpml2s -ciDNK8wQKcPbSP74YdChFauAawubsDB7oIIavFUgrKjCe+lv0G1WSOLXUn0Ppp5P -3RPd2QeRt5JbNHitNngEDUaInyNjiK2A9QVRkaw6s9jBoHaEyxPYbYh5F1CclK3i -p+baEeRuZNi92EL7KvUPOQKBgAewFqxiiENRCUq0zTL+yByyUwGfUaO3mbbgfwHS -jiQgg3rM9DfUlk3gtMUkFGGUctedTHwcSUZj6QdOHSm+/HO6yNXcxg2HV0A8C57k -QBF2XQ16rDDoAWykpUJcZ9ZJ0I/rGrEAnVJaups1gU8RrE6CzrG9yFlGOJmYiXct -yZfLAoGBAJ/z+CIO9wbg8Ix0VzFFgM2W2Lt2gxmJH1jcdv6GrRbpIovAisImymNw -lqLrATxthc8PlOO1qUwHPBjeoIpZE2H1VqYMItujtdLm9UyJz3VZe7RcIzw//8KF -wndGFmWXt6Ztdlbkb29hZ7prjftZSpeuLo3vf3+BHueLPVHKGnda ------END RSA PRIVATE KEY----- diff --git a/testing/environments/docker/sredis/stunnel.conf b/testing/environments/docker/sredis/stunnel.conf deleted file 
mode 100644 index 3d07f0be6c1..00000000000 --- a/testing/environments/docker/sredis/stunnel.conf +++ /dev/null @@ -1,7 +0,0 @@ -foreground=yes - -[redis] -accept=:::6380 -connect=redis:6379 -key=/etc/pki/tls/private/sredis.key -cert=/etc/pki/tls/certs/sredis.crt diff --git a/testing/environments/docker/test.env b/testing/environments/docker/test.env deleted file mode 100644 index 5856225ceaf..00000000000 --- a/testing/environments/docker/test.env +++ /dev/null @@ -1,2 +0,0 @@ -ES_HOST=elasticsearch -ES_PORT=9200 diff --git a/testing/environments/latest.yml b/testing/environments/latest.yml deleted file mode 100644 index 2394f49f402..00000000000 --- a/testing/environments/latest.yml +++ /dev/null @@ -1,33 +0,0 @@ -# This is the latest released environment. - -version: '2.3' -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0 - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] - retries: 300 - interval: 1s - environment: - - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - - logstash: - image: docker.elastic.co/logstash/logstash:7.7.0 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] - retries: 300 - interval: 1s - volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro - - ./docker/logstash/pki:/etc/pki:ro - - kibana: - image: docker.elastic.co/kibana/kibana:7.7.0 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:5601"] - retries: 300 - interval: 1s diff --git a/testing/environments/local.yml b/testing/environments/local.yml deleted file mode 100644 index 7d588a82987..00000000000 --- a/testing/environments/local.yml +++ /dev/null @@ -1,33 +0,0 @@ -# Defines if ports should be exposed. -# This is useful for testing locally with a full elastic stack setup. -# All services can be reached through localhost, e.g. localhost:5601 for Kibana -# This is not used for CI, as otherwise port conflicts could happen. -version: '2.3' -services: - kibana: - ports: - - "127.0.0.1:5601:5601" - - elasticsearch: - ports: - - "127.0.0.1:9200:9200" - - logstash: - ports: - - "127.0.0.1:5044:5044" - - "127.0.0.1:5055:5055" - - "127.0.0.1:9600:9600" - depends_on: - elasticsearch: - condition: service_healthy - - # Makes sure containers keep running for manual testing - beat: - build: . - depends_on: - elasticsearch: - condition: service_healthy - kibana: - condition: service_healthy - logstash: - condition: service_healthy diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml deleted file mode 100644 index 371493305cd..00000000000 --- a/testing/environments/snapshot-oss.yml +++ /dev/null @@ -1,34 +0,0 @@ -# This should start the environment with the latest snapshots. 
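The sredis stunnel.conf removed above terminates TLS on port 6380 and forwards plaintext to redis:6379. A sketch for exercising such a wrapper by hand, assuming the container publishes 6380 on localhost and a TLS-capable redis-cli is installed:

```sh
# Inspect the TLS handshake and the certificate generated by gencerts.sh.
openssl s_client -connect localhost:6380 -servername sredis </dev/null

# Issue a PING through the tunnel; --insecure skips verification of the
# self-signed certificate.
redis-cli -h localhost -p 6380 --tls --insecure ping
```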
- -version: '2.3' -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:8.0.0-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] - retries: 300 - interval: 1s - environment: - - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - logstash: - image: docker.elastic.co/logstash/logstash-oss:8.0.0-SNAPSHOT - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] - retries: 600 - interval: 1s - volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro - - ./docker/logstash/pki:/etc/pki:ro - - kibana: - image: docker.elastic.co/kibana/kibana-oss:8.0.0-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q 'Looking good'"] - retries: 600 - interval: 1s - - diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml deleted file mode 100644 index 9d3555d7854..00000000000 --- a/testing/environments/snapshot.yml +++ /dev/null @@ -1,34 +0,0 @@ -# This should start the environment with the latest snapshots. - -version: '2.3' -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] - retries: 300 - interval: 1s - environment: - - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - - "indices.id_field_data.enabled=true" - - logstash: - image: docker.elastic.co/logstash/logstash@sha256:e01cf165142edf8d67485115b938c94deeda66153e9516aa2ce69ee417c5fc33 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] - retries: 600 - interval: 1s - volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro - - ./docker/logstash/pki:/etc/pki:ro - - kibana: - image: docker.elastic.co/kibana/kibana:8.0.0-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q 'Looking good'"] - retries: 600 - interval: 1s diff --git a/tests/Dockerfile b/tests/Dockerfile index 8c395a87935..7429e5273d3 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14.7 +FROM golang:1.16.6 MAINTAINER Nicolas Ruflin RUN apt-get update @@ -24,10 +24,12 @@ RUN touch -d "Yesterday" $PYTHON_ENV/build/ve/linux/bin/activate # Download module dependencies. 
WORKDIR $HOME -COPY go.mod go.sum ./ -COPY approvaltest/go.mod approvaltest/go.sum ./approvaltest/ -COPY systemtest/go.mod systemtest/go.sum ./systemtest/ -RUN go mod download +COPY --chown=$UID go.mod go.sum ./ +COPY --chown=$UID approvaltest/go.mod approvaltest/go.sum ./approvaltest/ +COPY --chown=$UID systemtest/go.mod systemtest/go.sum ./systemtest/ +COPY --chown=$UID internal/otel_collector/go.mod internal/otel_collector/go.sum ./internal/otel_collector/ +COPY --chown=$UID internal/glog/go.mod internal/glog/go.sum ./internal/glog/ +RUN go mod download all RUN cd approvaltest && go mod download RUN cd systemtest && go mod download diff --git a/tests/common.go b/tests/common.go index ccca25f5524..9c7bc35b636 100644 --- a/tests/common.go +++ b/tests/common.go @@ -17,60 +17,7 @@ package tests -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -type group struct { - str string -} - -func Group(s string) group { - return group{str: s} -} - -// StringPtr is a test helper function that returns the address of the given string -func StringPtr(s string) *string { - return &s -} - // IntPtr is a test helper function that returns the address of the given integer func IntPtr(i int) *int { return &i } - -func strConcat(pre string, post string, delimiter string) string { - if pre == "" { - return post - } - return pre + delimiter + post -} - -func differenceWithGroup(s1 *Set, s2 *Set) *Set { - s := Difference(s1, s2) - - for _, e2 := range s2.Array() { - if e2Grp, ok := e2.(group); !ok { - continue - } else { - for _, e1 := range s1.Array() { - if e1Str, ok := e1.(string); ok { - if strings.HasPrefix(e1Str, e2Grp.str) { - s.Remove(e1) - } - } - } - - } - } - return s -} - -func assertEmptySet(t *testing.T, s *Set, msg string) { - if s.Len() > 0 { - assert.Fail(t, msg) - } -} diff --git a/tests/common_test.go b/tests/common_test.go deleted file mode 100644 index 80a21ad54c6..00000000000 --- a/tests/common_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package tests - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDifferenceWithRegex(t *testing.T) { - for idx, d := range []struct { - s1, s2, diff *Set - }{ - {nil, nil, nil}, - {NewSet("a.b.c", 2, 3), nil, NewSet("a.b.c", 2, 3)}, - {nil, NewSet("a.b.c", 2, 3), nil}, - {NewSet("a.b.c", 2), NewSet(2), NewSet("a.b.c")}, - {NewSet("a.b.c", 2), NewSet(Group("b")), NewSet("a.b.c", 2)}, - {NewSet("a.b.c", "ab", "a.c", "a.b.", 3), NewSet(Group("a.b.")), NewSet("ab", "a.c", 3)}, - {NewSet("a.b.c", "a", "a.b", 3), NewSet(Group("a.b")), NewSet("a", 3)}, - {NewSet("a.b.c", "a", "a.b", 3), NewSet("a.b"), NewSet("a", "a.b.c", 3)}, - {NewSet("a.b.c", "a", "a.b", 3), NewSet("a.b*"), NewSet("a", "a.b", "a.b.c", 3)}, - } { - out := differenceWithGroup(d.s1, d.s2) - assert.ElementsMatch(t, d.diff.Array(), out.Array(), - fmt.Sprintf("Idx <%v>: Expected %v, Actual %v", idx, d.diff.Array(), out.Array())) - } - -} - -func TestStrConcat(t *testing.T) { - preIdx, postIdx, delimiterIdx, expectedIdx := 0, 1, 2, 3 - testData := [][]string{ - {"", "", "", ""}, - {"pre", "", "", "pre"}, - {"pre", "post", "", "prepost"}, - {"foo", "bar", ".", "foo.bar"}, - {"foo", "bar", ":", "foo:bar"}, - {"", "post", "", "post"}, - {"", "post", ",", "post"}, - } - for _, dataRow := range testData { - newStr := strConcat(dataRow[preIdx], dataRow[postIdx], dataRow[delimiterIdx]) - assert.Equal(t, dataRow[expectedIdx], newStr) - } -} diff --git a/tests/ecs_migration_test.go b/tests/ecs_migration_test.go deleted file mode 100644 index b6d01a12a73..00000000000 --- a/tests/ecs_migration_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-
-package tests
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/apm-server/tests/loader"
-)
-
-func TestECSMapping(t *testing.T) {
-	b, err := loader.LoadDataAsBytes("../_meta/ecs-migration.yml")
-	require.NoError(t, err)
-
-	type ECSFieldMigration struct {
-		From, To   string
-		Index      *bool
-		Documented *bool
-	}
-
-	var ecsMigration []ECSFieldMigration
-	err = yaml.Unmarshal(b, &ecsMigration)
-	require.NoError(t, err)
-
-	fieldNames, err := fetchFlattenedFieldNames([]string{"../_meta/fields.common.yml"})
-	require.NoError(t, err)
-
-	for _, field := range ecsMigration {
-		if field.Index == nil || *field.Index || (field.Documented != nil && *field.Documented) {
-			assert.True(t, fieldNames.Contains(field.To), "ECS field was expected in template: "+field.To)
-			assert.False(t, fieldNames.Contains(field.From), "6.x field was not expected in template: "+field.From)
-		} else {
-			assert.False(t, fieldNames.Contains(field.To), "non-indexed field not expected in template: "+field.To)
-			assert.False(t, fieldNames.Contains(field.From), "non-indexed field not expected in template: "+field.From)
-		}
-
-	}
-
-}
diff --git a/tests/fields.go b/tests/fields.go
deleted file mode 100644
index a33c49d4d4e..00000000000
--- a/tests/fields.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package tests
-
-import (
-	"fmt"
-	"io/ioutil"
-	"sort"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/elastic/beats/v7/libbeat/common"
-	"github.com/elastic/beats/v7/libbeat/mapping"
-
-	"github.com/elastic/apm-server/tests/loader"
-)
-
-// This test checks
-// * that all payload attributes are reflected in the ES template,
-// except for attributes that should not be indexed in ES;
-// * that all attributes in the ES template are also included in the payload,
-// to ensure full test coverage.
-// Parameters:
-// - payloadAttrsNotInFields: attributes sent with the payload that should not be
-// indexed or are not specifically mentioned in the ES template.
-// - fieldsNotInPayload: attributes that are reflected in the fields.yml but are
-// not part of the payload, e.g. Kibana visualisation attributes.
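// ----------------------------------------------------------------------------
// Editor's sketch (not part of the original change): the exclusion parameters
// described above are Sets mixing exact field names with Group(...) prefixes,
// and the deleted differenceWithGroup helper drops every candidate covered by
// such a prefix. A minimal standalone rendering of that idea over plain string
// slices -- all identifiers here are illustrative, not this package's API:
package main

import (
	"fmt"
	"strings"
)

// differenceWithPrefixGroups returns the members of have that are neither
// listed exactly in exact nor covered by one of the prefix groups.
func differenceWithPrefixGroups(have, exact, prefixes []string) []string {
	drop := make(map[string]bool, len(exact))
	for _, e := range exact {
		drop[e] = true
	}
	var out []string
next:
	for _, h := range have {
		if drop[h] {
			continue
		}
		for _, p := range prefixes {
			if strings.HasPrefix(h, p) {
				continue next
			}
		}
		out = append(out, h)
	}
	return out
}

func main() {
	// Mirrors a case from the deleted TestDifferenceWithRegex: the "a.b."
	// prefix group removes "a.b.c" and "a.b." itself, but not "ab" or "a.c".
	fmt.Println(differenceWithPrefixGroups(
		[]string{"a.b.c", "ab", "a.c", "a.b."},
		nil,
		[]string{"a.b."},
	)) // prints: [ab a.c]
}
// ----------------------------------------------------------------------------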
-func (ps *ProcessorSetup) PayloadAttrsMatchFields(t *testing.T, payloadAttrsNotInFields, fieldsNotInPayload *Set) { - notInFields := Union(payloadAttrsNotInFields, NewSet( - Group("processor"), - //dynamically indexed: - Group("labels"), - //known not-indexed fields: - Group("transaction.custom"), - Group("error.custom"), - "url.original", - Group("http.request.socket"), - Group("http.request.env"), - Group("http.request.body"), - Group("http.request.headers"), - Group("http.response.headers"), - )) - events := fetchFields(t, ps.Proc, ps.FullPayloadPath, notInFields) - ps.EventFieldsInTemplateFields(t, events, notInFields) - - // check ES fields in event - events = fetchFields(t, ps.Proc, ps.FullPayloadPath, fieldsNotInPayload) - ps.TemplateFieldsInEventFields(t, events, fieldsNotInPayload) -} - -func (ps *ProcessorSetup) EventFieldsInTemplateFields(t *testing.T, eventFields, allowedNotInFields *Set) { - allFieldNames, err := fetchFlattenedFieldNames(ps.TemplatePaths, hasName, isEnabled) - require.NoError(t, err) - - missing := Difference(eventFields, allFieldNames) - missing = differenceWithGroup(missing, allowedNotInFields) - - assertEmptySet(t, missing, fmt.Sprintf("Event attributes not documented in fields.yml: %v", missing)) -} - -type FieldTemplateMapping struct{ Template, Mapping string } - -func (ps *ProcessorSetup) EventFieldsMappedToTemplateFields(t *testing.T, eventFields *Set, - mappings []FieldTemplateMapping) { - allFieldNames, err := fetchFlattenedFieldNames(ps.TemplatePaths, hasName, isEnabled) - require.NoError(t, err) - - var eventFieldsMapped = NewSet() - for _, val := range eventFields.Array() { - var f = val.(string) - for _, m := range mappings { - template := m.Template - starMatch := strings.HasSuffix(m.Template, ".*") - if starMatch { - template = strings.TrimRight(m.Template, ".*") - } - if strings.HasPrefix(f, template) { - if starMatch { - f = strings.Split(f, ".")[0] - } - f = strings.Replace(f, template, m.Mapping, -1) - } - } - if f != "" { - eventFieldsMapped.Add(f) - } - } - missing := Difference(eventFieldsMapped, allFieldNames) - assertEmptySet(t, missing, fmt.Sprintf("Event attributes not in fields.yml: %v", missing)) -} - -func (ps *ProcessorSetup) TemplateFieldsInEventFields(t *testing.T, eventFields, allowedNotInEvent *Set) { - allFieldNames, err := fetchFlattenedFieldNames(ps.TemplatePaths, hasName, isEnabled) - require.NoError(t, err) - - missing := Difference(allFieldNames, eventFields) - missing = differenceWithGroup(missing, allowedNotInEvent) - assertEmptySet(t, missing, fmt.Sprintf("Fields missing in event: %v", missing)) -} - -func fetchFields(t *testing.T, p TestProcessor, path string, excludedKeys *Set) *Set { - buf, err := loader.LoadDataAsBytes(path) - require.NoError(t, err) - events, err := p.Process(buf) - require.NoError(t, err) - - keys := NewSet() - for _, event := range events { - for k := range event.Fields { - if k == "@timestamp" { - continue - } - FlattenMapStr(event.Fields[k], k, excludedKeys, keys) - } - } - sortedKeys := make([]string, keys.Len()) - for i, v := range keys.Array() { - sortedKeys[i] = v.(string) - } - sort.Strings(sortedKeys) - t.Logf("Keys in events: %v", sortedKeys) - return keys -} - -func FlattenMapStr(m interface{}, prefix string, excludedKeys *Set, flattened *Set) { - if commonMapStr, ok := m.(common.MapStr); ok { - for k, v := range commonMapStr { - flattenMapStrStr(k, v, prefix, excludedKeys, flattened) - } - } else if mapStr, ok := m.(map[string]interface{}); ok { - for k, v := range mapStr { - 
flattenMapStrStr(k, v, prefix, excludedKeys, flattened) - } - } - if prefix != "" && !isExcludedKey(excludedKeys, prefix) { - flattened.Add(prefix) - } -} - -func flattenMapStrStr(k string, v interface{}, prefix string, keysBlacklist *Set, flattened *Set) { - key := strConcat(prefix, k, ".") - if !isExcludedKey(keysBlacklist, key) { - flattened.Add(key) - } - switch v := v.(type) { - case common.MapStr: - FlattenMapStr(v, key, keysBlacklist, flattened) - case map[string]interface{}: - FlattenMapStr(v, key, keysBlacklist, flattened) - case []common.MapStr: - for _, v := range v { - FlattenMapStr(v, key, keysBlacklist, flattened) - } - } -} - -func isExcludedKey(keysBlacklist *Set, key string) bool { - for _, disabledKey := range keysBlacklist.Array() { - switch k := disabledKey.(type) { - case string: - if key == k { - return true - } - case group: - if strings.HasPrefix(key, k.str) { - return true - } - default: - panic("excluded key must be string or Group") - } - } - return false -} - -func fetchFlattenedFieldNames(paths []string, filters ...filter) (*Set, error) { - fields := NewSet() - for _, path := range paths { - f, err := loadFields(path) - if err != nil { - return nil, err - } - flattenFieldNames(f, "", fields, filters...) - } - return fields, nil -} - -func flattenFieldNames(fields []mapping.Field, prefix string, flattened *Set, filters ...filter) { - for _, f := range fields { - key := strConcat(prefix, f.Name, ".") - add := true - for i := 0; i < len(filters) && add; i++ { - add = filters[i](f) - } - if add { - flattened.Add(key) - } - flattenFieldNames(f.Fields, key, flattened, filters...) - } -} - -func loadFields(yamlPath string) ([]mapping.Field, error) { - fields := []mapping.Field{} - - yaml, err := ioutil.ReadFile(yamlPath) - if err != nil { - return nil, err - } - cfg, err := common.NewConfigWithYAML(yaml, "") - if err != nil { - return nil, err - } - err = cfg.Unpack(&fields) - if err != nil { - return nil, err - } - return fields, err -} - -// false to exclude field -type filter func(mapping.Field) bool - -func hasName(f mapping.Field) bool { - return f.Name != "" -} - -func isEnabled(f mapping.Field) bool { - return f.Enabled == nil || *f.Enabled -} - -func isDisabled(f mapping.Field) bool { - return f.Enabled != nil && !*f.Enabled -} diff --git a/tests/fields_test.go b/tests/fields_test.go deleted file mode 100644 index fdf50a2949e..00000000000 --- a/tests/fields_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package tests - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/beats/v7/libbeat/common" -) - -func TestFlattenCommonMapStr(t *testing.T) { - emptyBlacklist := NewSet() - excluded := NewSet("a.bMap", "f") - expectedAll := NewSet("a", "a.bStr", "a.bMap", "a.bMap.cMap", "a.bMap.cMap.d", "a.bMap.cStr", "a.bAnotherMap", "a.bAnotherMap.e", "f") - expectedWoBlacklisted := NewSet("a", "a.bStr", "a.bMap.cStr", "a.bMap.cMap", "a.bMap.cMap.d", "a.bAnotherMap", "a.bAnotherMap.e") - expectedAllPrefixed := NewSet("pre", "pre.a", "pre.a.bStr", "pre.a.bMap", "pre.a.bMap.cMap", "pre.a.bMap.cMap.d", "pre.a.bMap.cStr", "pre.a.bAnotherMap", "pre.a.bAnotherMap.e", "pre.f") - expectedWithFilledInput := NewSet("prefilled", "a", "a.bStr", "a.bAnotherMap", "a.bAnotherMap.e", "a.bMap.cMap.d", "a.bMap.cStr", "a.bMap.cMap") - for idx, dataRow := range []struct { - mapData common.MapStr - prefix string - excluded *Set - input *Set - retVal *Set - }{ - {common.MapStr{}, "whatever", emptyBlacklist, NewSet(), NewSet("whatever")}, - {common.MapStr{}, "", excluded, NewSet(), NewSet()}, - {commonMapStr(), "", emptyBlacklist, NewSet(), expectedAll}, - {commonMapStr(), "", excluded, NewSet(), expectedWoBlacklisted}, - {commonMapStr(), "pre", emptyBlacklist, NewSet(), expectedAllPrefixed}, - {commonMapStr(), "", excluded, NewSet("prefilled"), expectedWithFilledInput}, - } { - FlattenMapStr(dataRow.mapData, dataRow.prefix, dataRow.excluded, dataRow.input) - expected := dataRow.retVal - diff := SymmDifference(dataRow.input, expected) - - errMsg := fmt.Sprintf("Failed for idx %v, diff: %v", idx, diff) - assert.Equal(t, 0, diff.Len(), errMsg) - } -} - -func commonMapStr() common.MapStr { - return common.MapStr{ - "a": common.MapStr{ - "bStr": "something", - "bMap": common.MapStr{ - "cMap": common.MapStr{ - "d": "something", - }, - "cStr": "", - }, - "bAnotherMap": map[string]interface{}{ - "e": 0, - }, - }, - "f": "", - } -} - -func TestLoadFields(t *testing.T) { - _, err := loadFields("non-existing") - assert.Error(t, err) -} - -func TestFlattenFieldNames(t *testing.T) { - fields, err := loadFields("./_meta/fields.yml") - require.NoError(t, err) - - expectAll := NewSet("transaction", "transaction.id", "transaction.context", "exception", "exception.http", - "exception.http.url", "exception.http.meta", "exception.stacktrace") - expectDisabled := NewSet("transaction.context", "exception.stacktrace") - expectEnabled := SymmDifference(expectAll, expectDisabled) - - allFields := NewSet() - flattenFieldNames(fields, "", allFields, hasName) - assert.Equal(t, expectAll, allFields) - - enabledFields := NewSet() - flattenFieldNames(fields, "", enabledFields, hasName, isEnabled) - assert.Equal(t, expectEnabled, enabledFields) - - disabledFields := NewSet() - flattenFieldNames(fields, "", disabledFields, hasName, isDisabled) - assert.Equal(t, expectDisabled, disabledFields) -} diff --git a/tests/json_schema.go b/tests/json_schema.go deleted file mode 100644 index dd88b373401..00000000000 --- a/tests/json_schema.go +++ /dev/null @@ -1,433 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package tests
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/elastic/beats/v7/libbeat/beat"
-	"github.com/elastic/beats/v7/libbeat/mapping"
-)
-
-type TestProcessor interface {
-	LoadPayload(string) (interface{}, error)
-	Process([]byte) ([]beat.Event, error)
-	Validate(interface{}) error
-	Decode(interface{}) error
-}
-
-type ProcessorSetup struct {
-	Proc TestProcessor
-	// path to payload that should be a full and valid example
-	FullPayloadPath string
-	// path to ES template definitions
-	TemplatePaths []string
-	// json schema string
-	Schema string
-	// prefix schema fields with this
-	SchemaPrefix string
-}
-
-type SchemaTestData struct {
-	Key       string
-	Valid     []interface{}
-	Invalid   []Invalid
-	Condition Condition
-}
-type Invalid struct {
-	Msg    string
-	Values []interface{}
-}
-
-type Condition struct {
-	// If requirements for a field apply in case of another key's absence,
-	// add the key.
-	Absence []string
-	// If requirements for a field apply in case of another key's specific values,
-	// add the key and its values.
-	Existence map[string]interface{}
-}
-
-type obj = map[string]interface{}
-
-var (
-	Str1024        = createStr(1024, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 _-")
-	Str1024Special = createStr(1024, `⌘ `)
-	Str1025        = createStr(1025, "")
-)
-
-// This test checks
-// * that all payload attributes are reflected in the json schema, except for
-// dynamic attributes that need not be specified in the schema;
-// * that all attributes in the json schema are also included in the payload,
-// to ensure full test coverage.
-// Parameters:
-// - payloadAttrsNotInSchema: attributes sent with the payload that should not be
-// specified in the schema.
-// - schemaAttrsNotInPayload: attributes that are reflected in the json schema but are
-// not part of the payload.
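// ----------------------------------------------------------------------------
// Editor's sketch (not part of the original change): both sides of the
// comparison described above are reduced to sets of dotted key paths before
// diffing. A self-contained illustration of that flattening step, mirroring
// the deleted flattenJsonKeys helper further down in this file (identifiers
// here are illustrative):
package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

// flatten records every key of a decoded JSON document as a dotted path;
// array elements contribute their keys under the array's own path.
func flatten(v interface{}, prefix string, keys map[string]bool) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			path := k
			if prefix != "" {
				path = prefix + "." + k
			}
			keys[path] = true
			flatten(child, path, keys)
		}
	case []interface{}:
		for _, child := range t {
			flatten(child, prefix, keys)
		}
	}
}

func main() {
	var doc map[string]interface{}
	_ = json.Unmarshal([]byte(`{"service":{"name":"x","agent":{"version":"1"}},"spans":[{"id":1}]}`), &doc)

	keys := map[string]bool{}
	flatten(doc, "", keys)

	var sorted []string
	for k := range keys {
		sorted = append(sorted, k)
	}
	sort.Strings(sorted)
	fmt.Println(sorted)
	// prints: [service service.agent service.agent.version service.name spans spans.id]
}
// ----------------------------------------------------------------------------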
-func (ps *ProcessorSetup) PayloadAttrsMatchJsonSchema(t *testing.T, payloadAttrsNotInSchema, schemaAttrsNotInPayload *Set) {
-	require.True(t, len(ps.Schema) > 0, "Schema must be set")
-
-	// check payload attrs in json schema
-	payload, err := ps.Proc.LoadPayload(ps.FullPayloadPath)
-	require.NoError(t, err, fmt.Sprintf("File %s not loaded", ps.FullPayloadPath))
-	payloadAttrs := NewSet()
-	flattenJsonKeys(payload, "", payloadAttrs)
-
-	ps.AttrsMatchJsonSchema(t, payloadAttrs, payloadAttrsNotInSchema, schemaAttrsNotInPayload)
-}
-
-func (ps *ProcessorSetup) AttrsMatchJsonSchema(t *testing.T, payloadAttrs, payloadAttrsNotInSchema, schemaAttrsNotInPayload *Set) {
-	schemaKeys := NewSet()
-	schema, err := ParseSchema(ps.Schema)
-	require.NoError(t, err)
-
-	FlattenSchemaNames(schema, ps.SchemaPrefix, nil, schemaKeys)
-
-	missing := Difference(payloadAttrs, schemaKeys)
-	missing = differenceWithGroup(missing, payloadAttrsNotInSchema)
-	t.Logf("schemaKeys: %s", schemaKeys)
-	assertEmptySet(t, missing, fmt.Sprintf("Json payload fields missing in schema %v", missing))
-
-	missing = Difference(schemaKeys, payloadAttrs)
-	missing = differenceWithGroup(missing, schemaAttrsNotInPayload)
-	assertEmptySet(t, missing, fmt.Sprintf("Json schema fields missing in payload %v", missing))
-}
-
-// Test that payloads missing `required` attributes fail validation.
-// - `required`: ensure required keys must not be missing or nil
-// - `conditionally required`: prepare payload according to conditions, then
-//   ensure required keys must not be missing
-func (ps *ProcessorSetup) AttrsPresence(t *testing.T, requiredKeys *Set, condRequiredKeys map[string]Condition) {
-
-	required := Union(requiredKeys, NewSet(
-		"service",
-		"service.name",
-		"service.agent",
-		"service.agent.name",
-		"service.agent.version",
-		"service.language.name",
-		"service.runtime.name",
-		"service.runtime.version",
-		"process.pid",
-	))
-
-	payload, err := ps.Proc.LoadPayload(ps.FullPayloadPath)
-	require.NoError(t, err)
-
-	payloadKeys := NewSet()
-	flattenJsonKeys(payload, "", payloadKeys)
-
-	for _, k := range payloadKeys.Array() {
-		key := k.(string)
-		_, keyLast := splitKey(key)
-
-		// test sending nil value for key
-		ps.changePayload(t, key, nil, Condition{}, upsertFn,
-			func(k string) (bool, []string) {
-				errMsgs := []string{keyLast, "did not recognize object type"}
-				return !required.ContainsStrPattern(k), errMsgs
-			},
-		)
-
-		// test removing key from payload
-		cond := condRequiredKeys[key]
-		ps.changePayload(t, key, nil, cond, deleteFn,
-			func(k string) (bool, []string) {
-				errMsgs := []string{
-					fmt.Sprintf("missing properties: \"%s\"", keyLast),
-					"did not recognize object type",
-				}
-
-				if required.ContainsStrPattern(k) {
-					return false, errMsgs
-				} else if _, ok := condRequiredKeys[k]; ok {
-					return false, errMsgs
-				}
-				return true, []string{}
-			},
-		)
-	}
-}
-
-// Test that field names indexed as `keyword` in Elasticsearch have the same
-// length limitation on the Intake API.
-// APM Server has set all keyword restrictions to length 1024.
-//
-// keywordExceptionKeys: attributes defined as keywords in the ES template, but
-// which do not require a length restriction in the json schema, e.g. due to regex
-// patterns defining a more specific restriction,
-// templateToSchema: mapping for fields that are nested or named differently at
-// the ES level than on the intake API
-func (ps *ProcessorSetup) KeywordLimitation(t *testing.T, keywordExceptionKeys *Set,
-	templateToSchema []FieldTemplateMapping) {
-
-	// fetch keyword restricted field names from ES template
-	keywordFields, err := fetchFlattenedFieldNames(ps.TemplatePaths, hasName,
-		func(f mapping.Field) bool { return f.Type == "keyword" })
-	require.NoError(t, err)
-
-	// fetch length restricted field names from json schema
-	maxLengthFilter := func(s *Schema) bool {
-		return s.MaxLength > 0
-	}
-	schemaKeys := NewSet()
-	schema, err := ParseSchema(ps.Schema)
-	require.NoError(t, err)
-	FlattenSchemaNames(schema, "", maxLengthFilter, schemaKeys)
-
-	t.Log("Schema keys:", schemaKeys.Array())
-
-	keywordFields = differenceWithGroup(keywordFields, keywordExceptionKeys)
-
-	for _, k := range keywordFields.Array() {
-		key := k.(string)
-
-		for _, ts := range templateToSchema {
-			if strings.HasPrefix(key, ts.Template) {
-				key = strings.Replace(key, ts.Template, ts.Mapping, 1)
-				break
-			}
-		}
-
-		assert.True(t, schemaKeys.Contains(key), "Expected <%s> (original: <%s>) to have the MaxLength limit set because it gets indexed as 'keyword'", key, k.(string))
-	}
-}
-
-// Test that specified values for attributes fail or pass
-// the validation accordingly.
-// The configuration and testing of valid attributes here is intended
-// to ensure correct setup and configuration to avoid false negatives.
-func (ps *ProcessorSetup) DataValidation(t *testing.T, testData []SchemaTestData) {
-	for _, d := range testData {
-		testAttrs := func(val interface{}, valid bool, msg string) {
-			ps.changePayload(t, d.Key, val, d.Condition,
-				upsertFn, func(k string) (bool, []string) {
-					return valid, []string{msg}
-				})
-		}
-
-		for _, invalid := range d.Invalid {
-			for _, v := range invalid.Values {
-				testAttrs(v, false, invalid.Msg)
-			}
-		}
-		for _, v := range d.Valid {
-			testAttrs(v, true, "")
-		}
-
-	}
-}
-
-func logPayload(t *testing.T, payload interface{}) {
-	j, _ := json.MarshalIndent(payload, "", "  ")
-	t.Log("payload:", string(j))
-}
-
-func (ps *ProcessorSetup) changePayload(
-	t *testing.T,
-	key string,
-	val interface{},
-	condition Condition,
-	changeFn func(interface{}, string, interface{}) interface{},
-	validateFn func(string) (bool, []string),
-) {
-	// load payload
-	payload, err := ps.Proc.LoadPayload(ps.FullPayloadPath)
-	require.NoError(t, err)
-
-	err = ps.Proc.Validate(payload)
-	assert.NoError(t, err, "vanilla payload did not validate")
-
-	// prepare payload according to conditions:
-
-	// - ensure specified keys are present
-	for k, val := range condition.Existence {
-		fnKey, keyToChange := splitKey(k)
-
-		payload = iterateMap(payload, "", fnKey, keyToChange, val, upsertFn)
-	}
-
-	// - ensure specified keys are absent
-	for _, k := range condition.Absence {
-		fnKey, keyToChange := splitKey(k)
-		payload = iterateMap(payload, "", fnKey, keyToChange, nil, deleteFn)
-	}
-
-	// change payload for key to test
-	fnKey, keyToChange := splitKey(key)
-	payload = iterateMap(payload, "", fnKey, keyToChange, val, changeFn)
-
-	wantLog := false
-	defer func() {
-		if wantLog {
-			logPayload(t, payload)
-		}
-	}()
-
-	// run actual validation
-	err = ps.Proc.Validate(payload)
-	if shouldValidate, errMsgs := validateFn(key); shouldValidate {
-		wantLog = !assert.NoError(t, err, fmt.Sprintf("Expected <%v> for key <%s> to be valid", val, key))
-		err = 
ps.Proc.Decode(payload) - assert.NoError(t, err) - } else { - if assert.Error(t, err, fmt.Sprintf(`Expected error for key <%v>, but received no error.`, key)) { - for _, errMsg := range errMsgs { - if strings.Contains(strings.ToLower(err.Error()), errMsg) { - return - } - } - wantLog = true - assert.Fail(t, fmt.Sprintf("Expected error to be one of %v, but was %v", errMsgs, err.Error())) - } else { - wantLog = true - } - } -} - -func createStr(n int, start string) string { - buf := bytes.NewBufferString(start) - for buf.Len() < n { - buf.WriteString("a") - } - return buf.String() -} - -func splitKey(s string) (string, string) { - idx := strings.LastIndex(s, ".") - if idx == -1 { - return "", s - } - return s[:idx], s[idx+1:] -} - -func upsertFn(m interface{}, k string, v interface{}) interface{} { - fn := func(o obj, key string, val interface{}) obj { o[key] = val; return o } - return applyFn(m, k, v, fn) -} - -func deleteFn(m interface{}, k string, v interface{}) interface{} { - fn := func(o obj, key string, _ interface{}) obj { delete(o, key); return o } - return applyFn(m, k, v, fn) -} - -func applyFn(m interface{}, k string, val interface{}, fn func(obj, string, interface{}) obj) interface{} { - switch t := m.(type) { - case obj: - fn(t, k, val) - case []interface{}: - for _, e := range m.([]interface{}) { - if eObj, ok := e.(obj); ok { - fn(eObj, k, val) - } - } - } - return m -} - -func iterateMap(m interface{}, prefix, fnKey, xKey string, val interface{}, fn func(interface{}, string, interface{}) interface{}) interface{} { - re := regexp.MustCompile(fmt.Sprintf("^%s$", fnKey)) - if d, ok := m.(obj); ok { - ma := d - if prefix == "" && fnKey == "" { - ma = fn(ma, xKey, val).(obj) - } - for k, v := range d { - key := strConcat(prefix, k, ".") - ma[k] = iterateMap(v, key, fnKey, xKey, val, fn) - if key == fnKey || re.MatchString(key) { - ma[k] = fn(ma[k], xKey, val) - } - } - return ma - } else if d, ok := m.([]interface{}); ok { - var ma []interface{} - for _, i := range d { - r := iterateMap(i, prefix, fnKey, xKey, val, fn) - ma = append(ma, r) - } - return ma - } else { - return m - } -} - -type Schema struct { - Title string - Properties map[string]*Schema - AdditionalProperties interface{} // bool or object - PatternProperties obj - Items *Schema - AllOf []*Schema - OneOf []*Schema - AnyOf []*Schema - MaxLength int -} - -func ParseSchema(s string) (*Schema, error) { - decoder := json.NewDecoder(bytes.NewBufferString(s)) - var schema Schema - err := decoder.Decode(&schema) - return &schema, err -} - -func FlattenSchemaNames(s *Schema, prefix string, filter func(*Schema) bool, flattened *Set) { - if len(s.Properties) > 0 { - for k, v := range s.Properties { - key := strConcat(prefix, k, ".") - if filter == nil || filter(v) { - flattened.Add(key) - } - FlattenSchemaNames(v, key, filter, flattened) - } - } - - if s.Items != nil { - FlattenSchemaNames(s.Items, prefix, filter, flattened) - } - - for _, schemas := range [][]*Schema{s.AllOf, s.OneOf, s.AnyOf} { - for _, e := range schemas { - FlattenSchemaNames(e, prefix, filter, flattened) - } - } -} - -func flattenJsonKeys(data interface{}, prefix string, flattened *Set) { - if d, ok := data.(obj); ok { - for k, v := range d { - key := strConcat(prefix, k, ".") - flattened.Add(key) - flattenJsonKeys(v, key, flattened) - } - } else if d, ok := data.([]interface{}); ok { - for _, v := range d { - flattenJsonKeys(v, prefix, flattened) - } - } -} diff --git a/tests/json_schema_test.go b/tests/json_schema_test.go deleted file mode 100644 
index 5c8ada0ba39..00000000000 --- a/tests/json_schema_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIterateMap(t *testing.T) { - for _, d := range []struct { - original obj - result obj - key, fnKey string - val interface{} - fn func(interface{}, string, interface{}) interface{} - }{ - {original: obj{"a": 1, "b": 2}, - result: obj{"b": 2}, - fnKey: "", key: "a", fn: deleteFn}, - {original: obj{"a": 1, "b": obj{"d": obj{"e": "x"}}}, - result: obj{"a": 1, "b": obj{"d": obj{}}}, - fnKey: "b.d", key: "e", fn: deleteFn}, - {original: obj{"a": 1, "c": []interface{}{obj{"d": "x"}, obj{"d": "y", "e": 1}}}, - result: obj{"a": 1, "c": []interface{}{obj{}, obj{"e": 1}}}, - fnKey: "c", key: "d", fn: deleteFn, - }, - {original: obj{"a": 1, "b": obj{"d": "x"}}, - result: obj{"a": 1, "b": obj{"d": "x"}}, - fnKey: "", key: "h", fn: deleteFn, - }, - {original: obj{"a": 1, "b": obj{"d": "x"}}, - result: obj{"a": 1, "b": obj{"d": "x"}}, - fnKey: "b", key: "", fn: deleteFn, - }, - {original: obj{"a": 1, "b": obj{"d": "x"}}, - result: obj{"a": 1, "b": obj{"d": "x"}}, - fnKey: "b", key: "c", fn: deleteFn, - }, - {original: obj{"a": 1, "b": obj{"c": obj{"d": "x", "e": "y"}}}, - result: obj{"a": 1, "b": obj{"c": obj{"e": "y"}}}, - fnKey: "b.[^.]", key: "d", fn: deleteFn, - }, - {original: obj{"a": 1, "b": obj{"c": obj{"d": "x", "e": "y"}, "c2": obj{"g": 1}}}, - result: obj{"a": 1, "b": obj{"c": obj{"e": "y"}, "c2": obj{"g": 1}}}, - fnKey: "b.[^.]", key: "d", fn: deleteFn, - }, - {original: obj{"a": obj{"b": obj{"c": obj{"d": "x", "e": "y"}, "c2": obj{"g": 1}}}}, - result: obj{"a": obj{"b": obj{"c": obj{"d": "x"}, "c2": obj{"g": 1}}}}, - fnKey: "a.[^.].c", key: "e", fn: deleteFn, - }, - {original: obj{"a": obj{"b": obj{"c": obj{"d": "x", "e": "y"}, "c2": obj{"g": 1}}}}, - result: obj{"a": obj{"b": obj{"c2": obj{"g": 1}}}}, - fnKey: "a.[^.]", key: "c", fn: deleteFn, - }, - {original: obj{"a": 1, "b": 2}, - result: obj{"a": "new", "b": 2}, - fnKey: "", key: "a", val: "new", fn: upsertFn, - }, - {original: obj{"a": 1, "b": obj{"d": obj{"e": "x"}}}, - result: obj{"a": 1, "b": obj{"d": obj{"e": nil}}}, - fnKey: "b.d", key: "e", val: nil, fn: upsertFn, - }, - {original: obj{"a": 1, "c": []interface{}{obj{"d": "x"}, obj{"d": "y", "e": 1}}}, - result: obj{"a": 1, "c": []interface{}{obj{"d": "new"}, obj{"d": "new", "e": 1}}}, - fnKey: "c", key: "d", val: "new", fn: upsertFn, - }, - {original: obj{"a": 1, "b": obj{"d": "x"}}, - result: obj{"a": 1, "b": obj{"d": "x"}, "h": "new"}, - fnKey: "", key: "h", val: "new", fn: upsertFn, - }, - {original: obj{"a": 1, "b": obj{"d": "x"}}, - result: obj{"a": 1, "b": obj{"d": "x"}}, - fnKey: "h", 
key: "", val: "new", fn: upsertFn, - }, - {original: obj{"a": 1, "b": obj{"c": obj{"d": "x", "e": "y"}}}, - result: obj{"a": 1, "b": obj{"c": obj{"d": "x", "e": "z"}}}, - fnKey: "b.[^.]", key: "e", val: "z", fn: upsertFn, - }, - } { - out := iterateMap(d.original, "", d.fnKey, d.key, d.val, d.fn) - assert.Equal(t, d.result, out) - } -} diff --git a/tests/kibana.go b/tests/kibana.go deleted file mode 100644 index 304d12d237a..00000000000 --- a/tests/kibana.go +++ /dev/null @@ -1,69 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package tests - -import ( - "context" - "io" - "io/ioutil" - "net/http" - "net/url" - - "github.com/pkg/errors" - - "github.com/elastic/apm-server/convert" - "github.com/elastic/apm-server/kibana" - - "github.com/elastic/beats/v7/libbeat/common" -) - -// MockKibanaClient implements the kibana.Client interface for testing purposes -type MockKibanaClient struct { - code int - body map[string]interface{} - v common.Version - connected bool -} - -// Send returns a mock http.Response based on parameters used to init the MockKibanaClient instance -func (c *MockKibanaClient) Send(_ context.Context, method, extraPath string, params url.Values, - headers http.Header, body io.Reader) (*http.Response, error) { - resp := http.Response{StatusCode: c.code, Body: ioutil.NopCloser(convert.ToReader(c.body))} - if resp.StatusCode == http.StatusBadGateway { - return nil, errors.New("testerror") - } - return &resp, nil -} - -// GetVersion returns a mock version based on parameters used to init the MockKibanaClient instance -func (c *MockKibanaClient) GetVersion(context.Context) (common.Version, error) { - return c.v, nil -} - -// SupportsVersion returns whether or not mock client is compatible with given version -func (c *MockKibanaClient) SupportsVersion(_ context.Context, v *common.Version, _ bool) (bool, error) { - if !c.connected { - return false, errors.New("unable to retrieve connection to Kibana") - } - return v.LessThanOrEqual(true, &c.v), nil -} - -// MockKibana provides a fake connection for unit tests -func MockKibana(respCode int, respBody map[string]interface{}, v common.Version, connected bool) kibana.Client { - return &MockKibanaClient{code: respCode, body: respBody, v: v, connected: connected} -} diff --git a/tests/loader/loader.go b/tests/loader/loader.go deleted file mode 100644 index 1cb7343a18f..00000000000 --- a/tests/loader/loader.go +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package loader - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/elastic/apm-server/decoder" -) - -func LoadData(file string) (map[string]interface{}, error) { - return unmarshalData(FindFile(file)) -} - -func LoadDataAsBytes(fileName string) ([]byte, error) { - return readFile(FindFile(fileName)) -} - -func LoadDataAsStream(file string) (io.ReadCloser, error) { - return fileReader(FindFile(file)) -} - -func FindFile(fileInfo ...string) (string, error) { - _, current, _, _ := runtime.Caller(0) - f := []string{filepath.Dir(current), ".."} - f = append(f, fileInfo...) - p := filepath.Join(f...) - _, err := os.Stat(p) - return p, err -} - -func fileReader(filePath string, err error) (io.ReadCloser, error) { - if err != nil { - return nil, err - } - return os.Open(filePath) -} - -func readFile(filePath string, err error) ([]byte, error) { - var f io.Reader - f, err = fileReader(filePath, err) - if err != nil { - return nil, err - } - return ioutil.ReadAll(f) -} - -func unmarshalData(filePath string, err error) (map[string]interface{}, error) { - var r io.ReadCloser - r, err = fileReader(filePath, err) - if err != nil { - return nil, err - } - defer r.Close() - return decoder.DecodeJSONData(r) -} diff --git a/tests/packaging/Dockerfile.aarch64.rpm.install b/tests/packaging/Dockerfile.aarch64.rpm.install deleted file mode 100644 index c234fe34d43..00000000000 --- a/tests/packaging/Dockerfile.aarch64.rpm.install +++ /dev/null @@ -1,11 +0,0 @@ -FROM arm64v8/centos - -RUN yum install -y initscripts - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN rpm -ivh $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git a/tests/packaging/Dockerfile.amd64.deb.install b/tests/packaging/Dockerfile.amd64.deb.install deleted file mode 100644 index 2c6a2b39097..00000000000 --- a/tests/packaging/Dockerfile.amd64.deb.install +++ /dev/null @@ -1,9 +0,0 @@ -FROM debian:jessie - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN dpkg -i $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git a/tests/packaging/Dockerfile.arm64.deb.install b/tests/packaging/Dockerfile.arm64.deb.install deleted file mode 100644 index 131505ec786..00000000000 --- a/tests/packaging/Dockerfile.arm64.deb.install +++ /dev/null @@ -1,9 +0,0 @@ -FROM arm64v8/debian - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN dpkg -i $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git a/tests/packaging/Dockerfile.i386.deb.install b/tests/packaging/Dockerfile.i386.deb.install deleted file mode 100644 index ac6c7cce630..00000000000 --- a/tests/packaging/Dockerfile.i386.deb.install +++ /dev/null @@ -1,11 +0,0 @@ -FROM debian:jessie - -RUN dpkg --add-architecture i386 && apt update && apt install libc6-i386 - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN dpkg -i $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git 
a/tests/packaging/Dockerfile.i686.rpm.install b/tests/packaging/Dockerfile.i686.rpm.install deleted file mode 100644 index 1849ea9a35b..00000000000 --- a/tests/packaging/Dockerfile.i686.rpm.install +++ /dev/null @@ -1,11 +0,0 @@ -FROM centos:7 - -RUN yum install -y initscripts glibc.i686 - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN rpm -ivh $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git a/tests/packaging/Dockerfile.x86_64.rpm.install b/tests/packaging/Dockerfile.x86_64.rpm.install deleted file mode 100644 index 2d3ae032414..00000000000 --- a/tests/packaging/Dockerfile.x86_64.rpm.install +++ /dev/null @@ -1,11 +0,0 @@ -FROM centos:7 - -RUN yum install -y initscripts - -ARG apm_server_pkg -COPY $apm_server_pkg $apm_server_pkg -RUN rpm -ivh $apm_server_pkg - -COPY test.sh test.sh - -CMD ./test.sh diff --git a/tests/packaging/package_test.go b/tests/packaging/package_test.go deleted file mode 100644 index 38f807c01f9..00000000000 --- a/tests/packaging/package_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -// +build package - -package test - -import ( - "flag" - "fmt" - "path/filepath" - "regexp" - "testing" - - "github.com/magefile/mage/sh" -) - -var ( - files = flag.String("files", "build/distributions/*", "filepath glob containing package files") -) - -type package_ struct { - arch, - path string -} - -// TestDeb ensures debian packages are created and can be installed -func TestDeb(t *testing.T) { - testInstall(t, "deb") -} - -// TestRpm ensures rpm packages are created and can be installed -func TestRpm(t *testing.T) { - testInstall(t, "rpm") -} - -// (deb|rpm) would remove check that both types of packages are created -func testInstall(t *testing.T, ext string) { - pkgs := getPackages(t, regexp.MustCompile(fmt.Sprintf(`-(\w+)\.%s$`, ext))) - if len(pkgs) == 0 { - t.Fatalf("no %ss found", ext) - } - for _, pkg := range pkgs { - t.Run(fmt.Sprintf("%s_%s", t.Name(), pkg.arch), func(t *testing.T) { - if pkg.arch == "aarch64" || pkg.arch == "arm64" { - t.Skipf("skipped package install test for %s on %s", ext, pkg.arch) - return - } - checkInstall(t, pkg.path, fmt.Sprintf("Dockerfile.%s.%s.install", pkg.arch, ext)) - }) - } -} - -func checkInstall(t *testing.T, pkg, dockerfile string) { - dir, file := filepath.Split(pkg) - imageId, err := sh.Output( - "docker", "build", "--no-cache", "-q", "-f", dockerfile, - "--build-arg", fmt.Sprintf("apm_server_pkg=%s", file), dir) - if err != nil { - t.Fatal(err) - } - if err := sh.Run("docker", "run", "--rm", imageId); err != nil { - t.Fatal(err) - } -} - -func getPackages(t *testing.T, pattern *regexp.Regexp) []package_ { - matches, err := filepath.Glob(*files) - if err != nil { - t.Fatal(err) - } - fs := make([]package_, 0) - for _, f := range matches { - if m := pattern.FindStringSubmatch(filepath.Base(f)); len(m) > 0 { - fs = append(fs, package_{arch: m[1], path: f}) - } - } - - return fs -} diff --git a/tests/packaging/test.sh b/tests/packaging/test.sh deleted file mode 100644 index c673fd2b0a2..00000000000 --- a/tests/packaging/test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -/etc/init.d/apm-server start && \ - sleep 2 && \ - stat /var/lib/apm-server/meta.json /var/log/apm-server/apm-server && \ - /etc/init.d/apm-server stop diff --git a/tests/publish.go b/tests/publish.go deleted file mode 100644 index badfffc8934..00000000000 --- a/tests/publish.go +++ /dev/null @@ -1,31 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package tests - -import ( - "context" - - "github.com/elastic/apm-server/publish" -) - -func TestReporter(reqs *[]publish.PendingReq) publish.Reporter { - return func(ctx context.Context, req publish.PendingReq) error { - *reqs = append(*reqs, req) - return nil - } -} diff --git a/tests/set.go b/tests/set.go deleted file mode 100644 index 7d11e3674df..00000000000 --- a/tests/set.go +++ /dev/null @@ -1,134 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package tests - -import ( - "fmt" - "regexp" -) - -type Set struct { - entries map[interface{}]interface{} -} - -func NewSet(entries ...interface{}) *Set { - s := Set{entries: map[interface{}]interface{}{}} - for _, v := range entries { - s.Add(v) - } - return &s -} - -func (s *Set) Add(input interface{}) { - if s == nil { - return - } - s.entries[input] = nil -} - -func (s *Set) Remove(input interface{}) { - if s == nil { - return - } - delete(s.entries, input) -} - -func (s *Set) Contains(input interface{}) bool { - if s == nil { - return false - } - if _, ok := s.entries[input]; ok { - return true - } - return false -} - -func (s *Set) ContainsStrPattern(str string) bool { - if s.Contains(str) { - return true - } - for _, entry := range s.Array() { - if entryStr, ok := entry.(string); ok { - re, err := regexp.Compile(fmt.Sprintf("^%s$", entryStr)) - if err == nil && re.MatchString(str) { - return true - } - } - } - return false -} - -func (s *Set) Copy() *Set { - cp := NewSet() - if s == nil { - return nil - } - for k := range s.entries { - cp.Add(k) - } - return cp -} - -func (s *Set) Len() int { - if s == nil { - return 0 - } - return len(s.entries) -} - -func Union(s1, s2 *Set) *Set { - if s1 == nil { - return s2.Copy() - } - if s2 == nil { - return s1.Copy() - } - s := s1.Copy() - for k := range s2.entries { - s.Add(k) - } - return s -} - -func Difference(s1, s2 *Set) *Set { - s := NewSet() - if s1 == nil { - return s - } - for k := range s1.entries { - if !s2.Contains(k) { - s.Add(k) - } - } - return s -} - -func SymmDifference(s1, s2 *Set) *Set { - return Union(Difference(s1, s2), Difference(s2, s1)) -} - -func (s *Set) Array() []interface{} { - if s == nil { - return []interface{}{} - } - a := make([]interface{}, 0, len(s.entries)) - for k := range s.entries { - a = append(a, k) - } - return a -} diff --git a/tests/set_test.go b/tests/set_test.go deleted file mode 100644 index 3c54307cddd..00000000000 --- a/tests/set_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewSet(t *testing.T) { - for _, d := range []struct { - init []interface{} - out []interface{} - }{ - {[]interface{}{"a", "b", "b"}, []interface{}{"a", "b"}}, - {[]interface{}{1, 2, 1, 0, "a"}, []interface{}{1, 2, 0, "a"}}, - {[]interface{}{}, []interface{}{}}, - {nil, []interface{}{}}, - } { - assert.ElementsMatch(t, d.out, NewSet(d.init...).Array()) - } -} - -func TestSetAdd(t *testing.T) { - for _, d := range []struct { - s *Set - add interface{} - out []interface{} - }{ - {nil, "a", []interface{}{}}, - {NewSet(), "a", []interface{}{"a"}}, - {NewSet(1, "a"), "a", []interface{}{1, "a"}}, - {NewSet(123), "a", []interface{}{123, "a"}}, - } { - d.s.Add(d.add) - assert.ElementsMatch(t, d.out, d.s.Array()) - } -} - -func TestSetRemove(t *testing.T) { - for _, d := range []struct { - s *Set - remove interface{} - out []interface{} - }{ - {nil, "a", []interface{}{}}, - {NewSet(), "a", []interface{}{}}, - {NewSet("a"), "a", []interface{}{}}, - {NewSet(123, "a"), 123, []interface{}{"a"}}, - {NewSet(123, "a", "a"), "a", []interface{}{123}}, - } { - d.s.Remove(d.remove) - assert.ElementsMatch(t, d.out, d.s.Array()) - } -} - -func TestSetContains(t *testing.T) { - for _, d := range []struct { - s *Set - input interface{} - out bool - }{ - {nil, "a", false}, - {NewSet(), "a", false}, - {NewSet(1, 2, 3), "a", false}, - {NewSet("a", "b"), "a", true}, - } { - assert.Equal(t, d.out, d.s.Contains(d.input)) - } -} - -func TestSetContainsStrPattern(t *testing.T) { - for _, d := range []struct { - s *Set - input string - out bool - }{ - {NewSet(), "a", false}, - {NewSet(1, 2, 3), "a", false}, - {NewSet("a", 1, "b"), "a", true}, - {NewSet("a.b.c.d", 1, "bc"), "b.c", false}, - {NewSet("b.c", 1, "bc"), "a.b.c", false}, - {NewSet("a.*c.d", 1), "a.b.c.d", true}, - {NewSet("a.*c.", 1), "a.b.c.d", false}, - {NewSet("a.[^.]*.c.d", 1), "a.b_d.c.d", true}, - {NewSet("a.[^.].*c*", 1), "a.b.c.d", true}, - {NewSet("*", 1), "a.b.c.d", true}, - {NewSet("a.[^.].c", 1), "a.b.x.c.d", false}, - {NewSet("metrics.samples.[^.]+.type", "a", 1), "metrics.samples.shortcounter.type", true}, - } { - assert.Equal(t, d.out, d.s.ContainsStrPattern(d.input)) - } -} - -func TestSetCopy(t *testing.T) { - for _, d := range []struct { - s *Set - out []interface{} - }{ - {nil, []interface{}{}}, - {NewSet(), []interface{}{}}, - {NewSet("a"), []interface{}{"a"}}, - {NewSet(123, "a", "a"), []interface{}{"a", 123}}, - } { - copied := d.s.Copy() - d.s.Add(500) - assert.ElementsMatch(t, d.out, copied.Array()) - } -} - -func TestSetLen(t *testing.T) { - for _, d := range []struct { - s *Set - len int - }{ - {nil, 0}, - {NewSet(), 0}, - {NewSet("a"), 1}, - {NewSet(123, "a", "a"), 2}, - } { - assert.Equal(t, d.len, d.s.Len()) - } -} - -func TestSetUnion(t *testing.T) { - for _, d := range []struct { - s1 *Set - s2 *Set - out []interface{} - }{ - {nil, nil, []interface{}{}}, - 
{nil, NewSet(1), []interface{}{1}}, - {NewSet(1), nil, []interface{}{1}}, - {NewSet(), NewSet(), []interface{}{}}, - {NewSet(34.5, "a"), NewSet(), []interface{}{34.5, "a"}}, - {NewSet(), NewSet(1), []interface{}{1}}, - {NewSet(1, 2, 3), NewSet(1, "a"), []interface{}{1, 2, 3, "a"}}, - } { - assert.ElementsMatch(t, d.out, Union(d.s1, d.s2).Array()) - } -} - -func TestSetDifference(t *testing.T) { - for _, d := range []struct { - s1 *Set - s2 *Set - out []interface{} - }{ - {nil, nil, []interface{}{}}, - {nil, NewSet("a"), []interface{}{}}, - {NewSet(34.5), nil, []interface{}{34.5}}, - {NewSet(), NewSet(), []interface{}{}}, - {NewSet(34.5, "a"), NewSet(), []interface{}{34.5, "a"}}, - {NewSet(), NewSet(1), []interface{}{}}, - {NewSet(1, 2, 3), NewSet(1, "a"), []interface{}{2, 3}}, - } { - assert.ElementsMatch(t, d.out, Difference(d.s1, d.s2).Array()) - } -} - -func TestSetSymmDifference(t *testing.T) { - for _, d := range []struct { - s1 *Set - s2 *Set - out []interface{} - }{ - {nil, nil, []interface{}{}}, - {nil, NewSet("a"), []interface{}{"a"}}, - {NewSet("a"), nil, []interface{}{"a"}}, - {NewSet(34.5), nil, []interface{}{34.5}}, - {NewSet(), NewSet(), []interface{}{}}, - {NewSet(34.5, "a"), NewSet(), []interface{}{34.5, "a"}}, - {NewSet(), NewSet(1), []interface{}{1}}, - {NewSet(1, 2, 3, 8.9, "b"), NewSet(1, "a", 8.9, "b"), []interface{}{2, 3, "a"}}, - } { - assert.ElementsMatch(t, d.out, SymmDifference(d.s1, d.s2).Array()) - } -} - -func TestSetArray(t *testing.T) { - for _, d := range []struct { - s *Set - out []interface{} - }{ - {nil, []interface{}{}}, - {NewSet(), []interface{}{}}, - {NewSet(34.5, "a"), []interface{}{34.5, "a"}}, - {NewSet(1, 1, 1), []interface{}{1}}, - } { - assert.ElementsMatch(t, d.out, d.s.Array()) - } -} diff --git a/tests/system/apmserver.py b/tests/system/apmserver.py index 9a02f511520..dd72614b890 100644 --- a/tests/system/apmserver.py +++ b/tests/system/apmserver.py @@ -13,12 +13,7 @@ from elasticsearch import Elasticsearch, NotFoundError import requests -# Add libbeat/tests/system to the import path. 
-output = subprocess.check_output(["go", "list", "-m", "-f", "{{.Path}} {{.Dir}}", "all"]).decode("utf-8") -beats_line = [line for line in output.splitlines() if line.startswith("github.com/elastic/beats/")][0] -beats_dir = beats_line.split(" ", 2)[1] -sys.path.append(os.path.join(beats_dir, 'libbeat', 'tests', 'system')) - +import libbeat_paths from beat.beat import INTEGRATION_TESTS, TestCase, TimeoutError from helper import wait_until from es_helper import cleanup, default_pipelines @@ -273,7 +268,7 @@ def wait_until_ilm_logged(self): def wait_until_pipeline_logged(self): registration_enabled = self.config().get("register_pipeline_enabled") - msg = "Registered Ingest Pipelines successfully" if registration_enabled != "false" else "No pipeline callback registered" + msg = "Registered Ingest Pipelines successfully" if registration_enabled != "false" else "Pipeline registration disabled" wait_until(lambda: self.log_contains(msg), name="pipelines registration") def load_docs_with_template(self, data_path, url, endpoint, expected_events_count, @@ -342,8 +337,8 @@ def check_for_no_smap(self, doc): def logged_requests(self, url="/intake/v2/events"): for line in self.get_log_lines(): jline = json.loads(line) - u = urlparse(jline.get("URL", "")) - if jline.get("logger") == "request" and u.path == url: + u = urlparse(jline.get("url.original", "")) + if jline.get("log.logger") == "request" and u.path == url: yield jline def approve_docs(self, base_path, received): diff --git a/tests/system/config/apm-server.yml.j2 b/tests/system/config/apm-server.yml.j2 index 3430608f75f..e67c0e9e22e 100644 --- a/tests/system/config/apm-server.yml.j2 +++ b/tests/system/config/apm-server.yml.j2 @@ -109,34 +109,6 @@ apm-server: register.ingest.pipeline.overwrite: {{ register_pipeline_overwrite }} {% endif %} - {% if instrumentation_enabled %} - instrumentation.enabled: {{ instrumentation_enabled }} - {% endif %} - {% if instrumentation_host %} - instrumentation.hosts: [{{ instrumentation_host }}] - {% endif %} - {% if instrumentation_api_key %} - instrumentation.api_key: {{ instrumentation_api_key }} - {% endif %} - {% if instrumentation_secret_token %} - instrumentation.secret_token: {{ instrumentation_secret_token }} - {% endif %} - {% if profiling_cpu_enabled %} - instrumentation.profiling.cpu.enabled: {{ profiling_cpu_enabled }} - {% endif %} - {% if profiling_cpu_interval %} - instrumentation.profiling.cpu.interval: {{ profiling_cpu_interval }} - {% endif %} - {% if profiling_cpu_duration %} - instrumentation.profiling.cpu.duration: {{ profiling_cpu_duration }} - {% endif %} - {% if profiling_heap_enabled %} - instrumentation.profiling.heap.enabled: {{ profiling_heap_enabled }} - {% endif %} - {% if profiling_heap_interval %} - instrumentation.profiling.heap.interval: {{ profiling_heap_interval }} - {% endif %} - {% if aggregation_enabled %} aggregation.transactions.enabled: {{ aggregation_enabled }} {% endif %} @@ -162,7 +134,7 @@ apm-server: {% if ilm_custom_suffix %} ilm.setup.mapping: - event_type: "error" - index_suffix: "custom" + index_suffix: "CUSTOM" - event_type: "transaction" index_suffix: "foo" {% endif %} @@ -207,37 +179,6 @@ apm-server: {% endif %} {% if acm_cache_expiration is not none %} agent.config.cache.expiration: {{ acm_cache_expiration }}{% endif %} - -################### Libbeat instrumentation ############################### -{% if libbeat_instrumentation_enabled %} -instrumentation.enabled: {{ libbeat_instrumentation_enabled }} -{% endif %} -{% if libbeat_instrumentation_host %} 
-instrumentation.hosts: [{{ libbeat_instrumentation_host }}]
-{% endif %}
-{% if libbeat_instrumentation_api_key %}
-instrumentation.api_key: {{ libbeat_instrumentation_api_key }}
-{% endif %}
-{% if libbeat_instrumentation_secret_token %}
-instrumentation.secret_token: {{ libbeat_instrumentation_secret_token }}
-{% endif %}
-{% if libbeat_profiling_cpu_enabled %}
-instrumentation.profiling.cpu.enabled: {{ libbeat_profiling_cpu_enabled }}
-{% endif %}
-{% if libbeat_profiling_cpu_interval %}
-instrumentation.profiling.cpu.interval: {{ libbeat_profiling_cpu_interval }}
-{% endif %}
-{% if libbeat_profiling_cpu_duration %}
-instrumentation.profiling.cpu.duration: {{ libbeat_profiling_cpu_duration }}
-{% endif %}
-{% if libbeat_profiling_heap_enabled %}
-instrumentation.profiling.heap.enabled: {{ libbeat_profiling_heap_enabled }}
-{% endif %}
-{% if libbeat_profiling_heap_interval %}
-instrumentation.profiling.heap.interval: {{ libbeat_profiling_heap_interval }}
-{% endif %}
-
-
 ############################# Setup ##########################################
 
 {% if override_template %}
@@ -308,46 +249,16 @@ output.elasticsearch:
 
 ############################# Logging #########################################
 
-{% if logging_json or logging_level %}
-logging:
-{% else %}
-#logging:
-{% endif %}
-  # Send all logging output to syslog. On Windows default is false, otherwise
-  # default is true.
-  #to_syslog: true
-
-  # Write all logging output to files. Beats automatically rotate files if configurable
-  # limit is reached.
-  #to_files: false
-
-  # Enable debug output for selected components.
-  #selectors: []
-
 {% if logging_json %}
-  # Set to true to log messages in json format.
-  json: {{ logging_json }}
+# Defaults to true.
+logging.json: {{ logging_json }}
 {% endif %}
-{% if logging_level %}
-  # Set log level
-  level: {{ logging_level }}
+{% if logging_ecs %}
+# Defaults to true.
+logging.ecs: {{ logging_ecs }}
 {% endif %}
 
-  #files:
-    # The directory where the log files will be written to.
-    #path: /var/log/apm-server
-
-    # The name of the files where the logs are written to.
-    #name: apm-server
-
-    # Configure log file size limit. If limit is reached, log file will be
-    # automatically rotated
-    #rotateeverybytes: 10485760 # = 10MB
-
-    # Number of rotated log files to keep. Oldest files will be deleted first.
- #keepfiles: 7 - queue.mem.flush.min_events: {{ queue_flush }} ############################# X-pack Monitoring ############################### diff --git a/tests/system/drop_unsampled_transactions.approved.json b/tests/system/drop_unsampled_transactions.approved.json deleted file mode 100644 index 5b08443a689..00000000000 --- a/tests/system/drop_unsampled_transactions.approved.json +++ /dev/null @@ -1,409 +0,0 @@ -[ - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:11.025737Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "ebd89158-9f85-469e-9bc0-9e19b174a1ba", - "hostname": "goat", - "id": "01019c57-ff25-4ddf-b9e9-2f421577a9a2", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281000 - }, - "trace": { - "id": "85925e55b43f4340aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4340", - "name": "GET /api/types", - "result": "failure", - "sampled": true, - "span_count": { - "started": 0 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:11.025919Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "d0a481f0-3688-40dc-8a59-04abdf72261a", - "hostname": "goat", - "id": "fb7e85d9-3666-4b9a-8440-1448c17b5738", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281999 - }, - "trace": { - "id": "85925e55b43f4342aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4342", - "name": "GET /api/types", - "result": "200", - "sampled": true, - "span_count": { - "dropped": 258, - 
"started": 1 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "js-base", - "version": "1.3" - }, - "client": { - "geo": { - "continent_name": "North America", - "country_iso_code": "US", - "location": { - "lat": 37.751, - "lon": -97.822 - } - }, - "ip": "8.8.8.8" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:10.976907Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "http": { - "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } - }, - "cookies": { - "c1": "v1", - "c2": "v2" - }, - "env": { - "GATEWAY_INTERFACE": "CGI/1.1", - "SERVER_SOFTWARE": "nginx" - }, - "headers": { - "Array": [ - "foo", - "bar", - "baz" - ], - "Content-Type": [ - "text/html" - ], - "Cookie": [ - "c1=v1,c2=v2" - ], - "Some-Other-Header": [ - "foo" - ], - "User-Agent": [ - "Mozilla Chrome Edge" - ] - }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } - }, - "response": { - "finished": true, - "headers": { - "Content-Type": [ - "application/json" - ] - }, - "headers_sent": true, - "status_code": 200 - }, - "version": "1.1" - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "labels": { - "bool_error": false, - "number_code": 2, - "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8" - }, - "observer": { - "ephemeral_id": "d0a481f0-3688-40dc-8a59-04abdf72261a", - "hostname": "goat", - "id": "fb7e85d9-3666-4b9a-8440-1448c17b5738", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "serviceabc", - "node": { - "name": "special-name" - }, - "runtime": { - "name": "javascript", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "source": { - "ip": "8.8.8.8" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "custom": { - "(": "not a valid regex and that is fine", - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foo bar" - }, - "duration": { - "us": 32592 - }, - "id": "945254c567a5417e", - "marks": { - "another_mark": { - "some_float": 10, - "some_long": 10 - }, - "navigationTiming": { - "appBeforeBootstrap": 608.9300000000001, - "navigationStart": -21 - } - }, - "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, - "result": "success", - "sampled": true, - "span_count": { - "dropped": 2, - "started": 4 - }, - "type": "request" - }, - "url": { - "domain": "www.example.com", - "fragment": "#hash", - "full": "https://www.example.com/p/a/t/h?query=string#hash", - "original": "/p/a/t/h?query=string#hash", - "path": "/p/a/t/h", - "port": 8080, - "query": "?query=string", - "scheme": "https" - 
}, - "user": { - "email": "foo@example.com", - "id": "99" - }, - "user_agent": { - "device": { - "name": "Other" - }, - "name": "Other", - "original": "Mozilla Chrome Edge" - } - } -] \ No newline at end of file diff --git a/tests/system/error.approved.json b/tests/system/error.approved.json index 041a2eebf68..c3a79d58b14 100644 --- a/tests/system/error.approved.json +++ b/tests/system/error.approved.json @@ -9,10 +9,11 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "grouping_key": "d6b3f958dfea98dc9ed2b57d5f0c48bb", + "grouping_name": "Cannot read property 'baz' of undefined", "id": "0f0e9d67c1854d21a6f44673ed561ec8", "log": { "level": "custom log level", @@ -40,6 +41,7 @@ "tag1": "one", "tag2": 2 }, + "message": "Cannot read property 'baz' of undefined", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", @@ -95,6 +97,7 @@ "geo": { "continent_name": "North America", "country_iso_code": "US", + "country_name": "United States", "location": { "lat": 37.751, "lon": -97.822 @@ -106,7 +109,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "culprit": "my.module.function_name", @@ -194,6 +197,7 @@ } ], "grouping_key": "50f62f37edffc4630c6655ba3ecfcf46", + "grouping_name": "My service could not talk to the database named foobar", "id": "5f0e9d64c1854d21a6f44673ed561ec8", "log": { "level": "warning", @@ -216,7 +220,6 @@ "exclude_from_grouping": false, "filename": "/webpack/file/name.py", "function": "foo", - "library_frame": false, "line": { "column": 4, "context": "line3", @@ -261,10 +264,6 @@ } } ] - }, - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" } }, "event": { @@ -279,9 +278,7 @@ }, "http": { "request": { - "body": { - "original": "Hello World" - }, + "body.original": "Hello World", "cookies": { "c1": "v1", "c2": "v2" @@ -309,12 +306,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -340,6 +333,7 @@ "tag1": "one", "tag2": 2 }, + "message": "My service could not talk to the database named foobar", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", @@ -420,7 +414,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -429,6 +423,7 @@ } ], "grouping_key": "18f82051862e494727fa20e0adc15711", + "grouping_name": null, "id": "7f0e9d68c1854d21a6f44673ed561ec8" }, "event": { @@ -507,7 +502,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "error": { "exception": [ @@ -517,6 +512,7 @@ } ], "grouping_key": "f6b5a2877d9b00d5b32b44c9db039f11", + "grouping_name": "foo is not defined", "id": "8f0e9d68c1854d21a6f44673ed561ec8" }, "event": { @@ -540,6 +536,7 @@ "tag1": "one", "tag2": 2 }, + "message": "foo is not defined", "observer": { "ephemeral_id": "f1838cde-80dd-4af5-b7ac-ffc2d3fccc9d", "hostname": "ix.lan", diff --git a/tests/system/jaeger_batch_0.approved.json b/tests/system/jaeger_batch_0.approved.json deleted file mode 100644 index e43dbcb451f..00000000000 --- a/tests/system/jaeger_batch_0.approved.json +++ /dev/null @@ -1,258 +0,0 @@ -[ - { - "@timestamp": "2019-12-20T07:41:44.953Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": 
"Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-04-22T14:57:44.517370Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "labels": { - "as": "thrift", - "peer_ipv4": 2130706433, - "peer_port": 50535, - "peer_service": "driver-client", - "sampler_param": true, - "sampler_type": "const" - }, - "observer": { - "ephemeral_id": "22e70040-bc51-4c7d-922b-8e88f5050bfc", - "hostname": "ix.lan", - "id": "5387ac94-dc7a-44f5-8a0e-86713eaaf6a1", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827704953864 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "duration": { - "us": 243417 - }, - "id": "7be2fd98d0973be3", - "name": "Driver::findNearest", - "result": "Success", - "sampled": true, - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.007Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-04-22T14:57:44.568016Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "22e70040-bc51-4c7d-922b-8e88f5050bfc", - "hostname": "ix.lan", - "id": "5387ac94-dc7a-44f5-8a0e-86713eaaf6a1", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705007552 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.089Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-04-22T14:57:44.572837Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "22e70040-bc51-4c7d-922b-8e88f5050bfc", - "hostname": "ix.lan", - "id": "5387ac94-dc7a-44f5-8a0e-86713eaaf6a1", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705089431 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.172Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": 
"redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-04-22T14:57:44.573182Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "22e70040-bc51-4c7d-922b-8e88f5050bfc", - "hostname": "ix.lan", - "id": "5387ac94-dc7a-44f5-8a0e-86713eaaf6a1", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705172530 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - } -] \ No newline at end of file diff --git a/tests/system/jaeger_batch_0_auth_tag_removed.approved.json b/tests/system/jaeger_batch_0_auth_tag_removed.approved.json deleted file mode 100644 index d4fff61f94a..00000000000 --- a/tests/system/jaeger_batch_0_auth_tag_removed.approved.json +++ /dev/null @@ -1,259 +0,0 @@ -[ - { - "@timestamp": "2019-12-20T07:41:44.953Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T09:58:54.379837Z", - "outcome": "unknown" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "labels": { - "as": "thrift", - "peer_ipv4": 2130706433, - "peer_port": 50535, - "peer_service": "driver-client", - "sampler_param": true, - "sampler_type": "const" - }, - "observer": { - "ephemeral_id": "b759a57d-4daa-4708-a3ad-18c50d8c940f", - "hostname": "ix.lan", - "id": "3dfbe204-d39c-4f40-8283-de713063099c", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827704953864 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "duration": { - "us": 243417 - }, - "id": "7be2fd98d0973be3", - "name": "Driver::findNearest", - "result": "Success", - "sampled": true, - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.007Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:58:54.454089Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "97a0be74-0ec2-433d-b3f4-8853ae5504c4", - "hostname": "goat", - "id": "a20fd88c-dddf-4e6e-8bcb-b13e08e0b4ce", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705007552 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": 
"2019-12-20T07:41:45.089Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:58:54.454199Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "97a0be74-0ec2-433d-b3f4-8853ae5504c4", - "hostname": "goat", - "id": "a20fd88c-dddf-4e6e-8bcb-b13e08e0b4ce", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705089431 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.172Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:58:54.463556Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "97a0be74-0ec2-433d-b3f4-8853ae5504c4", - "hostname": "goat", - "id": "a20fd88c-dddf-4e6e-8bcb-b13e08e0b4ce", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705172530 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - } -] \ No newline at end of file diff --git a/tests/system/jaeger_batch_0_authorization.approved.json b/tests/system/jaeger_batch_0_authorization.approved.json deleted file mode 100644 index e83d0d9c1a4..00000000000 --- a/tests/system/jaeger_batch_0_authorization.approved.json +++ /dev/null @@ -1,259 +0,0 @@ -[ - { - "@timestamp": "2019-12-20T07:41:44.953Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T09:59:10.729866Z", - "outcome": "unknown" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "labels": { - "as": "thrift", - "peer_ipv4": 2130706433, - "peer_port": 50535, - "peer_service": "driver-client", - "sampler_param": true, - "sampler_type": "const" - }, - "observer": { - "ephemeral_id": "f672294a-23de-44c6-82c1-5a1487d72e4a", - "hostname": "ix.lan", - "id": "09366851-25e3-463f-bc4a-28c00c792112", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827704953864 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - 
"transaction": { - "duration": { - "us": 243417 - }, - "id": "7be2fd98d0973be3", - "name": "Driver::findNearest", - "result": "Success", - "sampled": true, - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.007Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:59:10.788604Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "f2414fe9-88b3-4a35-ad5a-6951ba183d43", - "hostname": "goat", - "id": "78749c0f-d2f4-42e7-a8c2-a276a77f2555", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705007552 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.089Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:59:10.794738Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "f2414fe9-88b3-4a35-ad5a-6951ba183d43", - "hostname": "goat", - "id": "78749c0f-d2f4-42e7-a8c2-a276a77f2555", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705089431 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - }, - { - "@timestamp": "2019-12-20T07:41:45.172Z", - "agent": { - "ephemeral_id": "624386e9c81d2980", - "name": "Jaeger/Go", - "version": "2.20.1" - }, - "ecs": { - "version": "1.5.0" - }, - "error": { - "exception": [ - { - "message": "redis timeout" - } - ], - "grouping_key": "dd09a7d0d9dde0adfcd694967c5a88de", - "log": { - "message": "Retrying GetDriver after error" - } - }, - "event": { - "ingested": "2020-08-11T09:59:10.794838Z" - }, - "host": { - "hostname": "host01", - "ip": "10.0.0.13", - "name": "host01" - }, - "observer": { - "ephemeral_id": "f2414fe9-88b3-4a35-ad5a-6951ba183d43", - "hostname": "goat", - "id": "78749c0f-d2f4-42e7-a8c2-a276a77f2555", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "parent": { - "id": "7be2fd98d0973be3" - }, - "processor": { - "event": "error", - "name": "error" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "driver", - "node": { - "name": "host01" - } - }, - "timestamp": { - "us": 1576827705172530 - }, - "trace": { - "id": "7be2fd98d0973be3" - }, - "transaction": { - "id": "7be2fd98d0973be3", - "type": "custom" - } - } -] \ No 
newline at end of file diff --git a/tests/system/jaeger_span.approved.json b/tests/system/jaeger_span.approved.json deleted file mode 100644 index 8c4276585bf..00000000000 --- a/tests/system/jaeger_span.approved.json +++ /dev/null @@ -1,63 +0,0 @@ -[ - { - "@timestamp": "2020-01-08T02:48:51.616Z", - "agent": { - "ephemeral_id": "3a5c6b00dd41a605", - "name": "Jaeger/Go", - "version": "2.21.2-dev" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T09:59:01.990087Z", - "outcome": "unknown" - }, - "host": { - "hostname": "alloy", - "ip": "10.1.1.101", - "name": "alloy" - }, - "labels": { - "sampler_param": true, - "sampler_type": "const" - }, - "observer": { - "ephemeral_id": "5b7f950d-a9d1-460d-b133-29d214466fd7", - "hostname": "ix.lan", - "id": "ec06f895-6301-4055-bab7-0d612933c9fd", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "language": { - "name": "Go" - }, - "name": "test_service", - "node": { - "name": "alloy" - } - }, - "timestamp": { - "us": 1578451731616515 - }, - "trace": { - "id": "5025e08c7fef6542" - }, - "transaction": { - "duration": { - "us": 2 - }, - "id": "5025e08c7fef6542", - "name": "test_span", - "result": "Success", - "sampled": true, - "type": "custom" - } - } -] \ No newline at end of file diff --git a/tests/system/jaegergrpc/main.go b/tests/system/jaegergrpc/main.go deleted file mode 100644 index 203ee59b5fa..00000000000 --- a/tests/system/jaegergrpc/main.go +++ /dev/null @@ -1,105 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - - "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "google.golang.org/grpc" -) - -var ( - serverAddr = flag.String("addr", "localhost:14250", "Jaeger gRPC server address") - insecure = flag.Bool("insecure", false, "Disable certificate verification") - endpoint = flag.String("endpoint", "collector", "Which Jaeger gRPC endpoint to call ('collector', 'sampler')") - service = flag.String("service", "xyz", "Service for which sampling rate should be fetched") - path = flag.String("out", "", "Output path for sampling response") -) - -func main() { - flag.Parse() - if *endpoint != "sampler" && flag.NArg() == 0 { - fmt.Fprintf(os.Stderr, "Usage: %s [flags] <file> [<file> ...]\n", filepath.Base(os.Args[0])) - os.Exit(2) - } - - var opts []grpc.DialOption - if *insecure { - opts = append(opts, grpc.WithInsecure()) - } - - conn, err := grpc.Dial(*serverAddr, opts...) 
- if err != nil { - log.Fatal(err) - } - defer conn.Close() - - switch *endpoint { - case "sampler": - if *path == "" { - log.Fatal("output path missing") - } - p, err := filepath.Abs(*path) - if err != nil { - log.Fatal(err) - } - os.Remove(p) - client := api_v2.NewSamplingManagerClient(conn) - resp, err := client.GetSamplingStrategy(context.Background(), &api_v2.SamplingStrategyParameters{ServiceName: *service}) - var out string - if err != nil { - out = err.Error() - } else { - out = fmt.Sprintf("strategy: %s, sampling rate: %v", resp.StrategyType.String(), resp.ProbabilisticSampling.SamplingRate) - } - if err := ioutil.WriteFile(p, []byte(out), 0644); err != nil { - log.Fatal(err) - } - - default: - client := api_v2.NewCollectorServiceClient(conn) - for _, arg := range flag.Args() { - request, err := decodeRequest(arg) - if err != nil { - log.Fatal(err) - } - _, err = client.PostSpans(context.Background(), request) - if err != nil { - log.Fatal(err) - } - } - } -} - -func decodeRequest(filename string) (*api_v2.PostSpansRequest, error) { - var request api_v2.PostSpansRequest - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - return &request, json.NewDecoder(f).Decode(&request) -} diff --git a/tests/system/keep_unsampled_transactions.approved.json b/tests/system/keep_unsampled_transactions.approved.json deleted file mode 100644 index 609ccaea068..00000000000 --- a/tests/system/keep_unsampled_transactions.approved.json +++ /dev/null @@ -1,505 +0,0 @@ -[ - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:14.274519Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "3f31e73c-2fdf-4f39-8c81-57b25d00fc16", - "hostname": "goat", - "id": "0f00bacd-a23e-41c5-a2fb-4d90b2776018", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281000 - }, - "trace": { - "id": "85925e55b43f4340aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4340", - "name": "GET /api/types", - "result": "failure", - "sampled": true, - "span_count": { - "started": 0 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@timestamp": "2017-05-30T18:53:42.000Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:14.274681Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": 
"namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "1c8e1e58-0c93-4a11-9395-687a4fd608b6", - "hostname": "goat", - "id": "6d235bfa-00c6-46d3-83f4-d52a555ac820", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422000000 - }, - "trace": { - "id": "85925e55b43f4341aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4341", - "name": "GET /api/types", - "result": "200", - "sampled": false, - "span_count": { - "started": 0 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:14.274782Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "observer": { - "ephemeral_id": "1c8e1e58-0c93-4a11-9395-687a4fd608b6", - "hostname": "goat", - "id": "6d235bfa-00c6-46d3-83f4-d52a555ac820", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "1234_service-12a3", - "node": { - "name": "container-id" - }, - "runtime": { - "name": "node", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "timestamp": { - "us": 1496170422281999 - }, - "trace": { - "id": "85925e55b43f4342aaaaaaaaaaaaaaaa" - }, - "transaction": { - "duration": { - "us": 13980 - }, - "id": "85925e55b43f4342", - "name": "GET /api/types", - "result": "200", - "sampled": true, - "span_count": { - "dropped": 258, - "started": 1 - }, - "type": "request" - }, - "user": { - "email": "foo@bar.com", - "id": "123user", - "name": "foo" - } - }, - { - "@timestamp": "2017-05-30T18:53:27.154Z", - "agent": { - "name": "js-base", - "version": "1.3" - }, - "client": { - "geo": { - "continent_name": "North America", - "country_iso_code": "US", - "location": { - "lat": 37.751, - "lon": -97.822 - } - }, - "ip": "8.8.8.8" - }, - "container": { - "id": "container-id" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-08-11T10:03:14.218629Z", - "outcome": "unknown" - }, - "host": { - "architecture": "x64", - "ip": "127.0.0.1", - "os": { - "platform": "darwin" - } - }, - "http": { - "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } - }, - "cookies": { 
- "c1": "v1", - "c2": "v2" - }, - "env": { - "GATEWAY_INTERFACE": "CGI/1.1", - "SERVER_SOFTWARE": "nginx" - }, - "headers": { - "Array": [ - "foo", - "bar", - "baz" - ], - "Content-Type": [ - "text/html" - ], - "Cookie": [ - "c1=v1,c2=v2" - ], - "Some-Other-Header": [ - "foo" - ], - "User-Agent": [ - "Mozilla Chrome Edge" - ] - }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } - }, - "response": { - "finished": true, - "headers": { - "Content-Type": [ - "application/json" - ] - }, - "headers_sent": true, - "status_code": 200 - }, - "version": "1.1" - }, - "kubernetes": { - "namespace": "namespace1", - "pod": { - "name": "pod-name", - "uid": "pod-uid" - } - }, - "labels": { - "bool_error": false, - "number_code": 2, - "organization_uuid": "9f0e9d64-c185-4d21-a6f4-4673ed561ec8" - }, - "observer": { - "ephemeral_id": "1c8e1e58-0c93-4a11-9395-687a4fd608b6", - "hostname": "goat", - "id": "6d235bfa-00c6-46d3-83f4-d52a555ac820", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "args": [ - "node", - "server.js" - ], - "pid": 1234, - "ppid": 6789, - "title": "node" - }, - "processor": { - "event": "transaction", - "name": "transaction" - }, - "service": { - "environment": "staging", - "framework": { - "name": "Express", - "version": "1.2.3" - }, - "language": { - "name": "ecmascript", - "version": "8" - }, - "name": "serviceabc", - "node": { - "name": "special-name" - }, - "runtime": { - "name": "javascript", - "version": "8.0.0" - }, - "version": "5.1.3" - }, - "source": { - "ip": "8.8.8.8" - }, - "timestamp": { - "us": 1496170407154000 - }, - "trace": { - "id": "945254c567a5417eaaaaaaaaaaaaaaaa" - }, - "transaction": { - "custom": { - "(": "not a valid regex and that is fine", - "and_objects": { - "foo": [ - "bar", - "baz" - ] - }, - "my_key": 1, - "some_other_value": "foo bar" - }, - "duration": { - "us": 32592 - }, - "id": "945254c567a5417e", - "marks": { - "another_mark": { - "some_float": 10, - "some_long": 10 - }, - "navigationTiming": { - "appBeforeBootstrap": 608.9300000000001, - "navigationStart": -21 - } - }, - "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, - "result": "success", - "sampled": true, - "span_count": { - "dropped": 2, - "started": 4 - }, - "type": "request" - }, - "url": { - "domain": "www.example.com", - "fragment": "#hash", - "full": "https://www.example.com/p/a/t/h?query=string#hash", - "original": "/p/a/t/h?query=string#hash", - "path": "/p/a/t/h", - "port": 8080, - "query": "?query=string", - "scheme": "https" - }, - "user": { - "email": "foo@example.com", - "id": "99" - }, - "user_agent": { - "device": { - "name": "Other" - }, - "name": "Other", - "original": "Mozilla Chrome Edge" - } - } -] \ No newline at end of file diff --git a/tests/system/kibana.py b/tests/system/kibana.py index d84706d4b1e..c69c8b5601c 100644 --- a/tests/system/kibana.py +++ b/tests/system/kibana.py @@ -9,16 +9,10 @@ def __init__(self, url): self.url = url def create_agent_config(self, service, settings, agent=None, env=None): - resp = self._upsert_agent_config(service, settings, agent=agent, env=env) - result = resp.json()["result"] - assert result == "created", result - return resp + return self._upsert_agent_config(service, settings, agent=agent, env=env) def create_or_update_agent_config(self, service, settings, agent=None, env=None): - resp = 
self._upsert_agent_config(service, settings, agent=agent, env=env, overwrite=True) - result = resp.json()["result"] - assert result in ("created", "updated"), result - return resp + return self._upsert_agent_config(service, settings, agent=agent, env=env, overwrite=True) def _upsert_agent_config(self, service, settings, agent=None, env=None, overwrite=False): data = { @@ -61,7 +55,7 @@ def list_agent_config(self): } ) assert resp.status_code == 200, resp.status_code - return resp.json() + return resp.json()['configurations'] def delete_agent_config(self, service, env=None): data = { diff --git a/tests/system/libbeat_paths.py b/tests/system/libbeat_paths.py new file mode 100644 index 00000000000..75e7c5b3aa1 --- /dev/null +++ b/tests/system/libbeat_paths.py @@ -0,0 +1,9 @@ +import os.path +import subprocess +import sys + +# Add libbeat/tests/system to the import path. +output = subprocess.check_output(["go", "list", "-m", "-f", "{{.Path}} {{.Dir}}", "all"]).decode("utf-8") +beats_line = [line for line in output.splitlines() if line.startswith("github.com/elastic/beats/")][0] +beats_dir = beats_line.split(" ", 2)[1] +sys.path.append(os.path.join(beats_dir, 'libbeat', 'tests', 'system')) diff --git a/tests/system/metricset.approved.json b/tests/system/metricset.approved.json deleted file mode 100644 index 3d7bd1ffa4d..00000000000 --- a/tests/system/metricset.approved.json +++ /dev/null @@ -1,235 +0,0 @@ -[ - { - "@timestamp": "2017-05-30T18:53:41.364Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-04-22T14:55:05.425020Z" - }, - "go": { - "memstats": { - "heap": { - "sys": { - "bytes": 6520832.0 - } - } - } - }, - "host": { - "ip": "127.0.0.1" - }, - "labels": { - "tag1": "one", - "tag2": 2 - }, - "observer": { - "ephemeral_id": "8785cbe1-7f89-4279-84c2-6c33979531fb", - "hostname": "ix.lan", - "id": "b0cfe4b7-76c9-4159-95ff-e558db368cbe", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - }, - { - "@timestamp": "2017-05-30T18:53:41.366Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-09-08T15:57:10.396695Z" - }, - "host": { - "ip": "127.0.0.1" - }, - "labels": { - "tag1": "one", - "tag2": 2 - }, - "observer": { - "ephemeral_id": "2f30050f-81e6-491a-a54f-e7d94eec17b5", - "hostname": "simmac.net", - "id": "02f6cb38-c1ce-4382-9478-4c8b4cdbda9c", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "system": { - "process": { - "cgroup": { - "memory": { - "mem": { - "limit": { - "bytes": 2048 - }, - "usage": { - "bytes": 1024 - } - }, - "stats": { - "inactive_file": { - "bytes": 48 - } - } - } - } - } - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - }, - { - "@timestamp": "2017-05-30T18:53:42.281Z", - "agent": { - "name": "elastic-node", - "version": "3.14.0" - }, - "byte_counter": 1, - 
"dotted": { - "float": { - "gauge": 6.12 - } - }, - "double_gauge": 3.141592653589793, - "ecs": { - "version": "1.5.0" - }, - "event": { - "ingested": "2020-04-22T14:55:05.368308Z" - }, - "float_gauge": 9.16, - "host": { - "ip": "127.0.0.1" - }, - "integer_gauge": 42767, - "labels": { - "code": 200, - "some": "abc", - "success": true, - "tag1": "one", - "tag2": 2 - }, - "long_gauge": 3147483648.0, - "negative": { - "d": { - "o": { - "t": { - "t": { - "e": { - "d": -1022 - } - } - } - } - } - }, - "observer": { - "ephemeral_id": "8785cbe1-7f89-4279-84c2-6c33979531fb", - "hostname": "ix.lan", - "id": "b0cfe4b7-76c9-4159-95ff-e558db368cbe", - "type": "apm-server", - "version": "8.0.0", - "version_major": 8 - }, - "process": { - "pid": 1234 - }, - "processor": { - "event": "metric", - "name": "metric" - }, - "service": { - "language": { - "name": "ecmascript" - }, - "name": "1234_service-12a3", - "node": { - "name": "node-1" - } - }, - "short_counter": 227, - "span": { - "self_time": { - "count": 1, - "sum": { - "us": 633.288 - } - }, - "subtype": "mysql", - "type": "db" - }, - "transaction": { - "breakdown": { - "count": 12 - }, - "duration": { - "count": 2, - "sum": { - "us": 12 - } - }, - "name": "GET /", - "self_time": { - "count": 2, - "sum": { - "us": 10 - } - }, - "type": "request" - }, - "user": { - "email": "user@mail.com", - "id": "axb123hg", - "name": "logged-in-user" - } - } -] \ No newline at end of file diff --git a/tests/system/spans.approved.json b/tests/system/spans.approved.json index 788cc4795b5..95620eef2af 100644 --- a/tests/system/spans.approved.json +++ b/tests/system/spans.approved.json @@ -6,11 +6,19 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" }, + "http": { + "request": { + "method": "GET" + }, + "response": { + "status_code": 200 + } + }, "labels": { "span_tag": "something" }, @@ -47,14 +55,12 @@ "us": 3781 }, "http": { - "method": "get", + "method": "GET", "response": { "status_code": 200 - }, - "url": { - "original": "http://localhost:8000" } }, + "http.url.original": "http://localhost:8000", "id": "0aaaaaaaaaaaaaaa", "name": "SELECT FROM product_types", "stacktrace": [ @@ -108,6 +114,9 @@ }, "transaction": { "id": "945254c567a5417e" + }, + "url": { + "original": "http://localhost:8000" } }, { @@ -122,7 +131,7 @@ "port": 5432 }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -191,7 +200,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -244,7 +253,7 @@ "version": "3.14.0" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" @@ -303,7 +312,7 @@ ] }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "outcome": "unknown" diff --git a/tests/system/test_apikey_cmd.py b/tests/system/test_apikey_cmd.py deleted file mode 100644 index edcac357b82..00000000000 --- a/tests/system/test_apikey_cmd.py +++ /dev/null @@ -1,223 +0,0 @@ -import json -import os -import random -import requests - -from apmserver import BaseTest, integration_test -from helper import wait_until - - -class APIKeyHelper(object): - # APIKeyHelper contains functions related to creating and invalidating API Keys and - # waiting until the actions are completed. 
- def __init__(self, es_url): - # api_key related urls for configured user (default: apm_server_user) - self.api_key_url = "{}/_security/api_key".format(es_url) - self.privileges_url = "{}/_security/privilege".format(es_url) - - def wait_until_invalidated(self, name=None, id=None): - if not name and not id: - raise Exception("Either name or id must be given") - - def invalidated(): - keys = self.fetch_by_name(name) if name else self.fetch_by_id(id) - for entry in keys: - if not entry["invalidated"]: - return False - return True - wait_until(lambda: invalidated(), name="api keys invalidated") - - def wait_until_created(self, id): - wait_until(lambda: len(self.fetch_by_id(id)) == 1, name="create api key") - - def fetch_by_name(self, name): - resp = requests.get("{}?name={}".format(self.api_key_url, name)) - assert resp.status_code == 200 - assert "api_keys" in resp.json(), resp.json() - return resp.json()["api_keys"] - - def fetch_by_id(self, id): - resp = requests.get("{}?id={}".format(self.api_key_url, id)) - assert resp.status_code == 200, resp.status_code - assert "api_keys" in resp.json(), resp.json() - return resp.json()["api_keys"] - - def create(self, payload): - resp = requests.post(self.api_key_url, - data=payload, - headers={'content-type': 'application/json'}) - assert resp.status_code == 200, resp.status_code - self.wait_until_created(resp.json()["id"]) - return resp.json() - - def invalidate(self, name): - resp = requests.delete(self.api_key_url, - data=json.dumps({'name': name}), - headers={'content-type': 'application/json'}) - self.wait_until_invalidated(name=name) - return resp.json() - - -class APIKeyCommandBaseTest(BaseTest): - apikey_name = "apm_integration_key" - - def config(self): - return { - "elasticsearch_host": self.es_url, - "file_enabled": "false", - "kibana_enabled": "false", - } - - def setUp(self): - super(APIKeyCommandBaseTest, self).setUp() - self.user = os.getenv("ES_USER", "apm_server_user") - password = os.getenv("ES_PASS", "changeme") - self.es_url = self.get_elasticsearch_url(self.user, password) - self.kibana_url = self.get_kibana_url() - # apikey_helper contains helper functions for base actions related to creating and invalidating api keys - self.apikey_helper = APIKeyHelper(self.es_url) - self.render_config_template(**self.config()) - - def subcommand_output(self, *args, **kwargs): - log = self.subcommand(*args, **kwargs) - # command and go test output is combined in log, pull out the command output - command_output = self._trim_golog(log) - return json.loads(command_output) - - def subcommand(self, *args, **kwargs): - logfile = self.beat_name + "-" + str(random.randint(0, 99999)) + "-" + args[0] + ".log" - subcmd = ["apikey"] - subcmd.extend(args) - subcmd.append("--json") - exit_code = self.run_beat(logging_args=[], extra_args=subcmd, output=logfile) - log = self.get_log(logfile) - assert exit_code == kwargs.get('exit_code', 0), log - return log - - @staticmethod - def _trim_golog(log): - # If the command fails it will exit before printing coverage, - # hence why this is conditional. 
- pos = log.rfind("\nPASS\n") - if pos >= 0: - for trimmed in log[pos+1:].strip().splitlines(): - assert trimmed.split(None, 1)[0] in ("PASS", "coverage:"), trimmed - log = log[:pos] - return log - - def create(self, *args): - apikey = self.subcommand_output("create", "--name", self.apikey_name, *args) - self.apikey_helper.wait_until_created(apikey.get("id")) - return apikey - - def invalidate_by_id(self, id): - invalidated = self.subcommand_output("invalidate", "--id={}".format(id)) - self.apikey_helper.wait_until_invalidated(id=id) - return invalidated - - def invalidate_by_name(self, name): - invalidated = self.subcommand_output("invalidate", "--name", name) - self.apikey_helper.wait_until_invalidated(name=name) - return invalidated - - -@integration_test -class APIKeyCommandTest(APIKeyCommandBaseTest): - """ - Tests the apikey subcommand. - """ - - def setUp(self): - super(APIKeyCommandTest, self).setUp() - invalidated = self.invalidate_by_name(self.apikey_name) - assert invalidated.get("error_count") == 0 - - def test_create_with_settings_override(self): - apikey = self.create( - "-E", "output.elasticsearch.enabled=false", - "-E", "apm-server.api_key.elasticsearch.hosts=[{}]".format(self.es_url) - ) - assert apikey.get("credentials") is not None, apikey - - def test_info_by_id(self): - self.create() - apikey = self.create() - info = self.subcommand_output("info", "--id={}".format(apikey["id"])) - assert len(info.get("api_keys")) == 1, info - assert info["api_keys"][0].get("username") == self.user, info - assert info["api_keys"][0].get("id") == apikey["id"], info - assert info["api_keys"][0].get("name") == apikey["name"], info - assert info["api_keys"][0].get("invalidated") is False, info - - def test_info_by_name(self): - apikey = self.create() - invalidated = self.invalidate_by_id(apikey["id"]) - assert invalidated.get("error_count") == 0 - self.create() - self.create() - - info = self.subcommand_output("info", "--name", self.apikey_name) - # can't test exact number because these tests have side effects - assert len(info.get("api_keys")) > 2, info - - info = self.subcommand_output("info", "--name", self.apikey_name, "--valid-only") - assert len(info.get("api_keys")) == 2, info - - def test_verify_all(self): - apikey = self.create() - result = self.subcommand_output("verify", "--credentials={}".format(apikey["credentials"])) - assert result == {'event:write': True, 'config_agent:read': True, 'sourcemap:write': True}, result - - for privilege in ["ingest", "sourcemap", "agent-config"]: - result = self.subcommand_output( - "verify", "--credentials={}".format(apikey["credentials"]), "--" + privilege) - assert len(result) == 1, result - assert list(result.values())[0] is True - - def test_verify_each(self): - apikey = self.create("--ingest") - result = self.subcommand_output("verify", "--credentials={}".format(apikey["credentials"])) - assert result == {'event:write': True, 'config_agent:read': False, 'sourcemap:write': False}, result - - apikey = self.create("--sourcemap") - result = self.subcommand_output("verify", "--credentials={}".format(apikey["credentials"])) - assert result == {'event:write': False, 'config_agent:read': False, 'sourcemap:write': True}, result - - apikey = self.create("--agent-config") - result = self.subcommand_output("verify", "--credentials={}".format(apikey["credentials"])) - assert result == {'event:write': False, 'config_agent:read': True, 'sourcemap:write': False}, result - - -@integration_test -class APIKeyCommandBadUserTest(APIKeyCommandBaseTest): - - def 
config(self): - return { - "elasticsearch_host": self.get_elasticsearch_url(user="heartbeat_user", password="changeme"), - "file_enabled": "false", - "kibana_enabled": "false", - } - - def test_create_bad_user(self): - """heartbeat_user doesn't have required cluster privileges, so it can't create keys""" - result = self.subcommand_output("create", "--name", self.apikey_name, exit_code=1) - assert result.get("error") is not None - - -@integration_test -class APIKeyCommandBadUser2Test(APIKeyCommandBaseTest): - - def config(self): - return { - "elasticsearch_host": self.get_elasticsearch_url(user="beats_user", password="changeme"), - "file_enabled": "false", - "kibana_enabled": "false", - } - - def test_create_bad_user(self): - """beats_user does have required cluster privileges, but not APM application privileges, - so it can't create keys - """ - result = self.subcommand_output("create", "--name", self.apikey_name, exit_code=1) - assert result.get("error") is not None, result - assert "beats_user is missing the following requested privilege(s):" in result.get("error"), result diff --git a/tests/system/test_auth.py b/tests/system/test_auth.py deleted file mode 100644 index 47433b0768f..00000000000 --- a/tests/system/test_auth.py +++ /dev/null @@ -1,314 +0,0 @@ -import base64 -import json -import os -import requests - -from apmserver import ServerBaseTest, ElasticTest -from apmserver import TimeoutError, integration_test -from test_apikey_cmd import APIKeyHelper -from helper import wait_until - - -def headers(auth=None, content_type='application/x-ndjson'): - h = {'content-type': content_type} - if auth is not None: - auth_headers = {'Authorization': auth} - auth_headers.update(h) - return auth_headers - return h - - -class TestAccessDefault(ServerBaseTest): - """ - Unsecured endpoints - """ - - def test_full_access(self): - """ - Test that authorized API Key is not accepted when API Key usage is disabled - """ - events = self.get_event_payload() - - # access without token allowed - resp = requests.post(self.intake_url, data=events, headers=headers()) - assert resp.status_code == 202, resp.status_code - - # access with any Bearer token allowed - resp = requests.post(self.intake_url, data=events, headers=headers(auth="Bearer 1234")) - assert resp.status_code == 202, resp.status_code - - # access with any API Key allowed - resp = requests.post(self.intake_url, data=events, headers=headers(auth="")) - assert resp.status_code == 202, resp.status_code - - -class TestAccessWithSecretToken(ServerBaseTest): - def config(self): - cfg = super(TestAccessWithSecretToken, self).config() - cfg.update({"secret_token": "1234"}) - return cfg - - def test_backend_intake(self): - """ - Test that access works with token - """ - - events = self.get_event_payload() - - r = requests.post(self.intake_url, data=events, headers=headers("")) - assert r.status_code == 401, r.status_code - - r = requests.post(self.intake_url, data=events, headers=headers('Bearer 1234')) - assert r.status_code == 202, r.status_code - - -@integration_test -class APIKeyBaseTest(ElasticTest): - def setUp(self): - # application - self.application = "apm" - - # apm privileges - self.privilege_agent_config = "config_agent:read" - self.privilege_event = "event:write" - self.privilege_sourcemap = "sourcemap:write" - self.privileges = { - "agentConfig": self.privilege_agent_config, - "event": self.privilege_event, - "sourcemap": self.privilege_sourcemap - } - self.privileges_all = list(self.privileges.values()) - self.privilege_any = "*" - - # 
resources - self.resource_any = ["*"] - self.resource_backend = ["-"] - - user = os.getenv("ES_USER", "apm_server_user") - password = os.getenv("ES_PASS", "changeme") - self.apikey_name = "apm-systemtest" - self.apikey = APIKeyHelper(self.get_elasticsearch_url(user, password)) - - # delete all existing api_keys with defined name of current user - self.apikey.invalidate(self.apikey_name) - # delete all existing application privileges to ensure they can be created for current user - for p in self.privileges.keys(): - url = "{}/{}/{}".format(self.apikey.privileges_url, self.application, p) - requests.delete(url) - wait_until(lambda: requests.get(url).status_code == 404) - - super(APIKeyBaseTest, self).setUp() - - def create_api_key_header(self, privileges, resources, application="apm"): - return "ApiKey {}".format(self.create_apm_api_key(privileges, resources, application=application)) - - def create_apm_api_key(self, privileges, resources, application="apm"): - payload = json.dumps({ - "name": self.apikey_name, - "role_descriptors": { - self.apikey_name + "role_desc": { - "applications": [ - {"application": application, "privileges": privileges, "resources": resources}]}}}) - resp = self.apikey.create(payload) - enc = "utf-8" - return str(base64.b64encode("{}:{}".format(resp["id"], resp["api_key"]).encode(enc)), enc) - - -@integration_test -class TestAPIKeyCache(APIKeyBaseTest): - def config(self): - cfg = super(TestAPIKeyCache, self).config() - cfg.update({"api_key_enabled": True, "api_key_limit": 5}) - return cfg - - def test_cache_full(self): - """ - Test that authorized API Key is not accepted when cache is full - api_key.limit: number of unique API Keys per minute => cache size - """ - key1 = self.create_api_key_header([self.privilege_event], self.resource_any) - key2 = self.create_api_key_header([self.privilege_event], self.resource_any) - - def assert_intake(api_key, authorized): - resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(api_key)) - if authorized: - assert resp.status_code != 401, "token: {}, status_code: {}".format(api_key, resp.status_code) - else: - assert resp.status_code == 401, "token: {}, status_code: {}".format(api_key, resp.status_code) - - # fill cache up until one spot - for i in range(4): - assert_intake("ApiKey xyz{}".format(i), authorized=False) - - # allow for authorized api key - assert_intake(key1, True) - # hit cache size - assert_intake(key2, False) - # still allow already cached api key - assert_intake(key1, True) - - -@integration_test -class TestAPIKeyWithInvalidESConfig(APIKeyBaseTest): - def config(self): - cfg = super(TestAPIKeyWithInvalidESConfig, self).config() - cfg.update({"api_key_enabled": True, "api_key_es": "localhost:9999"}) - return cfg - - def test_backend_intake(self): - """ - API Key cannot be verified when invalid Elasticsearch instance configured - """ - name = "system_test_invalid_es" - key = self.create_api_key_header([self.privilege_event], self.resource_any) - resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(key)) - assert resp.status_code == 401, "token: {}, status_code: {}".format(key, resp.status_code) - - -@integration_test -class TestAPIKeyWithESConfig(APIKeyBaseTest): - def config(self): - cfg = super(TestAPIKeyWithESConfig, self).config() - cfg.update({"api_key_enabled": True, "api_key_es": self.get_elasticsearch_url()}) - return cfg - - def test_backend_intake(self): - """ - Use dedicated Elasticsearch configuration for API Key validation - """ - key = 
self.create_api_key_header([self.privilege_event], self.resource_any) - resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(key)) - assert resp.status_code == 202, "token: {}, status_code: {}".format(key, resp.status_code) - - -@integration_test -class TestAccessWithAuthorization(APIKeyBaseTest): - - def setUp(self): - super(TestAccessWithAuthorization, self).setUp() - - self.api_key_privileges_all_resource_any = self.create_api_key_header(self.privileges_all, self.resource_any) - self.api_key_privileges_all_resource_backend = self.create_api_key_header( - self.privileges_all, self.resource_backend) - self.api_key_privilege_any_resource_any = self.create_api_key_header(self.privilege_any, self.resource_any) - self.api_key_privilege_any_resource_backend = self.create_api_key_header( - self.privilege_any, self.resource_backend) - - self.api_key_privilege_event = self.create_api_key_header([self.privilege_event], self.resource_any) - self.api_key_privilege_config = self.create_api_key_header([self.privilege_agent_config], self.resource_any) - self.api_key_privilege_sourcemap = self.create_api_key_header([self.privilege_sourcemap], self.resource_any) - - self.api_key_invalid_application = self.create_api_key_header( - self.privileges_all, self.resource_any, application="foo") - self.api_key_invalid_privilege = self.create_api_key_header(["foo"], self.resource_any) - self.api_key_invalid_resource = self.create_api_key_header(self.privileges_all, "foo") - - self.authorized_keys = ["Bearer 1234", - self.api_key_privileges_all_resource_any, self.api_key_privileges_all_resource_backend, - self.api_key_privilege_any_resource_any, self.api_key_privilege_any_resource_backend] - - self.unauthorized_keys = ['', 'Bearer ', 'Bearer wrongtoken', 'Wrongbearer 1234', - self.api_key_invalid_privilege, self.api_key_invalid_resource, "ApiKey nonexisting"] - - def config(self): - cfg = super(TestAccessWithAuthorization, self).config() - cfg.update({"secret_token": "1234", "api_key_enabled": True, "enable_rum": True, - "kibana_enabled": "true", "kibana_host": self.get_kibana_url()}) - return cfg - - def test_root(self): - """ - Test authorization logic for root endpoint - """ - url = self.root_url - - for token in self.unauthorized_keys: - resp = requests.get(url, headers=headers(token)) - assert resp.status_code == 200, "token: {}, status_code: {}".format(token, resp.status_code) - assert resp.text == '', "token: {}, response: {}".format(token, resp.content) - - keys_one_privilege = [self.api_key_privilege_config, - self.api_key_privilege_sourcemap, self.api_key_privilege_event] - for token in self.authorized_keys+keys_one_privilege: - resp = requests.get(url, headers=headers(token)) - assert resp.status_code == 200, "token: {}, status_code: {}".format(token, resp.status_code) - assert resp.content != '', "token: {}, response: {}".format(token, resp.content) - for token in ["build_date", "build_sha", "version"]: - assert token in resp.json(), "token: {}, response: {}".format(token, resp.content) - - def test_backend_intake(self): - """ - Test authorization logic for backend Intake endpoint - """ - url = self.intake_url - events = self.get_event_payload() - - for token in self.authorized_keys+[self.api_key_privilege_event]: - resp = requests.post(url, data=events, headers=headers(token)) - assert resp.status_code == 202, "token: {}, status_code: {}".format(token, resp.status_code) - - for token in self.unauthorized_keys+[self.api_key_privilege_config, 
self.api_key_privilege_sourcemap]: - resp = requests.post(url, data=events, headers=headers(token)) - assert resp.status_code == 401, "token: {}, status_code: {}".format(token, resp.status_code) - - def test_rum_intake(self): - """ - Test authorization logic for RUM Intake endpoint. - """ - url = self.rum_intake_url - events = self.get_event_payload() - - # Endpoint is not secured, all keys are expected to be allowed. - for token in self.authorized_keys + self.unauthorized_keys: - resp = requests.post(url, data=events, headers=headers(token)) - assert resp.status_code != 401, "token: {}, status_code: {}".format(token, resp.status_code) - - def test_agent_config(self): - """ - Test authorization logic for backend Agent Configuration endpoint - """ - url = self.agent_config_url - - for token in self.authorized_keys+[self.api_key_privilege_config]: - resp = requests.get(url, - params={"service.name": "myservice"}, - headers=headers(token, content_type="application/json")) - assert resp.status_code == 200, "token: {}, status_code: {}".format(token, resp.status_code) - - for token in self.unauthorized_keys+[self.api_key_privilege_event, self.api_key_privilege_sourcemap]: - resp = requests.get(url, headers=headers(token, content_type="application/json")) - assert resp.status_code == 401, "token: {}, status_code: {}".format(token, resp.status_code) - - def test_rum_agent_config(self): - """ - Test authorization logic for RUM Agent Configuration endpoint - """ - url = self.rum_agent_config_url - - # Endpoint is not secured, all keys are expected to be allowed. - for token in self.authorized_keys + self.unauthorized_keys: - resp = requests.get(url, headers=headers(token, content_type="application/json")) - assert resp.status_code != 401, "token: {}, status_code: {}".format(token, resp.status_code) - - def test_sourcemap(self): - """ - Test authorization logic for Sourcemap upload endpoint - """ - def upload(token): - f = open(self._beat_path_join('testdata', 'sourcemap', 'bundle_no_mapping.js.map')) - resp = requests.post(self.sourcemap_url, - headers=headers(token, content_type=None), - files={'sourcemap': f}, - data={'service_version': '1.0.1', - 'bundle_filepath': 'mapping.js.map', - 'service_name': 'apm-agent-js' - }) - return resp - - for token in self.unauthorized_keys+[self.api_key_privilege_config, self.api_key_privilege_event]: - resp = upload(token) - assert resp.status_code == 401, "token: {}, status_code: {}".format(token, resp.status_code) - - for token in self.authorized_keys+[self.api_key_privilege_sourcemap]: - resp = upload(token) - assert resp.status_code == 202, "token: {}, status_code: {}".format(token, resp.status_code) diff --git a/tests/system/test_export.py b/tests/system/test_export.py index 3a2da401ab0..ee4ef8f61ad 100644 --- a/tests/system/test_export.py +++ b/tests/system/test_export.py @@ -12,90 +12,6 @@ class ExportCommandTest(SubCommandTest): register_pipeline_disabled = True -@integration_test -class ExportConfigDefaultTest(ExportCommandTest): - """ - Test export config subcommand. 
- """ - - def start_args(self): - return { - "extra_args": ["export", "config"], - "logging_args": None, - } - - def test_export_config(self): - """ - Test export default config - """ - config = yaml.load(self.command_output, Loader=Loader) - # logging settings - self.assertDictEqual( - {"metrics": {"enabled": False}, 'files': {'rotateeverybytes': 10485760}, }, config["logging"] - ) - - # template settings - self.assertDictEqual( - { - "template": { - "settings": { - "_source": {"enabled": True}, - "index": { - "codec": "best_compression", - "mapping": { - "total_fields": {"limit": 2000} - }, - "number_of_shards": 1, - }, - }, - }, - }, config["setup"]) - - -@integration_test -class ExportConfigTest(ExportCommandTest): - """ - Test export config subcommand. - """ - - def start_args(self): - return { - "extra_args": ["export", "config", - "-E", "logging.metrics.enabled=true", - "-E", "setup.template.settings.index.mapping.total_fields.limit=5", - ], - "logging_args": None, - } - - def test_export_config(self): - """ - Test export customized config - """ - config = yaml.load(self.command_output, Loader=Loader) - # logging settings - assert "metrics" in config["logging"] - self.assertDictEqual( - {"enabled": True}, config["logging"]["metrics"] - ) - - # template settings - self.assertDictEqual( - { - "template": { - "settings": { - "_source": {"enabled": True}, - "index": { - "codec": "best_compression", - "mapping": { - "total_fields": {"limit": 5} - }, - "number_of_shards": 1, - }, - }, - }, - }, config["setup"]) - - @integration_test class TestExportTemplate(ExportCommandTest): """ diff --git a/tests/system/test_instrumentation.py b/tests/system/test_instrumentation.py deleted file mode 100644 index 42bd821ffdc..00000000000 --- a/tests/system/test_instrumentation.py +++ /dev/null @@ -1,177 +0,0 @@ -from datetime import datetime, timedelta -import os -import time -import requests - -from apmserver import integration_test -from apmserver import ElasticTest -from test_auth import APIKeyBaseTest -from helper import wait_until -from es_helper import index_profile, index_transaction - -# Set ELASTIC_APM_API_REQUEST_TIME to a short duration -# to speed up the time taken for self-tracing events -# to be ingested. -os.environ["ELASTIC_APM_API_REQUEST_TIME"] = "1s" - - -# Exercises the DEPRECATED apm-server.instrumentation.* config -# When updating this file, consider test_libbeat_instrumentation.py -# Remove in 8.0 - -def get_instrumentation_event(es, index): - query = {"term": {"service.name": "apm-server"}} - return es.count(index=index, body={"query": query})['count'] > 0 - - -@integration_test -class TestInMemoryTracingAPIKey(APIKeyBaseTest): - def config(self): - cfg = super(TestInMemoryTracingAPIKey, self).config() - cfg.update({ - "api_key_enabled": True, - "instrumentation_enabled": "true", - }) - return cfg - - def test_api_key_auth(self): - """Self-instrumentation using in-memory listener without configuring an APIKey""" - - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. 
We just want to trigger the server's in-memory tracing, - # and test that the in-memory tracer works without having an api_key configured - r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have in-memory instrumentation documents without api_key') - - -@integration_test -class TestExternalTracingAPIKey(APIKeyBaseTest): - def config(self): - cfg = super(TestExternalTracingAPIKey, self).config() - api_key = self.create_apm_api_key([self.privilege_event], self.resource_any) - cfg.update({ - "api_key_enabled": True, - "instrumentation_enabled": "true", - "instrumentation_api_key": api_key, - # Set instrumentation.hosts to the same APM Server. - # - # Explicitly specifying hosts configures the tracer to - # behave as if it's sending to an external server, rather - # than using the in-memory transport that bypasses auth. - "instrumentation_host": APIKeyBaseTest.host, - }) - return cfg - - def test_api_key_auth(self): - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. We just want to trigger the server's tracing. - r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have external server instrumentation documents with api_key') - - -@integration_test -class TestExternalTracingSecretToken(ElasticTest): - def config(self): - cfg = super(TestExternalTracingSecretToken, self).config() - secret_token = "abc123" - cfg.update({ - "secret_token": secret_token, - "instrumentation_enabled": "true", - "instrumentation_secret_token": secret_token, - # Set instrumentation.hosts to the same APM Server. - # - # Explicitly specifying hosts configures the tracer to - # behave as if it's sending to an external server, rather - # than using the in-memory transport that bypasses auth. - "instrumentation_host": ElasticTest.host, - }) - return cfg - - def test_secret_token_auth(self): - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. We just want to trigger the server's tracing. 
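The `ApiKey` scheme used throughout these tests is just the base64 encoding of the `id:api_key` pair returned by Elasticsearch when the key is created, as `create_apm_api_key` above shows. A minimal sketch of an authorized intake request built that way (the credential values and payload path are hypothetical placeholders):

```python
import base64
import requests

# Hypothetical credentials; in these tests they come from APIKeyHelper.create().
key_id, key_secret = "someKeyId", "someKeySecret"
token = base64.b64encode("{}:{}".format(key_id, key_secret).encode("utf-8")).decode("utf-8")

with open("events.ndjson", "rb") as payload:  # placeholder NDJSON payload
    resp = requests.post("http://localhost:8200/intake/v2/events",
                         data=payload,
                         headers={"Authorization": "ApiKey " + token,
                                  "Content-Type": "application/x-ndjson"})
assert resp.status_code == 202, resp.status_code
```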
- r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have external server instrumentation documents with secret_token') - - -class ProfilingTest(ElasticTest): - def metric_fields(self): - metric_fields = set() - rs = self.es.search(index=index_profile) - for hit in rs["hits"]["hits"]: - profile = hit["_source"]["profile"] - metric_fields.update((k for (k, v) in profile.items() if type(v) is int)) - return metric_fields - - def wait_for_profile(self): - def cond(): - response = self.es.count(index=index_profile, body={"query": {"term": {"processor.name": "profile"}}}) - return response['count'] != 0 - wait_until(cond, max_timeout=10, name="waiting for profile") - - -@integration_test -class TestCPUProfiling(ProfilingTest): - config_overrides = { - "instrumentation_enabled": "true", - "profiling_cpu_enabled": "true", - "profiling_cpu_interval": "1s", - "profiling_cpu_duration": "5s", - } - - def test_self_profiling(self): - """CPU profiling enabled""" - - def create_load(): - payload_path = self.get_payload_path("transactions_spans.ndjson") - with open(payload_path) as f: - requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) - - # Wait for profiling to begin, and then start sending data - # to the server to create some CPU load. - - time.sleep(1) - start = datetime.now() - while datetime.now()-start < timedelta(seconds=5): - create_load() - self.wait_for_profile() - - expected_metric_fields = set([u"cpu.ns", u"samples.count", u"duration"]) - metric_fields = self.metric_fields() - self.assertEqual(metric_fields, expected_metric_fields) - - -@integration_test -class TestHeapProfiling(ProfilingTest): - config_overrides = { - "instrumentation_enabled": "true", - "profiling_heap_enabled": "true", - "profiling_heap_interval": "1s", - } - - def test_self_profiling(self): - """Heap profiling enabled""" - - time.sleep(1) - self.wait_for_profile() - - expected_metric_fields = set([ - u"alloc_objects.count", - u"inuse_objects.count", - u"alloc_space.bytes", - u"inuse_space.bytes", - ]) - metric_fields = self.metric_fields() - self.assertEqual(metric_fields, expected_metric_fields) diff --git a/tests/system/test_integration.py b/tests/system/test_integration.py index 0c6c895d3b9..f32495353df 100644 --- a/tests/system/test_integration.py +++ b/tests/system/test_integration.py @@ -78,14 +78,6 @@ def test_load_docs_with_template_and_add_error(self): self.check_backend_error_sourcemap(index_error, count=4) - def test_load_docs_with_template_and_add_metricset(self): - self.load_docs_with_template(self.get_metricset_payload_path(), self.intake_url, 'metric', 3) - self.assert_no_logged_warnings() - - # compare existing ES documents for metricsets with new ones - metricset_docs = self.wait_for_events('metric', 3, index=index_metric) - self.approve_docs('metricset', metricset_docs) - @integration_test class EnrichEventIntegrationTest(ClientSideElasticTest): @@ -96,14 +88,14 @@ def test_backend_error(self): self.backend_intake_url, 'error', 4) - self.check_library_frames({"true": 1, "false": 1, "empty": 2}, index_error) + self.check_library_frames({"true": 1, "false": 0, "empty": 3}, index_error) def test_rum_error(self): self.load_docs_with_template(self.get_error_payload_path(), self.intake_url, 'error', 1) - self.check_library_frames({"true": 5, "false": 1, "empty": 0}, index_error) + self.check_library_frames({"true": 5, "false": 0, "empty": 1}, 
index_error) def test_backend_transaction(self): # for backend events library_frame information should not be changed, @@ -119,7 +111,7 @@ def test_rum_transaction(self): self.intake_url, 'transaction', 2) - self.check_library_frames({"true": 1, "false": 1, "empty": 0}, index_span) + self.check_library_frames({"true": 1, "false": 0, "empty": 1}, index_span) def test_enrich_backend_event(self): self.load_docs_with_template(self.get_backend_transaction_payload_path(), @@ -302,51 +294,3 @@ def test_expvar_exists(self): """expvar enabled, should 200""" r = self.get_debug_vars() assert r.status_code == 200, r.status_code - - -@integration_test -class MetricsIntegrationTest(ElasticTest): - def test_metric_doc(self): - self.load_docs_with_template(self.get_metricset_payload_path(), self.intake_url, 'metric', 3) - mappings = self.es.indices.get_field_mapping( - index=index_metric, fields="system.process.cpu.total.norm.pct") - expected_type = "scaled_float" - doc = mappings[self.ilm_index(index_metric)]["mappings"] - actual_type = doc["system.process.cpu.total.norm.pct"]["mapping"]["pct"]["type"] - assert expected_type == actual_type, "want: {}, got: {}".format(expected_type, actual_type) - - -@integration_test -class ExperimentalBaseTest(ElasticTest): - def check_experimental_key_indexed(self, experimental): - self.load_docs_with_template(self.get_payload_path("experimental.ndjson"), - self.intake_url, 'transaction', 2) - wait_until(lambda: self.log_contains("events have been published"), max_timeout=10) - time.sleep(2) - self.assert_no_logged_warnings() - - for idx in [index_transaction, index_span, index_error]: - # ensure documents exist - rs = self.es.search(index=idx) - assert rs['hits']['total']['value'] == 1 - - # check whether or not top level key `experimental` has been indexed - rs = self.es.search(index=idx, body={"query": {"exists": {"field": 'experimental'}}}) - ct = 1 if experimental else 0 - assert rs['hits']['total']['value'] == ct - - -@integration_test -class ProductionModeTest(ExperimentalBaseTest): - config_overrides = {"mode": "production", "queue_flush": 2048} - - def test_experimental_key_indexed(self): - self.check_experimental_key_indexed(False) - - -@integration_test -class ExperimentalModeTest(ExperimentalBaseTest): - config_overrides = {"mode": "experimental", "queue_flush": 2048} - - def test_experimental_key_indexed(self): - self.check_experimental_key_indexed(True) diff --git a/tests/system/test_integration_acm.py b/tests/system/test_integration_acm.py deleted file mode 100644 index e840b6805dd..00000000000 --- a/tests/system/test_integration_acm.py +++ /dev/null @@ -1,330 +0,0 @@ -import time -from urllib.parse import urljoin -import uuid -import requests - -from apmserver import ElasticTest, integration_test - - -class AgentConfigurationTest(ElasticTest): - - def config(self): - cfg = super(AgentConfigurationTest, self).config() - cfg.update({ - "kibana_host": self.get_kibana_url(), - "logging_json": "true", - "kibana_enabled": "true", - "acm_cache_expiration": "1s" - }) - cfg.update(self.config_overrides) - return cfg - - def create_service_config(self, settings, name, agent="python", env=None): - return self.kibana.create_agent_config(name, settings, agent=agent, env=env) - - def update_service_config(self, settings, name, env=None): - res = self.kibana.create_or_update_agent_config(name, settings, env=env) - assert res.json()["result"] == "updated" - - -@integration_test -class AgentConfigurationIntegrationTest(AgentConfigurationTest): - def 
test_config_requests(self): - service_name = uuid.uuid4().hex - service_env = "production" - bad_service_env = "notreal" - - expect_log = [] - - # missing service.name - r1 = requests.get(self.agent_config_url, - headers={"Content-Type": "application/json"}, - ) - assert r1.status_code == 400, r1.status_code - expect_log.append({ - "level": "error", - "message": "invalid query", - "error": "service.name is required", - "response_code": 400, - }) - - # no configuration for service - r2 = requests.get(self.agent_config_url, - params={"service.name": service_name + "_cache_bust"}, - headers={"Content-Type": "application/json"}, - ) - assert r2.status_code == 200, r2.status_code - expect_log.append({ - "level": "info", - "message": "request ok", - "response_code": 200, - }) - self.assertEqual({}, r2.json()) - - self.create_service_config({"transaction_sample_rate": "0.05"}, service_name) - - # yes configuration for service - r3 = requests.get(self.agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - assert r3.status_code == 200, r3.status_code - # TODO (gr): validate Cache-Control header - https://github.com/elastic/apm-server/issues/2438 - expect_log.append({ - "level": "info", - "message": "request ok", - "response_code": 200, - }) - self.assertEqual({"transaction_sample_rate": "0.05"}, r3.json()) - - # not modified on re-request - r3_again = requests.get(self.agent_config_url, - params={"service.name": service_name}, - headers={ - "Content-Type": "application/json", - "If-None-Match": r3.headers["Etag"], - }) - assert r3_again.status_code == 304, r3_again.status_code - expect_log.append({ - "level": "info", - "message": "not modified", - "response_code": 304, - }) - - self.create_service_config( - {"transaction_sample_rate": "0.15"}, service_name, env=service_env) - - # yes configuration for service+environment - r4 = requests.get(self.agent_config_url, - params={ - "service.name": service_name, - "service.environment": service_env, - }, - headers={"Content-Type": "application/json"}) - assert r4.status_code == 200, r4.status_code - self.assertEqual({"transaction_sample_rate": "0.15"}, r4.json()) - expect_log.append({ - "level": "info", - "message": "request ok", - "response_code": 200, - }) - - # not modified on re-request - r4_again = requests.get(self.agent_config_url, - params={ - "service.name": service_name, - "service.environment": service_env, - }, - headers={ - "Content-Type": "application/json", - "If-None-Match": r4.headers["Etag"], - }) - assert r4_again.status_code == 304, r4_again.status_code - expect_log.append({ - "level": "info", - "message": "not modified", - "response_code": 304, - }) - - self.update_service_config( - {"transaction_sample_rate": "0.99"}, service_name, env=service_env) - - # TODO (gr): remove when cache can be disabled via config - # wait for cache to purge - time.sleep(1.1) # sleep much more than acm_cache_expiration to reduce flakiness - - r4_post_update = requests.get(self.agent_config_url, - params={ - "service.name": service_name, - "service.environment": service_env, - }, - headers={ - "Content-Type": "application/json", - "If-None-Match": r4.headers["Etag"], - }) - assert r4_post_update.status_code == 200, r4_post_update.status_code - self.assertEqual({"transaction_sample_rate": "0.99"}, r4_post_update.json()) - expect_log.append({ - "level": "info", - "message": "request ok", - "response_code": 200, - }) - - # configuration for service+environment (config without env covers all environments, including non-existent ones) - r5 = 
requests.get(self.agent_config_url, - params={ - "service.name": service_name, - "service.environment": bad_service_env, - }, - headers={"Content-Type": "application/json"}) - assert r5.status_code == 200, r5.status_code - expect_log.append({ - "level": "info", - "message": "request ok", - "response_code": 200, - }) - self.assertEqual({"transaction_sample_rate": "0.05"}, r5.json()) - - config_request_logs = list(self.logged_requests(url="/config/v1/agents")) - assert len(config_request_logs) == len(expect_log) - for want, got in zip(expect_log, config_request_logs): - assert set(want).issubset(got) - - def test_rum_disabled(self): - r = requests.get(self.rum_agent_config_url, - params={ - "service.name": "rum-service", - }, - headers={"Content-Type": "application/json"} - ) - assert r.status_code == 403 - assert "RUM endpoint is disabled" in r.json().get('error'), r.json() - - -@integration_test -class AgentConfigurationKibanaDownIntegrationTest(ElasticTest): - config_overrides = { - "logging_json": "true", - "secret_token": "supersecret", - "kibana_enabled": "true", - "kibana_host": "unreachablehost" - } - - def test_config_requests(self): - r1 = requests.get(self.agent_config_url, - headers={ - "Content-Type": "application/json", - }) - assert r1.status_code == 401, r1.status_code - - r2 = requests.get(self.agent_config_url, - params={"service.name": "foo"}, - headers={ - "Content-Type": "application/json", - "Authorization": "Bearer " + self.config_overrides["secret_token"], - }) - assert r2.status_code == 503, r2.status_code - - config_request_logs = list(self.logged_requests(url="/config/v1/agents")) - assert len(config_request_logs) == 2, config_request_logs - assert set({ - "level": "error", - "message": "unauthorized", - "error": "unauthorized", - "response_code": 401, - }).issubset(config_request_logs[0]) - assert set({ - "level": "error", - "message": "unable to retrieve connection to Kibana", - "response_code": 503, - }).issubset(config_request_logs[1]) - - -@integration_test -class AgentConfigurationKibanaDisabledIntegrationTest(ElasticTest): - config_overrides = { - "logging_json": "true", - "kibana_enabled": "false", - } - - def test_log_kill_switch_active(self): - r = requests.get(self.agent_config_url, - headers={ - "Content-Type": "application/json", - }) - assert r.status_code == 403, r.status_code - config_request_logs = list(self.logged_requests(url="/config/v1/agents")) - assert set({ - "level": "error", - "message": "forbidden request", - "response_code": 403, - }).issubset(config_request_logs[0]) - - -@integration_test -class RumAgentConfigurationIntegrationTest(AgentConfigurationTest): - config_overrides = { - "enable_rum": "true", - } - - def test_rum(self): - service_name = "rum-service" - self.create_service_config({"transaction_sample_rate": "0.2"}, service_name, agent="rum-js") - - r1 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r1.status_code == 200 - assert r1.json() == {'transaction_sample_rate': '0.2'} - etag = r1.headers["Etag"].replace('"', '') # RUM will send it without double quotes - - r2 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name, "ifnonematch": etag}, - headers={"Content-Type": "application/json"}) - assert r2.status_code == 304 - - def test_rum_current_name(self): - service_name = "rum-service" - self.create_service_config({"transaction_sample_rate": "0.2"}, service_name, agent="js-base") - - r1 = 
requests.get(self.rum_agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r1.status_code == 200 - assert r1.json() == {'transaction_sample_rate': '0.2'} - etag = r1.headers["Etag"].replace('"', '') # RUM will send it without double quotes - - r2 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name, "ifnonematch": etag}, - headers={"Content-Type": "application/json"}) - assert r2.status_code == 304 - - def test_backend_after_rum(self): - service_name = "backend-service" - self.create_service_config({"transaction_sample_rate": "0.3"}, service_name) - - r1 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r1.status_code == 200, r1.status_code - assert r1.json() == {}, r1.json() - - r2 = requests.get(self.agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r2.status_code == 200, r2.status_code - assert r2.json() == {"transaction_sample_rate": "0.3"}, r2.json() - - def test_rum_after_backend(self): - service_name = "backend-service" - self.create_service_config({"transaction_sample_rate": "0.3"}, service_name) - - r1 = requests.get(self.agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r1.status_code == 200, r1.status_code - assert r1.json() == {"transaction_sample_rate": "0.3"}, r1.json() - - r2 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r2.status_code == 200, r2.status_code - assert r2.json() == {}, r2.json() - - def test_all_agents(self): - service_name = "any-service" - self.create_service_config( - {"transaction_sample_rate": "0.4", "capture_body": "all"}, service_name, agent="") - - r1 = requests.get(self.rum_agent_config_url, - params={"service.name": service_name}, - headers={"Content-Type": "application/json"}) - - assert r1.status_code == 200, r1.status_code - # only return settings applicable to RUM - assert r1.json() == {"transaction_sample_rate": "0.4"}, r1.json() diff --git a/tests/system/test_integration_sourcemap.py b/tests/system/test_integration_sourcemap.py deleted file mode 100644 index 667e654f105..00000000000 --- a/tests/system/test_integration_sourcemap.py +++ /dev/null @@ -1,313 +0,0 @@ -import time -from urllib.parse import urlparse, urlunparse -import requests -import json - -from apmserver import integration_test -from apmserver import ClientSideElasticTest -from test_auth import APIKeyHelper -from helper import wait_until -from es_helper import index_smap, index_metric, index_transaction, index_error, index_span, index_onboarding, index_name - - -class BaseSourcemapTest(ClientSideElasticTest): - def upload_sourcemap(self, - file_name='bundle.js.map', - bundle_filepath='http://localhost:8000/test/e2e/general-usecase/bundle.js.map', - service_name='apm-agent-js', - service_version='1.0.1', - expected_ct=1, - status_code=202): - path = self._beat_path_join('testdata', 'sourcemap', file_name) - with open(path) as f: - r = requests.post(self.sourcemap_url, - files={'sourcemap': f}, - data={'service_version': service_version, - 'bundle_filepath': bundle_filepath, - 'service_name': service_name}) - assert r.status_code == status_code, r.status_code - if status_code < 400: - self.wait_for_events('sourcemap', expected_ct, index=index_smap) - - def split_url(self, 
cfg): - url = urlparse(cfg["elasticsearch_host"]) - url_parts = list(url) - url_parts[1] = url.netloc.split('@')[-1] - return {"host": urlunparse(url_parts), "username": url.username, "password": url.password} - - -@integration_test -class SourcemappingIntegrationTest(BaseSourcemapTest): - def test_backend_error(self): - # ensure source mapping is not applied to backend events - # load event for which a sourcemap would be applied when sent to rum endpoint, - # and send against backend endpoint. - - self.upload_sourcemap() - self.load_docs_with_template(self.get_error_payload_path(), - self.backend_intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - self.check_backend_error_sourcemap(index_error) - - def test_duplicated_sourcemap_warning(self): - self.upload_sourcemap() - self.upload_sourcemap(expected_ct=2) - assert self.log_contains( - "Overriding sourcemap"), "A log should be written when a sourcemap is overwritten" - self.upload_sourcemap(expected_ct=3) - assert self.log_contains("2 sourcemaps found for service"), \ - "the 3rd fetch should query ES and find that there are 2 sourcemaps with the same caching key" - self.assert_no_logged_warnings( - ["WARN.*Overriding sourcemap", "WARN.*2 sourcemaps found for service"]) - - def test_rum_error(self): - # use an uncleaned path to test that path is cleaned in upload - path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map' - self.upload_sourcemap(bundle_filepath=path) - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - self.check_rum_error_sourcemap(True) - - def test_backend_span(self): - # ensure source mapping is not applied to backend events - # load event for which a sourcemap would be applied when sent to rum endpoint, - # and send against backend endpoint. 
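`check_backend_span_sourcemap()` (called just below) lives in the shared test harness; in spirit the backend check reduces to the frame inspection used later in `SourcemappingDisabledIntegrationTest`, something like this sketch (it assumes the test-class context, i.e. `self.es` and the `index_span` import):

```python
# Sketch: backend events must keep their original frames, so no span
# stacktrace frame may carry a `sourcemap` sub-object.
rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"})
for doc in rs['hits']['hits']:
    for frame in doc['_source']['span'].get('stacktrace', []):
        assert 'sourcemap' not in frame, frame
```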
- self.upload_sourcemap(service_version='1.0.0') - self.load_docs_with_template(self.get_transaction_payload_path(), - self.backend_intake_url, - 'transaction', - 2) - self.assert_no_logged_warnings() - self.check_backend_span_sourcemap() - - def test_rum_transaction(self): - self.upload_sourcemap(service_version='1.0.0') - self.load_docs_with_template(self.get_transaction_payload_path(), - self.intake_url, - 'transaction', - 2) - self.assert_no_logged_warnings() - self.check_rum_transaction_sourcemap(True) - - def test_rum_transaction_different_subdomain(self): - self.upload_sourcemap(service_version='1.0.0') - self.load_docs_with_template(self.get_payload_path('transactions_spans_rum_2.ndjson'), - self.intake_url, - 'transaction', - 2) - self.assert_no_logged_warnings() - self.check_rum_transaction_sourcemap(True) - - def test_no_sourcemap(self): - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.check_rum_error_sourcemap( - False, expected_err="No Sourcemap available") - - def test_no_matching_sourcemap(self): - self.upload_sourcemap(file_name='bundle_no_mapping.js.map', bundle_filepath='bundle_no_mapping.js.map') - self.assert_no_logged_warnings() - self.test_no_sourcemap() - - def test_fetch_latest_of_multiple_sourcemaps(self): - # upload sourcemap file that finds no matchings - self.upload_sourcemap(file_name='bundle_no_mapping.js.map') - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.check_rum_error_sourcemap( - False, expected_err="No Sourcemap found for") - - # remove existing document - self.es.delete_by_query(index=index_error, body={"query": {"term": {"processor.name": 'error'}}}) - wait_until(lambda: (self.es.count(index=index_error)['count'] == 0)) - - # upload second sourcemap file with same key, - # that actually leads to proper matchings - # this also tests that the cache gets invalidated, - # as otherwise the former sourcemap would be taken from the cache. 
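The caching key mentioned here is, conceptually, the (service name, service version, cleaned bundle filepath) triple that every upload carries. A toy illustration of the invalidation requirement (not the server's actual data structure):

```python
# Toy model: the cache maps a sourcemap's identity to its parsed mapping.
cache = {}
key = ('apm-agent-js', '1.0.1',
       'http://localhost:8000/test/e2e/general-usecase/bundle.js.map')
cache[key] = 'mapping-from-first-upload'

# A re-upload under the same key must evict the stale entry, otherwise
# the former sourcemap would keep being served from the cache.
cache.pop(key, None)
cache[key] = 'mapping-from-second-upload'
```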
- self.upload_sourcemap(expected_ct=2) - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.check_rum_error_sourcemap(True, count=1) - - def test_sourcemap_mapping_cache_usage(self): - self.upload_sourcemap() - # insert document, which also leads to caching the sourcemap - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - - # delete sourcemap from ES - # fetching from ES would lead to an error afterwards - self.es.indices.delete(index=index_smap, ignore=[400, 404]) - self.es.indices.delete(index="{}-000001".format(index_error), ignore=[400, 404]) - - # insert document, - # fetching sourcemap without errors, so it must be fetched from cache - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - self.check_rum_error_sourcemap(True) - - def test_rum_error_changed_index(self): - # use an uncleaned path to test that path is cleaned in upload - path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map' - self.upload_sourcemap(bundle_filepath=path) - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - self.check_rum_error_sourcemap(True) - - -@integration_test -class SourcemappingCacheIntegrationTest(BaseSourcemapTest): - config_overrides = {"smap_cache_expiration": "1"} - - def test_sourcemap_cache_expiration(self): - self.upload_sourcemap() - - # insert document, which also leads to caching the sourcemap - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - - # delete sourcemap and error event from ES - self.es.indices.delete(index=self.ilm_index(index_error)) - # fetching from ES will lead to an error afterwards - self.es.indices.delete(index=index_smap, ignore=[400, 404]) - wait_until(lambda: not self.es.indices.exists(index_smap)) - # ensure smap is not in cache any more - time.sleep(1) - - # after cache expiration no sourcemap should be found any more - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.check_rum_error_sourcemap(False, expected_err="No Sourcemap available") - - -@integration_test -class SourcemappingDisabledIntegrationTest(BaseSourcemapTest): - config_overrides = { - "rum_sourcemapping_disabled": True, - } - - def test_rum_transaction(self): - self.upload_sourcemap(service_version='1.0.0', status_code=403) - self.load_docs_with_template(self.get_transaction_payload_path(), - self.intake_url, - 'transaction', - 2) - rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"}) - assert rs['hits']['total'] == 1, "found {} documents, expected {}".format( - rs['hits']['total'], 1) - frames_checked = 0 - for doc in rs['hits']['hits']: - span = doc["_source"]["span"] - for frame in span["stacktrace"]: - frames_checked += 1 - assert "sourcemap" not in frame, frame - assert frames_checked > 0, "no frames checked" - - -@integration_test -class SourcemapInvalidESConfig(BaseSourcemapTest): - def config(self): - cfg = super(SourcemapInvalidESConfig, self).config() - url = self.split_url(cfg) - cfg.update({ - "smap_es_host": url["host"], - "smap_es_username": url["username"], - "smap_es_password": "xxxx", - }) - return cfg - - def test_unauthorized(self): - # successful - uses output.elasticsearch.* configuration - 
self.upload_sourcemap() - # unauthorized - uses apm-server.rum.sourcemapping.elasticsearch configuration - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - assert self.log_contains("unable to authenticate user") - - -@integration_test -class SourcemapESConfigUser(BaseSourcemapTest): - def config(self): - cfg = super(SourcemapESConfigUser, self).config() - url = self.split_url(cfg) - cfg.update({ - "smap_es_host": url["host"], - "smap_es_username": url["username"], - "smap_es_password": url["password"], - }) - return cfg - - def test_sourcemap_applied(self): - # uses output.elasticsearch.* configuration - self.upload_sourcemap() - # uses apm-server.rum.sourcemapping.elasticsearch configuration - self.load_docs_with_template(self.get_error_payload_path(), self.intake_url, 'error', 1) - self.assert_no_logged_warnings() - self.check_rum_error_sourcemap(True) - - -@integration_test -class SourcemapESConfigAPIKey(BaseSourcemapTest): - def config(self): - cfg = super(SourcemapESConfigAPIKey, self).config() - - # create API Key that is valid for fetching source maps - apikey = APIKeyHelper(self.get_elasticsearch_url()) - payload = json.dumps({ - 'name': 'test_sourcemap_apikey', - 'role_descriptors': { - 'test_sourcemap_apikey': { - 'index': [ - { - 'names': ['apm-*'], - 'privileges': ['read'] - } - ] - } - } - }) - resp = apikey.create(payload) - cfg.update({ - "smap_es_host": self.split_url(cfg)["host"], - "smap_es_apikey": "{}:{}".format(resp["id"], resp["api_key"]), - }) - return cfg - - def test_sourcemap_applied(self): - # uses output.elasticsearch.* configuration - self.upload_sourcemap() - # uses apm-server.rum.sourcemapping.elasticsearch configuration - self.load_docs_with_template(self.get_error_payload_path(), - self.intake_url, - 'error', - 1) - self.assert_no_logged_warnings() - self.check_rum_error_sourcemap(True) diff --git a/tests/system/test_jaeger.py b/tests/system/test_jaeger.py deleted file mode 100644 index 20e5073970d..00000000000 --- a/tests/system/test_jaeger.py +++ /dev/null @@ -1,172 +0,0 @@ -import os -import re -import subprocess -from urllib.parse import urljoin -import requests - -from apmserver import integration_test, ElasticTest -from helper import wait_until - - -class JaegerBaseTest(ElasticTest): - def setUp(self): - super(JaegerBaseTest, self).setUp() - wait_until(lambda: self.log_contains("Listening for Jaeger HTTP"), name="Jaeger HTTP listener started") - wait_until(lambda: self.log_contains("Listening for Jaeger gRPC"), name="Jaeger gRPC listener started") - - # Extract the Jaeger server addresses. - log = self.get_log() - match = re.search("Listening for Jaeger HTTP requests on: (.*)$", log, re.MULTILINE) - self.jaeger_http_url = "http://{}/{}".format(match.group(1), 'api/traces') - match = re.search("Listening for Jaeger gRPC requests on: (.*)$", log, re.MULTILINE) - self.jaeger_grpc_addr = match.group(1) - - def config(self): - cfg = super(JaegerBaseTest, self).config() - cfg.update({ - "jaeger_grpc_enabled": "true", - "jaeger_http_enabled": "true", - # Listen on dynamic ports - "jaeger_grpc_host": "localhost:0", - "jaeger_http_host": "localhost:0", - # jaeger_grpc_auth_tag is set in the base suite so we can - # check that the authorization tag is always removed, - # even if there's no secret token / API Key auth. 
- "jaeger_grpc_auth_tag": "authorization", - }) - return cfg - - -@integration_test -class Test(JaegerBaseTest): - def test_jaeger_http(self): - """ - This test sends a Jaeger span in Thrift encoding over HTTP, and verifies that it is indexed. - """ - jaeger_span_thrift = self.get_testdata_path('jaeger', 'span.thrift') - self.load_docs_with_template(jaeger_span_thrift, self.jaeger_http_url, 'transaction', 1, - extra_headers={"content-type": "application/vnd.apache.thrift.binary"}, - file_mode="rb") - - self.assert_no_logged_warnings() - transaction_docs = self.wait_for_events('transaction', 1) - self.approve_docs('jaeger_span', transaction_docs) - - def test_jaeger_auth_tag_removed(self): - """ - This test sends a Jaeger batch over gRPC, with an "authorization" process tag, - and verifies that the spans are indexed without that process tag indexed as a label. - """ - jaeger_request_data = self.get_testdata_path('jaeger', 'batch_0_authorization.json') - - client = os.path.join(os.path.dirname(__file__), 'jaegergrpc') - subprocess.run( - ['go', 'run', client, '-addr', self.jaeger_grpc_addr, '-insecure', jaeger_request_data], - check=True, - ) - - transaction_docs = self.wait_for_events('transaction', 1) - error_docs = self.wait_for_events('error', 3) - self.approve_docs('jaeger_batch_0_auth_tag_removed', transaction_docs + error_docs) - - -@integration_test -class TestAuthTag(JaegerBaseTest): - def config(self): - cfg = super(TestAuthTag, self).config() - cfg.update({"secret_token": "1234"}) - return cfg - - def test_jaeger_unauthorized(self): - """ - This test sends a Jaeger batch over gRPC, without an "authorization" process tag, - and verifies that the spans are indexed. - """ - jaeger_request_data = self.get_testdata_path('jaeger', 'batch_0.json') - - client = os.path.join(os.path.dirname(__file__), 'jaegergrpc') - proc = subprocess.Popen( - ['go', 'run', client, '-addr', self.jaeger_grpc_addr, '-insecure', jaeger_request_data], - stderr=subprocess.PIPE, - ) - stdout, stderr = proc.communicate() - self.assertNotEqual(proc.returncode, 0) - self.assertRegex(stderr.decode("utf-8"), "not authorized") - - def test_jaeger_authorized(self): - """ - This test sends a Jaeger batch over gRPC, with an "authorization" process tag, - and verifies that the spans are indexed without that tag indexed as a label. 
- """ - jaeger_request_data = self.get_testdata_path('jaeger', 'batch_0_authorization.json') - - client = os.path.join(os.path.dirname(__file__), 'jaegergrpc') - subprocess.run( - ['go', 'run', client, '-addr', self.jaeger_grpc_addr, '-insecure', jaeger_request_data], - check=True, - ) - - transaction_docs = self.wait_for_events('transaction', 1) - error_docs = self.wait_for_events('error', 3) - self.approve_docs('jaeger_batch_0_authorization', transaction_docs + error_docs) - - -@integration_test -class GRPCSamplingTest(JaegerBaseTest): - - def config(self): - cfg = super(GRPCSamplingTest, self).config() - cfg.update({ - "jaeger_grpc_sampling_enabled": "true", - "kibana_host": self.get_kibana_url(), - "kibana_enabled": "true", - "acm_cache_expiration": "1s" - }) - cfg.update(self.config_overrides) - return cfg - - def create_service_config(self, service, sampling_rate, agent=None): - return self.kibana.create_or_update_agent_config( - service, {"transaction_sample_rate": "{}".format(sampling_rate)}, agent=agent) - - def call_sampling_endpoint(self, service): - client = os.path.join(os.path.dirname(__file__), 'jaegergrpc') - out = os.path.abspath(self.working_dir) + "/sampling_response" - subprocess.check_call(['go', 'run', client, - '-addr', self.jaeger_grpc_addr, - '-insecure', - '-endpoint', "sampler", - '-service', service, - '-out', out - ]) - with open(out, "r") as out: - return out.read() - - def test_jaeger_grpc_sampling(self): - """ - This test sends Jaeger sampling requests over gRPC, and tests responses - """ - - # test returns a configured default sampling strategy - service = "all" - sampling_rate = 0.35 - self.create_service_config(service, sampling_rate) - expected = "strategy: PROBABILISTIC, sampling rate: {}".format(sampling_rate) - logged = self.call_sampling_endpoint(service) - assert expected == logged, logged - - # test returns a configured sampling strategy - service = "jaeger-service" - sampling_rate = 0.75 - self.create_service_config(service, sampling_rate, agent="Jaeger/Ruby") - expected = "strategy: PROBABILISTIC, sampling rate: {}".format(sampling_rate) - logged = self.call_sampling_endpoint(service) - assert expected == logged, logged - - # test returns an error as configured sampling strategy is not for Jaeger - service = "foo" - sampling_rate = 0.13 - self.create_service_config(service, sampling_rate, agent="Non-Jaeger/Agent") - expected = "no sampling rate available, check server logs for more details" - logged = self.call_sampling_endpoint(service) - assert expected in logged, logged diff --git a/tests/system/test_libbeat_instrumentation.py b/tests/system/test_libbeat_instrumentation.py deleted file mode 100644 index d813f6d8545..00000000000 --- a/tests/system/test_libbeat_instrumentation.py +++ /dev/null @@ -1,176 +0,0 @@ -from datetime import datetime, timedelta -import os -import time -import requests - -from apmserver import integration_test -from apmserver import ElasticTest -from test_auth import APIKeyBaseTest -from helper import wait_until -from es_helper import index_profile, index_transaction - -# Set ELASTIC_APM_API_REQUEST_TIME to a short duration -# to speed up the time taken for self-tracing events -# to be ingested. 
-os.environ["ELASTIC_APM_API_REQUEST_TIME"] = "1s" - -# This exercises the instrumentation.* config -# When updating this file, consider test_instrumentation.py - - -def get_instrumentation_event(es, index): - query = {"term": {"service.name": "apm-server"}} - return es.count(index=index, body={"query": query})['count'] > 0 - - -@integration_test -class TestInMemoryTracingAPIKey(APIKeyBaseTest): - def config(self): - cfg = super(TestInMemoryTracingAPIKey, self).config() - cfg.update({ - "api_key_enabled": True, - "libbeat_instrumentation_enabled": "true", - }) - return cfg - - def test_api_key_auth(self): - """Self-instrumentation using in-memory listener without configuring an APIKey""" - - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. We just want to trigger the server's in-memory tracing, - # and test that the in-memory tracer works without having an api_key configured - r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have in-memory instrumentation documents without api_key') - - -@integration_test -class TestExternalTracingAPIKey(APIKeyBaseTest): - def config(self): - cfg = super(TestExternalTracingAPIKey, self).config() - api_key = self.create_apm_api_key([self.privilege_event], self.resource_any) - cfg.update({ - "api_key_enabled": True, - "libbeat_instrumentation_enabled": "true", - "libbeat_instrumentation_api_key": api_key, - # Set instrumentation.hosts to the same APM Server. - # - # Explicitly specifying hosts configures the tracer to - # behave as if it's sending to an external server, rather - # than using the in-memory transport that bypasses auth. - "libbeat_instrumentation_host": APIKeyBaseTest.host, - }) - return cfg - - def test_api_key_auth(self): - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. We just want to trigger the server's tracing. - r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have external server instrumentation documents with api_key') - - -@integration_test -class TestExternalTracingSecretToken(ElasticTest): - def config(self): - cfg = super(TestExternalTracingSecretToken, self).config() - secret_token = "abc123" - cfg.update({ - "secret_token": secret_token, - "libbeat_instrumentation_enabled": "true", - "libbeat_instrumentation_secret_token": secret_token, - # Set instrumentation.hosts to the same APM Server. - # - # Explicitly specifying hosts configures the tracer to - # behave as if it's sending to an external server, rather - # than using the in-memory transport that bypasses auth. - "libbeat_instrumentation_host": ElasticTest.host, - }) - return cfg - - def test_secret_token_auth(self): - # Send a POST request to the intake API URL. Doesn't matter what the - # request body contents are, as the request will fail due to lack of - # authorization. We just want to trigger the server's tracing. 
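`wait_until`, imported from the shared `helper` module and relied on just below, polls a condition until it holds. A sketch of that contract (the poll interval and default timeout here are assumptions, not the helper's actual values):

```python
import time

def wait_until(cond, max_timeout=30, name="condition"):
    """Poll cond() until it returns a truthy value or max_timeout elapses."""
    deadline = time.time() + max_timeout
    while not cond():
        if time.time() > deadline:
            raise TimeoutError("timed out waiting for {}".format(name))
        time.sleep(0.5)
```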
- r = requests.post(self.intake_url, data="invalid") - self.assertEqual(401, r.status_code) - - wait_until(lambda: get_instrumentation_event(self.es, index_transaction), - name='have external server instrumentation documents with secret_token') - - -class ProfilingTest(ElasticTest): - def metric_fields(self): - metric_fields = set() - rs = self.es.search(index=index_profile) - for hit in rs["hits"]["hits"]: - profile = hit["_source"]["profile"] - metric_fields.update((k for (k, v) in profile.items() if type(v) is int)) - return metric_fields - - def wait_for_profile(self): - def cond(): - response = self.es.count(index=index_profile, body={"query": {"term": {"processor.name": "profile"}}}) - return response['count'] != 0 - wait_until(cond, max_timeout=10, name="waiting for profile") - - -@integration_test -class TestCPUProfiling(ProfilingTest): - config_overrides = { - "libbeat_instrumentation_enabled": "true", - "libbeat_profiling_cpu_enabled": "true", - "libbeat_profiling_cpu_interval": "1s", - "libbeat_profiling_cpu_duration": "5s", - } - - def test_self_profiling(self): - """CPU profiling enabled""" - - def create_load(): - payload_path = self.get_payload_path("transactions_spans.ndjson") - with open(payload_path, 'rb') as f: - requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) - - # Wait for profiling to begin, and then start sending data - # to the server to create some CPU load. - - time.sleep(1) - start = datetime.now() - while datetime.now()-start < timedelta(seconds=5): - create_load() - self.wait_for_profile() - - expected_metric_fields = set([u"cpu.ns", u"samples.count", u"duration"]) - metric_fields = self.metric_fields() - self.assertEqual(metric_fields, expected_metric_fields) - - -@integration_test -class TestHeapProfiling(ProfilingTest): - config_overrides = { - "libbeat_instrumentation_enabled": "true", - "libbeat_profiling_heap_enabled": "true", - "libbeat_profiling_heap_interval": "1s", - } - - def test_self_profiling(self): - """Heap profiling enabled""" - - time.sleep(1) - self.wait_for_profile() - - expected_metric_fields = set([ - u"alloc_objects.count", - u"inuse_objects.count", - u"alloc_space.bytes", - u"inuse_space.bytes", - ]) - metric_fields = self.metric_fields() - self.assertEqual(metric_fields, expected_metric_fields) diff --git a/tests/system/test_pipelines.py b/tests/system/test_pipelines.py index 6d9e5d9c9a4..d4ed6b2aa30 100644 --- a/tests/system/test_pipelines.py +++ b/tests/system/test_pipelines.py @@ -48,7 +48,7 @@ def config(self): return cfg def test_setup_pipelines(self): - assert self.log_contains("No pipeline callback registered") + assert self.log_contains("Pipeline registration disabled") wait_until_pipelines(self.es, []) diff --git a/tests/system/test_requests.py b/tests/system/test_requests.py deleted file mode 100644 index 27bf0b8ccb3..00000000000 --- a/tests/system/test_requests.py +++ /dev/null @@ -1,218 +0,0 @@ -from collections import defaultdict -import gzip -import requests -import threading -import time -import zlib -from io import BytesIO - -from apmserver import ServerBaseTest, ClientSideBaseTest, CorsBaseTest - - -class Test(ServerBaseTest): - - def test_ok(self): - r = self.request_intake() - assert r.status_code == 202, r.status_code - assert r.text == "", r.text - - def test_ok_verbose(self): - r = self.request_intake(url='http://localhost:8200/intake/v2/events?verbose') - assert r.status_code == 202, r.status_code - assert r.json() == {"accepted": 4}, r.json() - - def test_empty(self): - r 
= self.request_intake(data={}) - assert r.status_code == 400, r.status_code - - def test_not_existent(self): - r = self.request_intake(url='http://localhost:8200/transactionX') - assert r.status_code == 404, r.status_code - - def test_method_not_allowed(self): - r = requests.get(self.intake_url) - assert r.status_code == 400, r.status_code - - def test_bad_json(self): - r = self.request_intake(data="invalid content") - assert r.status_code == 400, r.status_code - - def test_validation_fail(self): - data = self.get_event_payload(name="invalid-event.ndjson") - r = self.request_intake(data=data) - assert r.status_code == 400, r.status_code - assert "failed to validate transaction: error validating JSON" in r.text, r.text - - def test_rum_default_disabled(self): - r = self.request_intake(url='http://localhost:8200/intake/v2/rum/events') - assert r.status_code == 403, r.status_code - - def test_healthcheck(self): - healthcheck_url = 'http://localhost:8200/' - r = requests.get(healthcheck_url) - assert r.status_code == 200, r.status_code - - def test_gzip(self): - events = self.get_event_payload().encode("utf-8") - out = BytesIO() - - with gzip.GzipFile(fileobj=out, mode="w") as f: - f.write(events) - - r = requests.post(self.intake_url, data=out.getvalue(), - headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/x-ndjson'}) - assert r.status_code == 202, r.status_code - - def test_deflate(self): - events = self.get_event_payload().encode("utf-8") - compressed_data = zlib.compress(events) - - r = requests.post(self.intake_url, data=compressed_data, - headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/x-ndjson'}) - assert r.status_code == 202, r.status_code - - def test_gzip_error(self): - events = self.get_event_payload() - r = requests.post(self.intake_url, json=events, - headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/x-ndjson'}) - assert r.status_code == 400, r.status_code - - def test_deflate_error(self): - events = self.get_event_payload() - r = requests.post(self.intake_url, data=events, - headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/x-ndjson'}) - assert r.status_code == 400, r.status_code - - def test_expvar_default(self): - """expvar should not be exposed by default""" - r = requests.get(self.expvar_url) - assert r.status_code == 404, r.status_code - - -class ClientSideTest(ClientSideBaseTest): - - def test_ok(self): - r = self.request_intake() - assert r.status_code == 202, r.status_code - - def test_sourcemap_upload_fail(self): - path = self._beat_path_join( - 'testdata', - 'sourcemap', - 'bundle.js.map') - file = open(path) - r = requests.post(self.sourcemap_url, - files={'sourcemap': file}) - assert r.status_code == 400, r.status_code - - -class CorsTest(CorsBaseTest): - - def test_ok(self): - r = self.request_intake(headers={'Origin': 'http://www.elastic.co', 'content-type': 'application/x-ndjson'}) - assert r.headers['Access-Control-Allow-Origin'] == 'http://www.elastic.co', r.headers - assert r.status_code == 202, r.status_code - - def test_bad_origin(self): - # origin must include protocol and match exactly the allowed origin - r = self.request_intake(headers={'Origin': 'www.elastic.co', 'content-type': 'application/x-ndjson'}) - assert r.status_code == 403, r.status_code - - def test_no_origin(self): - r = self.request_intake() - assert r.status_code == 403, r.status_code - - def test_preflight(self): - r = requests.options(self.intake_url, - data=self.get_event_payload(), - headers={'Origin': 
'http://www.elastic.co', - 'Access-Control-Request-Method': 'POST', - 'Access-Control-Request-Headers': 'Content-Type, Content-Encoding'}) - assert r.status_code == 200, r.status_code - assert r.headers['Access-Control-Allow-Origin'] == 'http://www.elastic.co', r.headers - assert r.headers['Access-Control-Allow-Headers'] == 'Content-Type, Content-Encoding, Accept', r.headers - assert r.headers['Access-Control-Allow-Methods'] == 'POST, OPTIONS', r.headers - assert r.headers['Vary'] == 'Origin', r.headers - assert r.headers['Content-Length'] == '0', r.headers - assert r.headers['Access-Control-Max-Age'] == '3600', r.headers - - def test_preflight_bad_headers(self): - for h in [{'Access-Control-Request-Method': 'POST'}, {'Origin': 'www.elastic.co'}]: - r = requests.options(self.intake_url, - json=self.get_event_payload(), - headers=h) - assert r.status_code == 200, r.status_code - assert 'Access-Control-Allow-Origin' not in r.headers.keys(), r.headers - assert r.headers['Access-Control-Allow-Headers'] == 'Content-Type, Content-Encoding, Accept', r.headers - assert r.headers['Access-Control-Allow-Methods'] == 'POST, OPTIONS', r.headers - - -class RateLimitTest(ClientSideBaseTest): - - def fire_events(self, data_file, iterations, split_ips=False): - events = self.get_event_payload(name=data_file) - threads = [] - codes = defaultdict(int) - - def fire(x): - ip = '10.11.12.13' - if split_ips and x % 2: - ip = '10.11.12.14' - r = self.request_intake(data=events, - headers={'content-type': 'application/x-ndjson', 'X-Forwarded-For': ip}) - codes[r.status_code] += 1 - return r.status_code - - # the rate limit can be hit, because every event in a request is counted - for x in range(iterations): - threads.append(threading.Thread(target=fire, args=(x,))) - - for t in threads: - t.start() - time.sleep(0.01) - - for t in threads: - t.join() - return codes - - # limit: 16, burst_multiplier: 3, burst: 48 - def test_rate_limit(self): - # all requests from the same ip - # 19 events, batch size 10 => 20+1 events per request - codes = self.fire_events("ratelimit.ndjson", 3) - assert set(codes.keys()) == set([202]), codes - - def test_rate_limit_hit(self): - # all requests from the same ip - codes = self.fire_events("ratelimit.ndjson", 5) - assert set(codes.keys()) == set([202, 429]), codes - assert codes[429] == 2, codes - assert codes[202] == 3, codes - - def test_rate_limit_small_hit(self): - # all requests from the same ip - # 4 events, batch size 10 => 10+1 events per request - codes = self.fire_events("events.ndjson", 8) - assert set(codes.keys()) == set([202, 429]), codes - assert codes[429] == 3, codes - assert codes[202] == 5, codes - - def test_rate_limit_only_metadata(self): - # all requests from the same ip - # no events, batch size 10 => 10+1 events per request - codes = self.fire_events("metadata.ndjson", 8) - assert set(codes.keys()) == set([202, 429]), codes - assert codes[429] == 3, codes - assert codes[202] == 5, codes - - def test_multiple_ips_rate_limit(self): - # requests from 2 different ips - codes = self.fire_events("ratelimit.ndjson", 6, True) - assert set(codes.keys()) == set([202]), codes - - def test_multiple_ips_rate_limit_hit(self): - # requests from 2 different ips - codes = self.fire_events("ratelimit.ndjson", 10, True) - assert set(codes.keys()) == set([202, 429]), codes - assert codes[429] == 4, codes - assert codes[202] == 6, codes diff --git a/tests/system/test_sampling.py b/tests/system/test_sampling.py deleted file mode 100644 index 52c6255b4a8..00000000000 --- a/tests/system/test_sampling.py +++ 
/dev/null @@ -1,56 +0,0 @@ -import time - -from apmserver import integration_test -from apmserver import ClientSideElasticTest, ElasticTest, ExpvarBaseTest, ProcStartupFailureTest -from helper import wait_until -from es_helper import index_smap, index_metric, index_transaction - - -@integration_test -class TestKeepUnsampled(ElasticTest): - def config(self): - cfg = super(TestKeepUnsampled, self).config() - cfg.update({"sampling_keep_unsampled": True}) - return cfg - - def test(self): - self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"), - self.intake_url, 'transaction', 9) - self.assert_no_logged_warnings() - docs = self.wait_for_events('transaction', 4, index=index_transaction) - self.approve_docs('keep_unsampled_transactions', docs) - - -@integration_test -class TestDropUnsampled(ElasticTest): - def config(self): - cfg = super(TestDropUnsampled, self).config() - cfg.update({ - "sampling_keep_unsampled": False, - # Enable aggregation to avoid a warning. - "aggregation_enabled": True, - }) - return cfg - - def test(self): - self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"), - self.intake_url, 'transaction', 8) - self.assert_no_logged_warnings() - docs = self.wait_for_events('transaction', 3, index=index_transaction) - self.approve_docs('drop_unsampled_transactions', docs) - - -@integration_test -class TestConfigWarning(ElasticTest): - def config(self): - cfg = super(TestConfigWarning, self).config() - cfg.update({ - "sampling_keep_unsampled": False, - # Disable aggregation to force a warning. - "aggregation_enabled": False, - }) - return cfg - - def test(self): - expected = "apm-server.sampling.keep_unsampled and apm-server.aggregation.transactions.enabled are both false, which will lead to incorrect metrics being reported in the APM UI" - self.assertIn(expected, self.get_log()) diff --git a/tests/system/test_tls.py b/tests/system/test_tls.py deleted file mode 100644 index 5d303a8bb59..00000000000 --- a/tests/system/test_tls.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import requests -import shutil -import ssl -import subprocess -import socket -import pytest -from requests.packages.urllib3.exceptions import SubjectAltNameWarning -requests.packages.urllib3.disable_warnings(SubjectAltNameWarning) - -from apmserver import ServerBaseTest -from apmserver import TimeoutError, integration_test - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -@integration_test -class TestSecureServerBaseTest(ServerBaseTest): - @classmethod - def setUpClass(cls): - # According to https://docs.python.org/2/library/unittest.html#setupclass-and-teardownclass setUp and tearDown - # should be skipped when class is skipped, which is apparently not true. 
-        # This is a hack to avoid running the setup while it should be skipped
-        if not INTEGRATION_TESTS:
-            return
-        cls.config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "config"))
-        cls.cert_path = os.path.join(cls.config_path, "certs")
-        shutil.rmtree(cls.cert_path, ignore_errors=True)
-        cls.create_certs_cmd = os.path.join(cls.config_path, "create_certs.sh")
-        with open(os.devnull, 'wb') as dev_null:
-            subprocess.call([cls.create_certs_cmd, cls.config_path, cls.cert_path,
-                             "foobar"], stdout=dev_null, stderr=dev_null)
-        super(TestSecureServerBaseTest, cls).setUpClass()
-
-    @classmethod
-    def tearDownClass(cls):
-        if not INTEGRATION_TESTS:
-            return
-        super(TestSecureServerBaseTest, cls).tearDownClass()
-        shutil.rmtree(cls.cert_path)
-
-    def setUp(self):
-        self.config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "config"))
-        self.cert_path = os.path.join(self.config_path, "certs")
-        self.ca_cert = os.path.join(self.cert_path, "ca.crt.pem")
-        self.simple_cert = os.path.join(self.cert_path, "simple.crt.pem")
-        self.simple_key = os.path.join(self.cert_path, "simple.key.pem")
-        self.client_cert = os.path.join(self.cert_path, "client.crt.pem")
-        self.client_key = os.path.join(self.cert_path, "client.key.pem")
-        self.server_cert = os.path.join(self.cert_path, "server.crt.pem")
-        self.server_key = os.path.join(self.cert_path, "server.key.pem")
-        self.password = "foobar"
-        self.host = "localhost"
-        self.port = 8200
-        super(TestSecureServerBaseTest, self).setUp()
-
-    def stop_proc(self):
-        self.apmserver_proc.kill_and_wait()
-
-    def ssl_overrides(self):
-        return {}
-
-    def config(self):
-        cfg = super(TestSecureServerBaseTest, self).config()
-        overrides = {
-            "ssl_enabled": "true",
-            "ssl_certificate": self.server_cert,
-            "ssl_key": self.server_key,
-            "ssl_key_passphrase": self.password
-        }
-        cfg.update(overrides)
-        cfg.update(self.ssl_overrides())
-        return cfg
-
-    def ssl_connect(self, min_version=ssl.TLSVersion.TLSv1_1, max_version=ssl.TLSVersion.TLSv1_3,
-                    ciphers=None, cert=None, key=None, ca_cert=None):
-        context = ssl.SSLContext(ssl.PROTOCOL_TLS)
-        context.minimum_version = min_version
-        context.maximum_version = max_version
-        if ciphers:
-            context.set_ciphers(ciphers)
-        if not ca_cert:
-            ca_cert = self.ca_cert
-        context.load_verify_locations(ca_cert)
-        if cert and key:
-            context.load_cert_chain(certfile=cert, keyfile=key, password=self.password)
-        with context.wrap_socket(ssl.socket()) as s:
-            # For TLS 1.3 the client certificate authentication happens after the handshake,
-            # leading to s.connect not failing for invalid or missing client certs.
-            # The authentication happens when the client performs the first read.
-            # Setting a timeout helps speed up the tests, as `s.recv` is blocking.
-            # Timeout errors only happen when the client can read from the socket,
-            # otherwise an SSLError occurs.
- s.connect((self.host, self.port)) - s.sendall(str.encode("sending TLS data")) - s.settimeout(0.5) - try: - s.recv(8) - except socket.timeout: - pass - s.close() - - -class TestSSLBadPassphraseTest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_key_passphrase": "invalid"} - - def setUp(self): - with pytest.raises(TimeoutError): - super(TestSecureServerBaseTest, self).setUp() - - -@integration_test -class TestSSLEnabledNoClientAuthenticationTest(TestSecureServerBaseTest): - # no ssl_overrides necessary as `none` is default - - def test_https_no_cert_ok(self): - self.ssl_connect() - - def test_http_fails(self): - with self.assertRaises(requests.exceptions.HTTPError): - with requests.Session() as session: - try: - session.headers.update({"Connection": "close"}) - resp = session.get("http://localhost:8200") - resp.raise_for_status() - finally: - session.close() - - -@integration_test -class TestSSLEnabledOptionalClientAuthenticationTest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_client_authentication": "optional"} - - def test_https_no_certificate_ok(self): - self.ssl_connect() - - def test_https_verify_cert_if_given(self): - # invalid certificate - with pytest.raises(ssl.SSLError): - self.ssl_connect(cert=self.simple_cert, key=self.simple_key) - - def test_https_self_signed_cert(self): - # CA is not configured server side, so self signed certs are not valid - with pytest.raises(ssl.SSLError): - self.ssl_connect(cert=self.client_cert, key=self.client_key) - - -@integration_test -class TestSSLEnabledOptionalClientAuthenticationWithCATest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_certificate_authorities": self.ca_cert} - - def test_https_no_certificate(self): - # since CA is configured, client auth is required - with pytest.raises(ssl.SSLError): - self.ssl_connect() - - def test_https_verify_cert_if_given(self): - # invalid certificate - with pytest.raises(ssl.SSLError): - self.ssl_connect(cert=self.simple_cert, key=self.simple_key) - - def test_https_auth_cert_ok(self): - self.ssl_connect(cert=self.client_cert, key=self.client_key) - - -@integration_test -class TestSSLEnabledRequiredClientAuthenticationTest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_client_authentication": "required", - "ssl_certificate_authorities": self.ca_cert} - - def test_https_no_cert_fails(self): - with pytest.raises(ssl.SSLError): - self.ssl_connect() - - def test_https_invalid_cert_fails(self): - with pytest.raises(ssl.SSLError): - self.ssl_connect(cert=self.simple_cert, key=self.simple_key) - - def test_https_auth_cert_ok(self): - self.ssl_connect(cert=self.client_cert, key=self.client_key) - - -@integration_test -class TestSSLDefaultSupportedProcotolsTest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_certificate_authorities": self.ca_cert} - - def test_tls_v1_0(self): - with pytest.raises(ssl.SSLError): - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1, - max_version=ssl.TLSVersion.TLSv1, - cert=self.server_cert, key=self.server_key) - - def test_tls_v1_1(self): - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1_1, - max_version=ssl.TLSVersion.TLSv1_1, - cert=self.server_cert, key=self.server_key) - - def test_tls_v1_2(self): - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1_2, - max_version=ssl.TLSVersion.TLSv1_2, - cert=self.server_cert, key=self.server_key) - - def test_tls_v1_3(self): - if ssl.HAS_TLSv1_3: - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1_3, - 
max_version=ssl.TLSVersion.TLSv1_3, - cert=self.server_cert, key=self.server_key) - - -@integration_test -class TestSSLSupportedProcotolsTest(TestSecureServerBaseTest): - def ssl_overrides(self): - return {"ssl_supported_protocols": ["TLSv1.2"], - "ssl_certificate_authorities": self.ca_cert} - - def test_tls_v1_1(self): - with pytest.raises(ssl.SSLError): - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1_1, - max_version=ssl.TLSVersion.TLSv1_1, - cert=self.server_cert, key=self.server_key) - - def test_tls_v1_3(self): - with pytest.raises(ssl.SSLError): - if ssl.HAS_TLSv1_3: - self.ssl_connect(min_version=ssl.TLSVersion.TLSv1_3, - max_version=ssl.TLSVersion.TLSv1_3, - cert=self.server_cert, key=self.server_key) - - def test_tls_v1_2(self): - self.ssl_connect(cert=self.server_cert, key=self.server_key) - - -@integration_test -class TestSSLSupportedCiphersTest(TestSecureServerBaseTest): - # Tests explicitly set TLS 1.2 as cipher suites are not configurable for TLS 1.3 - def ssl_overrides(self): - return {"ssl_cipher_suites": ['ECDHE-RSA-AES-128-GCM-SHA256'], - "ssl_certificate_authorities": self.ca_cert} - - def test_https_no_cipher_set(self): - self.ssl_connect(max_version=ssl.TLSVersion.TLSv1_2, - cert=self.server_cert, key=self.server_key) - - def test_https_supports_cipher(self): - # set the same cipher in the client as set in the server - self.ssl_connect(max_version=ssl.TLSVersion.TLSv1_2, - ciphers='ECDHE-RSA-AES128-GCM-SHA256', - cert=self.server_cert, key=self.server_key) - - def test_https_unsupported_cipher(self): - # client only offers unsupported cipher - with self.assertRaisesRegex(ssl.SSLError, 'SSLV3_ALERT_HANDSHAKE_FAILURE'): - self.ssl_connect(max_version=ssl.TLSVersion.TLSv1_2, - ciphers='ECDHE-RSA-AES256-SHA384', - cert=self.server_cert, key=self.server_key) - - def test_https_no_cipher_selected(self): - # client provides invalid cipher - with self.assertRaisesRegex(ssl.SSLError, 'No cipher can be selected'): - self.ssl_connect(max_version=ssl.TLSVersion.TLSv1_2, - ciphers='AES1sd28-CCM8', - cert=self.server_cert, key=self.server_key) diff --git a/tests/system/transaction.approved.json b/tests/system/transaction.approved.json index 024ab9d4273..741ab42b26c 100644 --- a/tests/system/transaction.approved.json +++ b/tests/system/transaction.approved.json @@ -9,7 +9,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391451Z", @@ -105,7 +105,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391568Z", @@ -179,7 +179,6 @@ "id": "85925e55b43f4341", "name": "GET /api/types", "result": "200", - "sampled": false, "span_count": { "started": 0 }, @@ -201,7 +200,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.391639Z", @@ -298,6 +297,7 @@ "geo": { "continent_name": "North America", "country_iso_code": "US", + "country_name": "United States", "location": { "lat": 37.751, "lon": -97.822 @@ -309,7 +309,7 @@ "id": "container-id" }, "ecs": { - "version": "1.5.0" + "version": "1.11.0" }, "event": { "ingested": "2020-08-11T09:55:04.338986Z", @@ -324,14 +324,12 @@ }, "http": { "request": { - "body": { - "original": { - "additional": { - "bar": 123, - "req": "additional information" - }, - "str": "hello world" - } + "body.original": { + "additional": { + "bar": 123, + "req": "additional information" + }, + "str": "hello world" }, "cookies": { "c1": "v1", @@ 
-360,12 +358,8 @@ "Mozilla Chrome Edge" ] }, - "method": "post", - "referrer": "http://localhost:8000/test/e2e/", - "socket": { - "encrypted": true, - "remote_address": "8.8.8.8" - } + "method": "POST", + "referrer": "http://localhost:8000/test/e2e/" }, "response": { "finished": true, @@ -468,10 +462,6 @@ } }, "name": "GET /api/types", - "page": { - "referer": "http://localhost:8000/test/e2e/", - "url": "http://localhost:8000/test/e2e/general-usecase/" - }, "result": "success", "sampled": true, "span_count": { diff --git a/tools.go b/tools.go index 64bcf2f3c55..b00e39b4afc 100644 --- a/tools.go +++ b/tools.go @@ -24,9 +24,6 @@ package main import ( - _ "github.com/jstemmer/go-junit-report" - _ "github.com/reviewdog/reviewdog" - _ "github.com/t-yuki/gocover-cobertura" - - _ "github.com/elastic/go-licenser" + _ "github.com/jstemmer/go-junit-report" // go.mod/go.sum + _ "gotest.tools/gotestsum/cmd" // go.mod/go.sum ) diff --git a/tools/go.mod b/tools/go.mod new file mode 100644 index 00000000000..303ea106613 --- /dev/null +++ b/tools/go.mod @@ -0,0 +1,12 @@ +module github.com/elastic/apm-server/tools + +go 1.16 + +require ( + github.com/elastic/elastic-package v0.11.3 + github.com/elastic/go-licenser v0.3.1 + github.com/reviewdog/reviewdog v0.13.0 + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c + go.elastic.co/go-licence-detector v0.5.0 + honnef.co/go/tools v0.2.0 +) diff --git a/tools/go.sum b/tools/go.sum new file mode 100644 index 00000000000..e941f2c4b0c --- /dev/null +++ b/tools/go.sum @@ -0,0 +1,1514 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod 
h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AlecAivazis/survey/v2 v2.2.15 h1:6UNMnk+YGegYFiPfdTOyZDIN+m08x2nGnqOn15BWcEQ= +github.com/AlecAivazis/survey/v2 v2.2.15/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger 
v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PaesslerAG/gval v1.0.0 h1:GEKnRwkWDdf9dOmKcNrar9EA1bz1z9DqPIO1+iLzhd8= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= +github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEsylIk= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= +github.com/ProtonMail/go-crypto 
v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod 
h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.30.15/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aymerick/raymond v2.0.2+incompatible h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0= +github.com/aymerick/raymond v2.0.2+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bradleyfalzon/ghinstallation v1.1.1 h1:pmBXkxgM1WeF8QYvDLT5kuQiHMcmf+X015GI0KM/E3I= +github.com/bradleyfalzon/ghinstallation v1.1.1/go.mod h1:vyCmHTciHx/uuyN82Zc3rXN3X2KTK8nUTCrTMwAhcug= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= 
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= 
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creasty/defaults v1.5.1 h1:j8WexcS3d/t4ZmllX4GEkl4wIB/trOr035ajcLHCISM= +github.com/creasty/defaults v1.5.1/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/deislabs/oras v0.11.1/go.mod h1:39lCtf8Q6WDC7ul9cnyWXONNzKvabEKk+AX+L0ImnQk= +github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/elastic-package v0.11.3 h1:/IOwNVVJSE+7WPOUYLkroOLtSFbkd7QC5cj5LTr/1Es= +github.com/elastic/elastic-package v0.11.3/go.mod h1:RJpz7wcAUuRsAVNXy4YgcVbYJ0MZKtkJ+Nsrg0Y18Bw= +github.com/elastic/go-elasticsearch/v7 v7.13.1 h1:PaM3V69wPlnwR+ne50rSKKn0RNDYnnOFQcuGEI0ce80= +github.com/elastic/go-elasticsearch/v7 v7.13.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= +github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= +github.com/elastic/go-ucfg v0.8.3 h1:leywnFjzr2QneZZWhE6uWd+QN/UpP0sdJRHYyuFvkeo= +github.com/elastic/go-ucfg v0.8.3/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo= +github.com/elastic/package-spec/code/go v0.0.0-20210623152222-b358e974b7f9 h1:qvoqy6W/mhBY1t4xxP82oy34VTeF+MEqfVLiIGNBsEs= +github.com/elastic/package-spec/code/go v0.0.0-20210623152222-b358e974b7f9/go.mod h1:t0uvhLQGg3D4iQ5lSQEQs4YYS53MIIS05v0zm0fIBPM= 
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= 
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= +github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod 
h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= 
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.6 h1:epWc+q5qSgsy7A7+/HYyxLF37vLEYdPSkNB9G8mRqjw= +github.com/go-openapi/strfmt v0.19.6/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts= +github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= +github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= +github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= +github.com/google/go-github/v37 v37.0.0 h1:rCspN8/6kB1BAJWZfuafvHhyfIo5fkAulaP/3bOQ/tM= +github.com/google/go-github/v37 v37.0.0/go.mod h1:LM7in3NmXDrX58GbEHy7FtNLbI2JijX93RnMKvWG3m4= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 h1:OggOMmdI0JLwg1FkOKH9S7fVHF0oEm8PX6S8kAdpOps= +github.com/google/licenseclassifier 
v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= 
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01 h1:HiJF8Mek+I7PY0Bm+SuhkwaAZSZP83sw6rrTMrgZ0io= +github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01/go.mod h1:1DWDZmeYf0LX30zscWb7K9rUMeirNeBMd5Dum+seUhc= +github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1/go.mod h1:RsN5RGgVYeXpcXNtWyztD5VIe7VNSEqpJvF2iEH7QvI= +github.com/haya14busa/go-sarif v0.0.0-20210102043135-e2c5fed2fa3d/go.mod h1:1Hkn3JseGMB/hv1ywzkapVQDWV3bFgp6POZobZmR/5g= +github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5/go.mod h1:FGO/dXIFZnan7KvvUSFk1hYMnoVNzB6NTMPrmke8SSI= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= +github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/justinas/nosurf v1.1.1/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ= +github.com/karrick/godirwalk v1.15.6 h1:Yf2mmR8TJy+8Fa0SuQVto5SYap6IF7lNVX4Jdl8G1qA= +github.com/karrick/godirwalk v1.15.6/go.mod 
h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5 h1:hyz3dwM5QLc1Rfoz4FuWJQG5BN7tc6K1MndAUnGpQr4= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/lightstep-tracer-common/golang/gogo 
v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= +github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/markbates/pkger v0.17.1 h1:/MKEtWqtc0mZvu9OinB9UzVN9iYCwLWuyUv4Bw+PCno= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= 
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term 
v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo 
v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible 
h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reva2/bitbucket-insights-api v1.0.0 h1:lpQ/Q7OmnG04w/EM77piOwZBxP41PeTlbytXxVrnplA= +github.com/reva2/bitbucket-insights-api v1.0.0/go.mod h1:pLs+ki3MKUntrPryxaGIvpRLiEtBhwfJ/uvxQIMfqHU= +github.com/reviewdog/errorformat v0.0.0-20210517100703-fd739bda5dda h1:5Y9XKToBKO/WPJ9qq30nvpu9DUNy+nwvT8vcSwk/n8w= +github.com/reviewdog/errorformat v0.0.0-20210517100703-fd739bda5dda/go.mod h1:AqhrP0G7F9YRROF10JQwdd4cNO8bdm6bY6KzcOc3Cp8= +github.com/reviewdog/go-bitbucket v0.0.0-20201024094602-708c3f6a7de0 h1:XZ60Bp2UqwaJ6fDQExoFVrgs4nIzwBCy9ct6GCj9hH8= +github.com/reviewdog/go-bitbucket v0.0.0-20201024094602-708c3f6a7de0/go.mod h1:5JbWAMFyq9hbISZawRyIe7QTcLaptvCIvmZnYo+1VvA= +github.com/reviewdog/reviewdog v0.13.0 h1:f9DItB+OPnmsvU6ITvISnpqcT6rMUvC+I0PT6kiZRtA= +github.com/reviewdog/reviewdog v0.13.0/go.mod h1:IkTrA1PhI7Og7gq+6i9YTDZvF7R8EuC4vRbrvfyBUkQ= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= 
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= 
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vvakame/sdlog v0.0.0-20200409072131-7c0d359efddc h1:El7LEavRpa49dYFE9ezO8aQxQn5E7u7eQkFsaXsoQAY= +github.com/vvakame/sdlog v0.0.0-20200409072131-7c0d359efddc/go.mod h1:MmhrKtbECoUJTctfak+MnOFoJ9XQqYZ7chcwV9O7v3I= +github.com/xanzy/go-gitlab v0.50.1 h1:eH1G0/ZV1j81rhGrtbcePjbM5Ern7mPA4Xjt+yE+2PQ= +github.com/xanzy/go-gitlab v0.50.1/go.mod h1:Q+hQhV508bDPoBijv7YjK/Lvlb4PhVhJdKqXVQrUoAE= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic 
v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.elastic.co/go-licence-detector v0.5.0 h1:YXPCyt9faKMdJ8uMrkcI4patk8WZ0ME5oaIhYBUsRU4= +go.elastic.co/go-licence-detector v0.5.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20200616162219-07bebbe343e9 h1:SgmspiKqqI4Du0T87bPBEezUSzVOKhKDgconpLrfyuc= +golang.org/x/build v0.0.0-20200616162219-07bebbe343e9/go.mod h1:ia5pRNoJUuxRhXkmwkySu4YBTbXHSKig2ie6daQXihg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200406213809-066fd1390ee0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0 h1:LX7NFCFYOHzr7WHaYiRUpeipZe9o5L8T+2F4Z798VDw= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a h1:89EorDSnBRFywcvGsJvpxw2IsiDMI+DeM7iZOaunfHs= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +helm.sh/helm/v3 v3.6.3 h1:0nKDyXJr23nI3JrcP7HH7NcR+CYRvro/52Dvr1KhGO0= +helm.sh/helm/v3 v3.6.3/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api 
v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= +k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= +k8s.io/apiextensions-apiserver v0.21.0 h1:Nd4uBuweg6ImzbxkC1W7xUNZcCV/8Vt10iTdTIVF3hw= +k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= +k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= +k8s.io/cli-runtime v0.21.3 h1:eXevRomULAAGjQ7m6qo+AWHvtVRqaLG8WQICEBwjtmo= +k8s.io/cli-runtime v0.21.3/go.mod h1:h65y0uXIXDnNjd5J+F3CvQU3ZNplH4+rjqbII7JkD4A= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= +k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= +k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg= +k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= +k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kubectl v0.21.0 h1:WZXlnG/yjcE4LWO2g6ULjFxtzK6H1TKzsfaBFuVIhNg= +k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= +k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= +sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= +sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= +sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= +sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= +sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod 
h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
+sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg=
+sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/tools/tools.go b/tools/tools.go
new file mode 100644
index 00000000000..07142bb7e91
--- /dev/null
+++ b/tools/tools.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This file creates dependencies on build/test tools, so we can
+// track them in go.mod. See:
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+
+//+build tools
+
+package main
+
+import (
+	_ "github.com/reviewdog/reviewdog/cmd/reviewdog" // go.mod/go.sum
+	_ "github.com/t-yuki/gocover-cobertura"          // go.mod/go.sum
+	_ "go.elastic.co/go-licence-detector"            // go.mod/go.sum
+	_ "honnef.co/go/tools/cmd/staticcheck"           // go.mod/go.sum
+
+	_ "github.com/elastic/elastic-package" // go.mod/go.sum
+	_ "github.com/elastic/go-licenser"     // go.mod/go.sum
+)
diff --git a/transform/transform.go b/transform/transform.go
deleted file mode 100644
index d44eea774ec..00000000000
--- a/transform/transform.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package transform
-
-import (
-	"context"
-	"regexp"
-
-	"github.com/elastic/beats/v7/libbeat/beat"
-
-	"github.com/elastic/apm-server/sourcemap"
-)
-
-type Transformable interface {
-	Transform(context.Context, *Config) []beat.Event
-}
-
-// Config holds general transformation configuration.
-type Config struct {
-	RUM RUMConfig
-}
-
-// RUMConfig holds RUM-related transformation configuration.
-type RUMConfig struct {
-	LibraryPattern      *regexp.Regexp
-	ExcludeFromGrouping *regexp.Regexp
-	SourcemapStore      *sourcemap.Store
-}
diff --git a/utility/common.go b/utility/common.go
index b5bd672ae92..9101e451f01 100644
--- a/utility/common.go
+++ b/utility/common.go
@@ -18,8 +18,11 @@
 package utility
 
 import (
+	"net/http"
+	"net/textproto"
 	"net/url"
 	"path"
+	"strings"
 )
 
 func UrlPath(p string) string {
@@ -39,32 +42,8 @@ func CleanUrlPath(p string) string {
 	return url.String()
 }
 
-// InsertInMap modifies `data` *in place*, inserting `values` at the given `key`.
-// If `key` doesn't exist in data (at the top level), it gets created.
-// If the value under `key` is not a map, InsertInMap does nothing.
-func InsertInMap(data map[string]interface{}, key string, values map[string]interface{}) {
-	if data == nil || values == nil || key == "" {
-		return
-	}
-
-	if _, ok := data[key]; !ok {
-		data[key] = make(map[string]interface{})
-	}
-
-	if nested, ok := data[key].(map[string]interface{}); ok {
-		for newKey, newValue := range values {
-			nested[newKey] = newValue
-		}
-	}
-
-}
-
-// Contains does the obvious
-func Contains(s string, a []string) bool {
-	for _, x := range a {
-		if x == s {
-			return true
-		}
-	}
-	return false
+// UserAgentHeader fetches all `user-agent` values from a given header and combines them into one string.
+// Values are separated by `, `.
+func UserAgentHeader(header http.Header) string {
+	return strings.Join(header[textproto.CanonicalMIMEHeaderKey("User-Agent")], ", ")
 }
diff --git a/utility/common_test.go b/utility/common_test.go
index efecd262242..a7891464b8f 100644
--- a/utility/common_test.go
+++ b/utility/common_test.go
@@ -43,61 +43,3 @@ func TestCleanUrlPath(t *testing.T) {
 		assert.Equal(t, test.CleanedUrl, cleanedUrl, fmt.Sprintf("(%v): Expected %s, got %s", idx, test.CleanedUrl, cleanedUrl))
 	}
 }
-
-func TestInsertInMap(t *testing.T) {
-	type M = map[string]interface{}
-	testData := []struct {
-		data   M
-		key    string
-		values M
-		result M
-	}{
-		{
-			nil,
-			"a",
-			M{"a": 1},
-			nil,
-		},
-		{
-			M{"a": 1},
-			"",
-			nil,
-			M{"a": 1},
-		},
-		{
-			M{"a": 1},
-			"",
-			M{},
-			M{"a": 1},
-		},
-		{
-			M{},
-			"",
-			M{"a": 1},
-			M{},
-		},
-		{
-			M{"a": 1},
-			"b",
-			M{"c": 2},
-			M{"a": 1, "b": M{"c": 2}},
-		},
-		{
-			M{"a": 1},
-			"a",
-			M{"b": 2},
-			M{"a": 1},
-		},
-		{
-			M{"a": M{"b": 1}},
-			"a",
-			M{"c": 2},
-			M{"a": M{"b": 1, "c": 2}},
-		},
-	}
-	for idx, test := range testData {
-		InsertInMap(test.data, test.key, test.values)
-		assert.Equal(t, test.result, test.data,
-			fmt.Sprintf("At (%v): Expected %s, got %s", idx, test.result, test.data))
-	}
-}
diff --git a/utility/data_fetcher.go b/utility/data_fetcher.go
deleted file mode 100644
index fcb36b437b1..00000000000
--- a/utility/data_fetcher.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package utility
-
-import (
-	"encoding/json"
-	"errors"
-	"net/http"
-	"net/textproto"
-	"strings"
-	"time"
-
-	"github.com/elastic/beats/v7/libbeat/common"
-)
-
-type ManualDecoder struct {
-	Err error
-}
-
-func ErrFetch(field string, path []string) error {
-	if path != nil {
-		field = strings.Join(path, ".") + "." + field
-	}
-	return errors.New("error fetching field " + field)
-}
-
-func (d *ManualDecoder) Float64(base map[string]interface{}, key string, keys ...string) float64 {
-	val := getDeep(base, keys...)[key]
-	if valFloat, ok := val.(float64); ok {
-		return valFloat
-	} else if valNumber, ok := val.(json.Number); ok {
-		if valFloat, err := valNumber.Float64(); err != nil {
-			d.Err = err
-		} else {
-			return valFloat
-		}
-	}
-
-	d.Err = ErrFetch(key, keys)
-	return 0.0
-}
-
-func (d *ManualDecoder) Float64Ptr(base map[string]interface{}, key string, keys ...string) *float64 {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valFloat, ok := val.(float64); ok {
-		return &valFloat
-	} else if valNumber, ok := val.(json.Number); ok {
-		if valFloat, err := valNumber.Float64(); err != nil {
-			d.Err = err
-		} else {
-			return &valFloat
-		}
-	}
-
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) IntPtr(base map[string]interface{}, key string, keys ...string) *int {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valNumber, ok := val.(json.Number); ok {
-		if valInt, err := valNumber.Int64(); err != nil {
-			d.Err = err
-		} else {
-			i := int(valInt)
-			return &i
-		}
-	} else if valFloat, ok := val.(float64); ok {
-		valInt := int(valFloat)
-		if valFloat == float64(valInt) {
-			return &valInt
-		}
-	} else if valFloat, ok := val.(float32); ok {
-		valInt := int(valFloat)
-		if valFloat == float32(valInt) {
-			return &valInt
-		}
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) Int64Ptr(base map[string]interface{}, key string, keys ...string) *int64 {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valNumber, ok := val.(json.Number); ok {
-		if valInt, err := valNumber.Int64(); err != nil {
-			d.Err = err
-		} else {
-			i := int64(valInt)
-			return &i
-		}
-	} else if valFloat, ok := val.(float64); ok {
-		valInt := int64(valFloat)
-		if valFloat == float64(valInt) {
-			return &valInt
-		}
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) Int(base map[string]interface{}, key string, keys ...string) int {
-	if val := d.IntPtr(base, key, keys...); val != nil {
-		return *val
-	}
-	d.Err = ErrFetch(key, keys)
-	return 0
-}
-
-func (d *ManualDecoder) StringPtr(base map[string]interface{}, key string, keys ...string) *string {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	}
-	if valStr, ok := val.(string); ok {
-		return &valStr
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) String(base map[string]interface{}, key string, keys ...string) string {
-	if val := d.StringPtr(base, key, keys...); val != nil {
-		return *val
-	}
-	d.Err = ErrFetch(key, keys)
-	return ""
-}
-
-func (d *ManualDecoder) StringArr(base map[string]interface{}, key string, keys ...string) []string {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	}
-	arr := getDeep(base, keys...)[key]
-	if valArr, ok := arr.([]interface{}); ok {
-		strArr := make([]string, len(valArr))
-		for idx, v := range valArr {
-			if valStr, ok := v.(string); ok {
-				strArr[idx] = valStr
-			} else {
-				d.Err = ErrFetch(key, keys)
-				return nil
-			}
-		}
-		return strArr
-	}
-	if strArr, ok := arr.([]string); ok {
-		return strArr
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) Interface(base map[string]interface{}, key string, keys ...string) interface{} {
-	return getDeep(base, keys...)[key]
-}
-
-func (d *ManualDecoder) InterfaceArr(base map[string]interface{}, key string, keys ...string) []interface{} {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valArr, ok := val.([]interface{}); ok {
-		return valArr
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) BoolPtr(base map[string]interface{}, key string, keys ...string) *bool {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valBool, ok := val.(bool); ok {
-		return &valBool
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) MapStr(base map[string]interface{}, key string, keys ...string) map[string]interface{} {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return nil
-	} else if valMapStr, ok := val.(map[string]interface{}); ok {
-		return valMapStr
-	}
-	d.Err = ErrFetch(key, keys)
-	return nil
-}
-
-func (d *ManualDecoder) TimeRFC3339(base map[string]interface{}, key string, keys ...string) time.Time {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return time.Time{}
-	}
-	if valStr, ok := val.(string); ok {
-		if valTime, err := time.Parse(time.RFC3339, valStr); err == nil {
-			return valTime
-		}
-	}
-	d.Err = ErrFetch(key, keys)
-	return time.Time{}
-}
-
-func (d *ManualDecoder) TimeEpochMicro(base map[string]interface{}, key string, keys ...string) time.Time {
-	val := getDeep(base, keys...)[key]
-	if val == nil {
-		return time.Time{}
-	}
-
-	if valNum, ok := val.(json.Number); ok {
-		if t, err := valNum.Int64(); err == nil {
-			sec := t / 1000000
-			microsec := t - (sec * 1000000)
-			return time.Unix(sec, microsec*1000).UTC()
-		}
-	}
-	d.Err = ErrFetch(key, keys)
-	return time.Time{}
-}
-
-func (d *ManualDecoder) Headers(base map[string]interface{}, fieldName string) http.Header {
-
-	h := d.MapStr(base, fieldName)
-	if d.Err != nil || len(h) == 0 {
-		return nil
-	}
-	httpHeader := http.Header{}
-	for key, val := range h {
-		if v, ok := val.(string); ok {
-			httpHeader.Add(key, v)
-			continue
-		}
-		vals := d.StringArr(h, key)
-		if d.Err != nil {
-			return nil
-		}
-		for _, v := range vals {
-			httpHeader.Add(key, v)
-		}
-	}
-	return httpHeader
-}
-
-// UserAgentHeader fetches all `user-agent` values from a given header and combines them into one string.
-// Values are separated by `;`.
-func (d *ManualDecoder) UserAgentHeader(header http.Header) string { - return strings.Join(header[textproto.CanonicalMIMEHeaderKey("User-Agent")], ", ") -} - -func getDeep(raw map[string]interface{}, keys ...string) map[string]interface{} { - if raw == nil { - return nil - } - if len(keys) == 0 { - return raw - } - if valMap, ok := raw[keys[0]].(map[string]interface{}); ok { - return getDeep(valMap, keys[1:]...) - } else if valMap, ok := raw[keys[0]].(common.MapStr); ok { - return getDeep(valMap, keys[1:]...) - } - return nil -} diff --git a/utility/data_fetcher_test.go b/utility/data_fetcher_test.go deleted file mode 100644 index d74b85d5422..00000000000 --- a/utility/data_fetcher_test.go +++ /dev/null @@ -1,255 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package utility - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - str, str2 = "foo", "bar" - integer, integer2, intFl32, intFl64 = 12.0, 24.0, 32, 64 - fl64, fl64Also float64 = 7.1, 90.1 - boolTrue, boolFalse = true, false - timeRFC3339 = "2017-05-30T18:53:27.154Z" - decoderBase = map[string]interface{}{ - "true": boolTrue, - "str": str, - "fl32": float32(5.4), - "fl64": fl64, - "intfl32": float32(intFl32), - "intfl64": float64(intFl64), - "int": integer, - "strArr": []interface{}{"c", "d"}, - "time": timeRFC3339, - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "false": boolFalse, - "str": str2, - "fl32": float32(78.4), - "fl64": fl64Also, - "int": integer2, - "strArr": []interface{}{"k", "d"}, - "intArr": []interface{}{1, 2}, - }, - }, - } -) - -type testStr struct { - key string - keys []string - out interface{} - err error -} - -func TestFloat64(t *testing.T) { - for _, test := range []testStr{ - {key: "fl64", keys: []string{"a", "b"}, out: fl64Also, err: nil}, - {key: "fl64", keys: []string{}, out: fl64, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: 0.0, err: ErrFetch("missing", []string{"a", "b"})}, - {key: "str", keys: []string{"a", "b"}, out: 0.0, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.Float64(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestFloat64Ptr(t *testing.T) { - var outnil *float64 - for _, test := range []testStr{ - {key: "fl64", keys: []string{"a", "b"}, out: &fl64Also, err: nil}, - {key: "fl64", keys: []string{}, out: &fl64, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.Float64Ptr(decoderBase, test.key, test.keys...) 
- assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestIntPtr(t *testing.T) { - var outnil *int - for _, test := range []testStr{ - {key: "intfl32", keys: []string{}, out: &intFl32, err: nil}, - {key: "intfl64", keys: []string{}, out: &intFl64, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.IntPtr(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestInt64Ptr(t *testing.T) { - var outnil *int64 - int64Fl64 := int64(intFl64) - for _, test := range []testStr{ - {key: "intfl64", keys: []string{}, out: &int64Fl64, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.Int64Ptr(decoderBase, test.key, test.keys...) - assert.Equal(t, test.out, out) - assert.Equal(t, test.err, decoder.Err) - } -} - -func TestInt(t *testing.T) { - for _, test := range []testStr{ - {key: "intfl32", keys: []string{}, out: intFl32, err: nil}, - {key: "intfl64", keys: []string{}, out: intFl64, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: 0, err: ErrFetch("missing", []string{"a", "b"})}, - {key: "str", keys: []string{"a", "b"}, out: 0, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.Int(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestStrPtr(t *testing.T) { - var outnil *string - for _, test := range []testStr{ - {key: "str", keys: []string{}, out: &str, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: &str2, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "int", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("int", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.StringPtr(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestStr(t *testing.T) { - for _, test := range []testStr{ - {key: "str", keys: []string{}, out: str, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: str2, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: "", err: ErrFetch("missing", []string{"a", "b"})}, - {key: "int", keys: []string{"a", "b"}, out: "", err: ErrFetch("int", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.String(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestStrArray(t *testing.T) { - var outnil []string - for _, test := range []testStr{ - {key: "strArr", keys: []string{}, out: []string{"c", "d"}, err: nil}, - {key: "strArr", keys: []string{"a", "b"}, out: []string{"k", "d"}, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("str", []string{"a", "b"})}, - {key: "intArr", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("intArr", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.StringArr(decoderBase, test.key, test.keys...) 
- assert.Equal(t, test.err, decoder.Err) - assert.Equal(t, test.out, out) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestInterface(t *testing.T) { - for _, test := range []testStr{ - {key: "str", keys: []string{}, out: str, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: str2, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: nil, err: nil}, - } { - decoder := ManualDecoder{} - out := decoder.Interface(decoderBase, test.key, test.keys...) - assert.Equal(t, test.out, out) - assert.Equal(t, test.err, decoder.Err) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestInterfaceArray(t *testing.T) { - var outnil []interface{} - for _, test := range []testStr{ - {key: "strArr", keys: []string{"a", "b"}, out: []interface{}{"k", "d"}, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "int", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("int", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.InterfaceArr(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} -func TestBoolPtr(t *testing.T) { - var outnil *bool - for _, test := range []testStr{ - {key: "true", keys: []string{}, out: &boolTrue, err: nil}, - {key: "false", keys: []string{"a", "b"}, out: &boolFalse, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "int", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("int", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.BoolPtr(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} -func TestMapStr(t *testing.T) { - var outnil map[string]interface{} - for _, test := range []testStr{ - {key: "a", keys: []string{}, out: decoderBase["a"], err: nil}, - {key: "b", keys: []string{"a"}, out: decoderBase["a"].(map[string]interface{})["b"], err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outnil, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outnil, err: ErrFetch("str", []string{"a", "b"})}, - } { - decoder := ManualDecoder{} - out := decoder.MapStr(decoderBase, test.key, test.keys...) - assert.Equal(t, out, test.out) - assert.Equal(t, decoder.Err, test.err) - } -} - -func TestTimeRFC3339(t *testing.T) { - var outZero time.Time - tp := time.Date(2017, 5, 30, 18, 53, 27, 154*1e6, time.UTC) - for _, test := range []testStr{ - {key: "time", keys: []string{}, out: tp, err: nil}, - {key: "missing", keys: []string{"a", "b"}, out: outZero, err: nil}, - {key: "str", keys: []string{"a", "b"}, out: outZero, err: ErrFetch("str", []string{"a", "b"})}, - {key: "b", keys: []string{"a"}, out: outZero, err: ErrFetch("b", []string{"a"})}, - } { - decoder := ManualDecoder{} - out := decoder.TimeRFC3339(decoderBase, test.key, test.keys...) - assert.InDelta(t, out.Unix(), test.out.(time.Time).Unix(), time.Millisecond.Seconds()*10) - assert.Equal(t, decoder.Err, test.err) - } -} diff --git a/utility/forwarded.go b/utility/forwarded.go index f87c108bf6c..74ff3860787 100644 --- a/utility/forwarded.go +++ b/utility/forwarded.go @@ -22,22 +22,22 @@ import ( "strings" ) -// ForwardedHeader holds information extracted from a "Forwarded" HTTP header. -type ForwardedHeader struct { +// forwardedHeader holds information extracted from a "Forwarded" HTTP header. 
+type forwardedHeader struct { For string Host string Proto string } -// ParseForwarded parses a "Forwarded" HTTP header. -func ParseForwarded(f string) ForwardedHeader { +// parseForwarded parses a "Forwarded" HTTP header. +func parseForwarded(f string) forwardedHeader { // We only consider the first value in the sequence, // if there are multiple. Disregard everything after // the first comma. if comma := strings.IndexRune(f, ','); comma != -1 { f = f[:comma] } - var result ForwardedHeader + var result forwardedHeader for f != "" { field := f if semi := strings.IndexRune(f, ';'); semi != -1 { diff --git a/utility/forwarded_test.go b/utility/forwarded_test.go index 889d1d33931..c054caecf4c 100644 --- a/utility/forwarded_test.go +++ b/utility/forwarded_test.go @@ -15,27 +15,25 @@ // specific language governing permissions and limitations // under the License. -package utility_test +package utility import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/elastic/apm-server/utility" ) func TestParseForwarded(t *testing.T) { type test struct { name string header string - expect utility.ForwardedHeader + expect forwardedHeader } tests := []test{{ name: "Forwarded", header: "by=127.0.0.1; for=127.1.1.1; Host=\"forwarded.invalid:443\"; proto=HTTPS", - expect: utility.ForwardedHeader{ + expect: forwardedHeader{ For: "127.1.1.1", Host: "forwarded.invalid:443", Proto: "HTTPS", @@ -43,26 +41,26 @@ func TestParseForwarded(t *testing.T) { }, { name: "Forwarded-Multi", header: "host=first.invalid, host=second.invalid", - expect: utility.ForwardedHeader{ + expect: forwardedHeader{ Host: "first.invalid", }, }, { name: "Forwarded-Malformed-Fields-Ignored", header: "what; nonsense=\"; host=first.invalid", - expect: utility.ForwardedHeader{ + expect: forwardedHeader{ Host: "first.invalid", }, }, { name: "Forwarded-Trailing-Separators", header: "host=first.invalid;,", - expect: utility.ForwardedHeader{ + expect: forwardedHeader{ Host: "first.invalid", }, }} for _, test := range tests { t.Run(test.name, func(t *testing.T) { - parsed := utility.ParseForwarded(test.header) + parsed := parseForwarded(test.header) assert.Equal(t, test.expect, parsed) }) } diff --git a/utility/ip.go b/utility/ip.go index 8a90adc343a..af636d42d91 100644 --- a/utility/ip.go +++ b/utility/ip.go @@ -20,6 +20,7 @@ package utility import ( "net" "net/http" + "strconv" ) // ExtractIP calls ExtractIPFromHeader(r) to extract a valid IP address. If no valid IP can be extracted from headers, @@ -53,3 +54,18 @@ func ParseIP(inp string) net.IP { } return nil } + +// ParseTCPAddr returns a net.Addr parsed from a given input, if it is a valid IP:port pair. Otherwise returns nil. 
+func ParseTCPAddr(in string) net.Addr { + if in == "" { + return nil + } + host, portstr := splitHost(in) + if ip := net.ParseIP(host); ip != nil { + port, err := strconv.ParseUint(portstr, 10, 16) + if err == nil { + return &net.TCPAddr{IP: ip, Port: int(port)} + } + } + return nil +} diff --git a/utility/ip_test.go b/utility/ip_test.go index a5999e26c31..b01933e39cf 100644 --- a/utility/ip_test.go +++ b/utility/ip_test.go @@ -19,10 +19,12 @@ package utility_test import ( "fmt" + "net" "net/http" "testing" "github.com/stretchr/testify/require" + "google.golang.org/grpc/metadata" "github.com/elastic/apm-server/utility" @@ -58,6 +60,17 @@ func TestExtractIP(t *testing.T) { assert.Equal(t, "2001:db8:cafe::17", utility.ExtractIP(req).String()) } +func TestExtractIPFromGRPCMetadata(t *testing.T) { + ip := "123.0.0.1" + // metadata.Pairs stores keys as lowercase; we want to test that + // ExtractIPFromHeader is accounting for that. + md := metadata.Pairs("x-real-ip", ip) + headers := http.Header(md) + extractedIP := utility.ExtractIPFromHeader(headers) + assert.NotNil(t, extractedIP) + assert.Equal(t, ip, extractedIP.String()) +} + func TestExtractIPFromHeader(t *testing.T) { for name, tc := range map[string]struct { header map[string]string @@ -131,6 +144,20 @@ func TestParseIP(t *testing.T) { } } +func TestParseTCPAddr(t *testing.T) { + assert.Equal(t, &net.TCPAddr{ + IP: net.ParseIP("192.168.0.1"), + Port: 8080, + }, utility.ParseTCPAddr("192.168.0.1:8080")) + + assert.Equal(t, &net.TCPAddr{ + IP: net.ParseIP("::1"), + Port: 8080, + }, utility.ParseTCPAddr("[::1]:8080")) + + assert.Equal(t, nil, utility.ParseTCPAddr("::1")) +} + func BenchmarkExtractIP(b *testing.B) { remote := "10.11.12.13" remoteWithPort := remote + ":8080" diff --git a/utility/map_str_enhancer.go b/utility/map_str_enhancer.go deleted file mode 100644 index 9cd87b61931..00000000000 --- a/utility/map_str_enhancer.go +++ /dev/null @@ -1,210 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package utility - -import ( - "encoding/json" - "net/http" - "reflect" - "strings" - "time" - - "github.com/elastic/beats/v7/libbeat/common" -) - -// Set takes a map and changes key to point to the provided value. -// In case the provided value is nil or of length 0, the key is deleted from the map .
-func Set(m common.MapStr, key string, val interface{}) { - update(m, key, val, true) -} - -func update(m common.MapStr, key string, val interface{}, remove bool) { - if m == nil || key == "" { - return - } - - if val == nil { - if remove { - delete(m, key) - } - return - } - - switch value := val.(type) { - case *bool: - if value != nil { - m[key] = *value - } else if remove { - delete(m, key) - } - case *int: - if value != nil { - m[key] = *value - } else if remove { - delete(m, key) - } - case *int64: - if newVal := val.(*int64); newVal != nil { - m[key] = *newVal - } else if remove { - delete(m, key) - } - case *string: - if value != nil { - m[key] = *value - } else if remove { - delete(m, key) - } - case common.MapStr: - if len(value) > 0 { - newValMap := common.MapStr{} - for k, v := range value { - update(newValMap, k, v, remove) - } - if len(newValMap) > 0 { - m[key] = newValMap - } else if remove { - delete(m, key) - } - } else if remove { - delete(m, key) - } - case map[string]interface{}: - if len(value) > 0 { - newValMap := map[string]interface{}{} - for k, v := range value { - update(newValMap, k, v, remove) - } - if len(newValMap) > 0 { - m[key] = newValMap - } else if remove { - delete(m, key) - } - } else if remove { - delete(m, key) - } - case json.Number: - if floatVal, err := value.Float64(); err != nil { - update(m, key, value.String(), remove) - } else { - update(m, key, floatVal, remove) - } - case float64: - if value == float64(int64(value)) { - m[key] = int64(value) - } else { - m[key] = common.Float(value) - } - case *float64: - if value != nil { - m[key] = *value - } else if remove { - delete(m, key) - } - case float32: - if value == float32(int32(value)) { - m[key] = int32(value) - } else { - m[key] = common.Float(value) - } - case string, bool, complex64, complex128: - m[key] = val - case int, int8, int16, int32, int64, uint, uint8, uint32, uint64: - m[key] = val - case http.Header: - if value != nil { - m[key] = value - } else if remove { - delete(m, key) - } - default: - v := reflect.ValueOf(val) - switch v.Type().Kind() { - case reflect.Slice, reflect.Array: - if v.Len() > 0 { - m[key] = val - } else if remove { - delete(m, key) - } - - // do not store values of following type - // has been rejected so far by the libbeat normalization - case reflect.Interface, reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: - - default: - m[key] = val - } - } -} - -// DeepUpdate splits the key by '.' and merges the given value at m[de-dottedKeys]. 
-func DeepUpdate(m common.MapStr, dottedKeys string, val interface{}) { - if m == nil { - m = common.MapStr{} - } - keys := strings.Split(dottedKeys, ".") - if len(keys) == 0 { - return - } - reverse(keys) - v := val - for _, k := range keys { - subMap := common.MapStr{} - update(subMap, k, v, false) - v = subMap - } - m.DeepUpdate(v.(common.MapStr)) -} - -func reverse(slice []string) { - size := len(slice) - for i := 0; i < len(slice)/2; i++ { - slice[i], slice[size-i-1] = slice[size-i-1], slice[i] - } -} - -func MillisAsMicros(ms float64) common.MapStr { - m := common.MapStr{} - m["us"] = int(ms * 1000) - return m -} - -func TimeAsMicros(t time.Time) common.MapStr { - if t.IsZero() { - return nil - } - - m := common.MapStr{} - m["us"] = t.UnixNano() / 1000 - return m -} - -func Prune(m common.MapStr) common.MapStr { - for k, v := range m { - if v == nil { - delete(m, k) - } - } - return m -} - -func AddID(fields common.MapStr, key, id string) { - if id != "" { - fields[key] = common.MapStr{"id": id} - } -} diff --git a/utility/map_str_enhancer_test.go b/utility/map_str_enhancer_test.go deleted file mode 100644 index 1938d749730..00000000000 --- a/utility/map_str_enhancer_test.go +++ /dev/null @@ -1,256 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
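A note on the DeepUpdate helper removed above: it builds the nested value from the innermost key outward, reversing the dotted path and wrapping the accumulated value in a fresh one-key map per iteration. An equivalent fold, sketched without the common.MapStr plumbing (iterating the keys backwards rather than reversing them in place):

package main

import "fmt"

// nest wraps val in one single-key map per element of keys, innermost key first:
// nest([]string{"a", "b", "c"}, 1) == map[a:map[b:map[c:1]]].
func nest(keys []string, val interface{}) interface{} {
	for i := len(keys) - 1; i >= 0; i-- {
		val = map[string]interface{}{keys[i]: val}
	}
	return val
}

func main() { fmt.Println(nest([]string{"a", "b", "c"}, 1)) }

This is the behaviour TestDeepAdd below relies on: DeepUpdate(m, "a.b.c", 1) yields M{"a": M{"b": M{"c": 1}}}.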
- -package utility - -import ( - "fmt" - "reflect" - "testing" - "unsafe" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/common" -) - -const addKey = "added" - -func TestAddGeneral(t *testing.T) { - var m common.MapStr - Set(m, "s", "s") - assert.Nil(t, m) - - m = common.MapStr{} - Set(m, "", "") - assert.Equal(t, common.MapStr{}, m) -} - -func TestEmptyCollections(t *testing.T) { - m := common.MapStr{"foo": "bar", "user": common.MapStr{"id": "1", "name": "bar"}} - add := common.MapStr{} - Set(m, "user", add) - assert.Equal(t, common.MapStr{"foo": "bar"}, m) - - m = common.MapStr{"foo": "bar", "user": common.MapStr{"id": "1", "name": "bar"}} - add = common.MapStr{"id": nil, "email": nil, "info": common.MapStr{"a": nil}} - Set(m, "user", add) - assert.Equal(t, common.MapStr{"foo": "bar"}, m) - - m = common.MapStr{"foo": "bar", "user": common.MapStr{"id": "1", "name": "bar"}} - add = map[string]interface{}{"id": nil, "email": nil, "info": map[string]interface{}{"a": nil}} - Set(m, "user", add) - assert.Equal(t, common.MapStr{"foo": "bar"}, m) - - m = common.MapStr{"foo": "bar", "user": common.MapStr{"id": "1", "name": "bar"}} - add = map[string]interface{}{} - Set(m, "user", add) - assert.Equal(t, common.MapStr{"foo": "bar"}, m) -} - -func TestIgnoredTypes(t *testing.T) { - m := common.MapStr{} - - Set(m, "foo", make(chan int)) - assert.Equal(t, common.MapStr{}, m) - - Set(m, "foo", func() {}) - assert.Equal(t, common.MapStr{}, m) - - uintPtr := uintptr(8) - Set(m, "foo", uintPtr) - assert.Equal(t, common.MapStr{}, m) - - a := []int{} - Set(m, "foo", unsafe.Pointer(&a)) - assert.Equal(t, common.MapStr{}, m) -} - -func TestAdd(t *testing.T) { - existing := "foo" - newArrMapStr := []common.MapStr{{"b": "bar"}} - var nilArrMapStr []common.MapStr - - newArrStr := []string{"bar"} - var nilArrStr []string - - newMap := map[string]interface{}{"b": "bar"} - var nilMap map[string]interface{} - - newMapStr := common.MapStr{"b": "bar"} - var nilMapStr common.MapStr - - newStr := "bar" - var nilStr *string - - newInt := 123 - var nilInt *int - - newBool := true - var nilBool *bool - - tests := []struct { - v interface{} - expV interface{} - nilV interface{} - }{ - { - v: "some string", - expV: "some string", - nilV: nil, - }, - { - v: &newBool, - expV: newBool, - nilV: nilBool, - }, - { - v: &newInt, - expV: newInt, - nilV: nilInt, - }, - { - v: &newStr, - expV: newStr, - nilV: nilStr, - }, - { - v: newMapStr, - expV: newMapStr, - nilV: nilMapStr, - }, - { - v: newMap, - expV: newMap, - nilV: nilMap, - }, - { - v: newArrStr, - expV: newArrStr, - nilV: nilArrStr, - }, - { - v: newArrMapStr, - expV: newArrMapStr, - nilV: nilArrMapStr, - }, - { - v: float64(5.98), - expV: common.Float(5.980000), - nilV: nil, - }, - { - v: float32(5.987654321), - expV: common.Float(float32(5.987654321)), - nilV: nil, - }, - { - v: float64(5), - expV: int64(5), - nilV: nil, - }, - { - v: float32(5), - expV: int32(5), - nilV: nil, - }, - } - - for idx, te := range tests { - // add new value - m := common.MapStr{"existing": existing} - Set(m, addKey, te.v) - expected := common.MapStr{"existing": existing, addKey: te.expV} - assert.Equal(t, expected, m, - fmt.Sprintf("<%v>: Set new value - Expected: %v, Actual: %v", idx, expected, m)) - - // replace existing value - m = common.MapStr{addKey: existing} - Set(m, addKey, te.v) - expected = common.MapStr{addKey: te.expV} - assert.Equal(t, expected, m, - fmt.Sprintf("<%v>: Replace existing value - Expected: %v, Actual: %v", idx, 
expected, m)) - - // remove empty value - m = common.MapStr{addKey: existing} - Set(m, addKey, te.nilV) - expected = common.MapStr{} - assert.Equal(t, expected, m, - fmt.Sprintf("<%v>: Remove empty value - Expected: %v, Actual: %v", idx, expected, m)) - } -} - -func TestAddEnsureCopy(t *testing.T) { - for _, test := range []struct { - v interface{} - }{ - { - common.MapStr{"b": "bar"}, - }, - { - map[string]interface{}{"b": "bar"}, - }, - } { - dest := common.MapStr{} - Set(dest, "key", test.v) - - // modify the original value and ensure if doesn't modify the "add"ed value - reflect.ValueOf(test.v).SetMapIndex(reflect.ValueOf("f"), reflect.ValueOf("foo")) - actual := dest["key"] - - assert.NotEqual(t, actual, test.v) - } -} - -func TestDeepAdd(t *testing.T) { - type M = common.MapStr - m := M{} - DeepUpdate(m, "a.b.c", 1) - DeepUpdate(m, "a.b.d", 2) - DeepUpdate(m, "a.b.d.3", 3) - DeepUpdate(m, "a.b.d.4", 4) - DeepUpdate(m, "a.x.y", 5) - DeepUpdate(m, "a.x.z.nil", nil) - DeepUpdate(m, "a.nil", nil) - - assert.Equal(t, M{ - "a": M{ - "b": M{ - "c": 1, - "d": M{ - "3": 3, - "4": 4, - }, - }, - "x": M{ - "y": 5, - }, - }, - }, m) - - m = M{} - DeepUpdate(m, "a", 1) - DeepUpdate(m, "", 2) - assert.Equal(t, M{"a": 1}, m) -} - -func TestMillisAsMicros(t *testing.T) { - ms := 4.5 - m := MillisAsMicros(ms) - expectedMap := common.MapStr{"us": 4500} - assert.Equal(t, expectedMap, m) -} diff --git a/utility/remoteaddr.go b/utility/remoteaddr.go index 203bc112266..3f88e6d615a 100644 --- a/utility/remoteaddr.go +++ b/utility/remoteaddr.go @@ -24,7 +24,7 @@ import ( ) var parseHeadersInOrder = []func(http.Header) string{ - parseForwarded, + parseForwardedHeader, parseXRealIP, parseXForwardedFor, } @@ -67,9 +67,9 @@ func splitHost(in string) (host, port string) { return host, port } -func parseForwarded(header http.Header) string { - if fwd := header.Get("Forwarded"); fwd != "" { - forwarded := ParseForwarded(fwd) +func parseForwardedHeader(header http.Header) string { + if fwd := getHeader(header, "Forwarded"); fwd != "" { + forwarded := parseForwarded(fwd) if forwarded.For != "" { host, _ := splitHost(forwarded.For) return host @@ -79,11 +79,11 @@ func parseForwarded(header http.Header) string { } func parseXRealIP(header http.Header) string { - return header.Get("X-Real-Ip") + return getHeader(header, "X-Real-Ip") } func parseXForwardedFor(header http.Header) string { - if xff := header.Get("X-Forwarded-For"); xff != "" { + if xff := getHeader(header, "X-Forwarded-For"); xff != "" { if sep := strings.IndexRune(xff, ','); sep > 0 { xff = xff[:sep] } @@ -91,3 +91,17 @@ func parseXForwardedFor(header http.Header) string { } return "" } + +func getHeader(header http.Header, key string) string { + if v := header.Get(key); v != "" { + return v + } + + // header.Get() internally canonicalizes key names, but metadata.Pairs uses + // lowercase keys. Using the lowercase key name allows this function to be + // used for gRPC metadata. + if v, ok := header[strings.ToLower(key)]; ok && len(v) > 0 { + return v[0] + } + return "" +} diff --git a/validation/validator.go b/validation/validator.go deleted file mode 100644 index e70d5f0637f..00000000000 --- a/validation/validator.go +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package validation - -import ( - "strings" - - "github.com/pkg/errors" - "github.com/santhosh-tekuri/jsonschema" -) - -// Error represents an error due to JSON validation. -type Error struct { - Err error -} - -func (e *Error) Error() string { - return "error validating JSON: " + e.Err.Error() -} - -func (e *Error) Unwrap() error { - return e.Err -} - -func CreateSchema(schemaData string, url string) *jsonschema.Schema { - compiler := jsonschema.NewCompiler() - if err := compiler.AddResource(url, strings.NewReader(schemaData)); err != nil { - panic(err) - } - compiler.Draft = jsonschema.Draft7 - schema, err := compiler.Compile(url) - if err != nil { - panic(err) - } - return schema -} - -// ValidateObject checks that raw is a non-nil, decoded JSON object -// (i.e. has type map[string]interface{}), and validates against the -// provided schema. -func ValidateObject(raw interface{}, schema *jsonschema.Schema) (map[string]interface{}, error) { - if raw == nil { - return nil, &Error{errors.New("input missing")} - } - obj, ok := raw.(map[string]interface{}) - if !ok { - return nil, &Error{errors.New("invalid input type")} - } - if err := Validate(raw, schema); err != nil { - return nil, err - } - return obj, nil -} - -func Validate(raw interface{}, schema *jsonschema.Schema) error { - if err := schema.ValidateInterface(raw); err != nil { - return &Error{err} - } - return nil -} diff --git a/validation/validator_test.go b/validation/validator_test.go deleted file mode 100644 index d4f14034b68..00000000000 --- a/validation/validator_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
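A note on the removed validation helpers above: CreateSchema panics on a malformed schema rather than returning an error, which suits compiling schemas once at package initialization, where a bad schema should fail fast at startup instead of per request. A sketch of that idiom against the removed API (the schema body here is illustrative):

var personSchema = validation.CreateSchema(`{
	"type": "object",
	"properties": {"name": {"type": "string"}},
	"required": ["name"]
}`, "person")

Handlers would then call validation.ValidateObject(decodedPayload, personSchema) for each incoming object, receiving a *validation.Error wrapped in the returned error on failure.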
- -package validation - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCreateSchemaInvalidResource(t *testing.T) { - invalid := `{` - assert.Panics(t, func() { CreateSchema(invalid, "myschema") }) -} - -func TestCreateSchemaInvalidSchema(t *testing.T) { - assert.Panics(t, func() { CreateSchema(invalidSchema, "myschema") }) -} - -func TestCreateSchemaOK(t *testing.T) { - schema := CreateSchema(validSchema, "myschema") - assert.NotNil(t, schema) -} - -func TestValidateFails(t *testing.T) { - data := map[string]interface{}{"age": 12} - schema := CreateSchema(validSchema, "myschema") - err := Validate(data, schema) - assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "missing properties: \"name\"")) -} - -func TestValidateOK(t *testing.T) { - data := map[string]interface{}{"name": "john"} - schema := CreateSchema(validSchema, "myschema") - err := Validate(data, schema) - assert.Nil(t, err) -} - -var invalidSchema = `{ - "id": "person", - "type": "object", - "properties": { - "name":{ - "type": "unknown" - } - } -}` - -var validSchema = `{ - "id": "person", - "type": "object", - "properties": { - "name":{ - "type": "string" - }, - "age":{ - "description": "some age", - "type": "number" - } - }, - "required": ["name"] -}` diff --git a/x-pack/apm-server/aggregation/spanmetrics/aggregator.go b/x-pack/apm-server/aggregation/spanmetrics/aggregator.go index ae9f5072ec6..5318edd0274 100644 --- a/x-pack/apm-server/aggregation/spanmetrics/aggregator.go +++ b/x-pack/apm-server/aggregation/spanmetrics/aggregator.go @@ -14,15 +14,18 @@ import ( logs "github.com/elastic/apm-server/log" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" "github.com/elastic/beats/v7/libbeat/logp" ) +const ( + metricsetName = "service_destination" +) + // AggregatorConfig holds configuration for creating an Aggregator. type AggregatorConfig struct { - // Report is a publish.Reporter for reporting metrics documents. - Report publish.Reporter + // BatchProcessor is a model.BatchProcessor for asynchronously + // processing metrics documents. + BatchProcessor model.BatchProcessor // MaxGroups is the maximum number of distinct service destination // group metrics to store within an aggregation period. Once this @@ -43,8 +46,8 @@ type AggregatorConfig struct { // Validate validates the aggregator config. 
func (config AggregatorConfig) Validate() error { - if config.Report == nil { - return errors.New("Report unspecified") + if config.BatchProcessor == nil { + return errors.New("BatchProcessor unspecified") } if config.MaxGroups <= 0 { return errors.New("MaxGroups unspecified or negative") @@ -155,65 +158,71 @@ func (a *Aggregator) publish(ctx context.Context) error { } now := time.Now() - metricsets := make([]transform.Transformable, 0, size) + batch := make(model.Batch, 0, size) for key, metrics := range a.inactive.m { metricset := makeMetricset(now, key, metrics, a.config.Interval.Milliseconds()) - metricsets = append(metricsets, &metricset) + batch = append(batch, metricset) delete(a.inactive.m, key) } - a.config.Logger.Debugf("publishing %d metricsets", len(metricsets)) - return a.config.Report(ctx, publish.PendingReq{ - Transformables: metricsets, - Trace: true, - }) + a.config.Logger.Debugf("publishing %d metricsets", len(batch)) + return a.config.BatchProcessor.ProcessBatch(ctx, &batch) } -// ProcessTransformables aggregates all transactions contained in -// "in", returning the input. +// ProcessBatch aggregates all spans contained in "b", adding to it any +// metricsets requiring immediate publication. // // This method is expected to be used immediately prior to publishing // the events. -func (a *Aggregator) ProcessTransformables(in []transform.Transformable) []transform.Transformable { +func (a *Aggregator) ProcessBatch(ctx context.Context, b *model.Batch) error { a.mu.RLock() defer a.mu.RUnlock() - out := in - for _, tf := range in { - if span, ok := tf.(*model.Span); ok { - if metricset := a.processSpan(span); metricset != nil { - out = append(out, metricset) - } + for _, event := range *b { + if event.Processor != model.SpanProcessor { + continue + } + if metricsetEvent := a.processSpan(&event); metricsetEvent.Metricset != nil { + *b = append(*b, metricsetEvent) } } - return out + return nil } -func (a *Aggregator) processSpan(span *model.Span) *model.Metricset { - if span.DestinationService == nil || span.DestinationService.Resource == nil { - return nil +func (a *Aggregator) processSpan(event *model.APMEvent) model.APMEvent { + if event.Span.DestinationService == nil || event.Span.DestinationService.Resource == "" { + return model.APMEvent{} } - if span.RepresentativeCount <= 0 { + if event.Span.RepresentativeCount <= 0 { // RepresentativeCount is zero when the sample rate is unknown. // We cannot calculate accurate span metrics without the sample // rate, so we don't calculate any at all in this case. - return nil + return model.APMEvent{} + } + + // For composite spans we use the composite sum duration, which is the sum of + // pre-aggregated spans and excludes time gaps that are counted in the reported + // span duration. For non-composite spans we just use the reported span duration. 
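+ // For example, a composite span with Count=25 and Sum=700 (milliseconds) on an
+ // event with RepresentativeCount=2 contributes an aggregated count of 50 and a
+ // summed duration of 1400ms (exercised by TestAggregateCompositeSpan below).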
+ count := 1 + duration := event.Event.Duration + if event.Span.Composite != nil { + count = event.Span.Composite.Count + duration = time.Duration(event.Span.Composite.Sum * float64(time.Millisecond)) } key := aggregationKey{ - serviceEnvironment: span.Metadata.Service.Environment, - serviceName: span.Metadata.Service.Name, - outcome: span.Outcome, - resource: *span.DestinationService.Resource, + serviceEnvironment: event.Service.Environment, + serviceName: event.Service.Name, + agentName: event.Agent.Name, + outcome: event.Event.Outcome, + resource: event.Span.DestinationService.Resource, } - duration := time.Duration(span.Duration * float64(time.Millisecond)) metrics := spanMetrics{ - count: span.RepresentativeCount, - sum: float64(duration.Microseconds()) * span.RepresentativeCount, + count: float64(count) * event.Span.RepresentativeCount, + sum: float64(duration) * event.Span.RepresentativeCount, } if a.active.storeOrUpdate(key, metrics) { - return nil + return model.APMEvent{} } - metricset := makeMetricset(time.Now(), key, metrics, 0) - return &metricset + return makeMetricset(time.Now(), key, metrics, 0) } type metricsBuffer struct { @@ -245,6 +254,7 @@ type aggregationKey struct { // origin serviceName string serviceEnvironment string + agentName string // destination resource string outcome string @@ -255,43 +265,29 @@ type spanMetrics struct { sum float64 } -func makeMetricset(timestamp time.Time, key aggregationKey, metrics spanMetrics, interval int64) model.Metricset { - out := model.Metricset{ +func makeMetricset(timestamp time.Time, key aggregationKey, metrics spanMetrics, interval int64) model.APMEvent { + return model.APMEvent{ Timestamp: timestamp, - Metadata: model.Metadata{ - Service: model.Service{ - Name: key.serviceName, - Environment: key.serviceEnvironment, - }, + Agent: model.Agent{Name: key.agentName}, + Service: model.Service{ + Name: key.serviceName, + Environment: key.serviceEnvironment, }, - Event: model.MetricsetEventCategorization{ + Event: model.Event{ Outcome: key.outcome, }, - Span: model.MetricsetSpan{ - // TODO add span type/subtype? - DestinationService: model.DestinationService{Resource: &key.resource}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Name: metricsetName, }, - Samples: []model.Sample{ - { - Name: "span.destination.service.response_time.count", - Value: math.Round(metrics.count), - }, - { - Name: "span.destination.service.response_time.sum.us", - Value: math.Round(metrics.sum), + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: key.resource, + ResponseTime: model.AggregatedDuration{ + Count: int(math.Round(metrics.count)), + Sum: time.Duration(math.Round(metrics.sum)), + }, }, }, } - if interval > 0 { - // Only set metricset.period for a positive interval. - // - // An interval of zero means the metricset is computed - // from an instantaneous value, meaning there is no - // aggregation period. 
- out.Samples = append(out.Samples, model.Sample{ - Name: "metricset.period", - Value: float64(interval), - }) - } - return out } diff --git a/x-pack/apm-server/aggregation/spanmetrics/aggregator_test.go b/x-pack/apm-server/aggregation/spanmetrics/aggregator_test.go index 41cff80e76c..835e9369175 100644 --- a/x-pack/apm-server/aggregation/spanmetrics/aggregator_test.go +++ b/x-pack/apm-server/aggregation/spanmetrics/aggregator_test.go @@ -14,28 +14,26 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" ) func BenchmarkAggregateSpan(b *testing.B) { agg, err := NewAggregator(AggregatorConfig{ - Report: makeErrReporter(nil), - Interval: time.Minute, - MaxGroups: 1000, + BatchProcessor: makeErrBatchProcessor(nil), + Interval: time.Minute, + MaxGroups: 1000, }) require.NoError(b, err) - span := makeSpan("test_service", "test_destination", "success", time.Second, 1) + span := makeSpan("test_service", "agent", "test_destination", "success", time.Second, 1) b.RunParallel(func(pb *testing.PB) { for pb.Next() { - agg.ProcessTransformables([]transform.Transformable{span}) + agg.ProcessBatch(context.Background(), &model.Batch{span}) } }) } func TestNewAggregatorConfigInvalid(t *testing.T) { - report := makeErrReporter(nil) + report := makeErrBatchProcessor(nil) type test struct { config AggregatorConfig @@ -44,16 +42,16 @@ func TestNewAggregatorConfigInvalid(t *testing.T) { for _, test := range []test{{ config: AggregatorConfig{}, - err: "Report unspecified", + err: "BatchProcessor unspecified", }, { config: AggregatorConfig{ - Report: report, + BatchProcessor: report, }, err: "MaxGroups unspecified or negative", }, { config: AggregatorConfig{ - Report: report, - MaxGroups: 1, + BatchProcessor: report, + MaxGroups: 1, }, err: "Interval unspecified or negative", }} { @@ -65,16 +63,17 @@ func TestNewAggregatorConfigInvalid(t *testing.T) { } func TestAggregatorRun(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) agg, err := NewAggregator(AggregatorConfig{ - Report: makeChanReporter(reqs), - Interval: 10 * time.Millisecond, - MaxGroups: 1000, + BatchProcessor: makeChanBatchProcessor(batches), + Interval: 10 * time.Millisecond, + MaxGroups: 1000, }) require.NoError(t, err) type input struct { serviceName string + agentName string destination string outcome string count float64 @@ -83,13 +82,13 @@ func TestAggregatorRun(t *testing.T) { destinationX := "destination-X" destinationZ := "destination-Z" inputs := []input{ - {serviceName: "service-A", destination: destinationZ, outcome: "success", count: 2}, - {serviceName: "service-A", destination: destinationX, outcome: "success", count: 1}, - {serviceName: "service-B", destination: destinationZ, outcome: "success", count: 1}, - {serviceName: "service-A", destination: destinationZ, outcome: "success", count: 1}, - {serviceName: "service-A", destination: destinationZ, outcome: "success", count: 0}, - {serviceName: "service-A", outcome: "success", count: 1}, // no destination - {serviceName: "service-A", destination: destinationZ, outcome: "failure", count: 1}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, outcome: "success", count: 2}, + {serviceName: "service-A", agentName: "java", destination: destinationX, outcome: "success", count: 1}, + {serviceName: "service-B", agentName: "python", destination: destinationZ, outcome: "success", 
count: 1}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, outcome: "success", count: 1}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, outcome: "success", count: 0}, + {serviceName: "service-A", agentName: "java", outcome: "success", count: 1}, // no destination + {serviceName: "service-A", agentName: "java", destination: destinationZ, outcome: "failure", count: 1}, } var wg sync.WaitGroup @@ -97,9 +96,12 @@ func TestAggregatorRun(t *testing.T) { wg.Add(1) go func(in input) { defer wg.Done() - span := makeSpan(in.serviceName, in.destination, in.outcome, 100*time.Millisecond, in.count) + span := makeSpan(in.serviceName, in.agentName, in.destination, in.outcome, 100*time.Millisecond, in.count) + batch := model.Batch{span} for i := 0; i < 100; i++ { - assert.Len(t, agg.ProcessTransformables([]transform.Transformable{span}), 1) + err := agg.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + assert.Equal(t, model.Batch{span}, batch) } }(in) } @@ -109,183 +111,228 @@ func TestAggregatorRun(t *testing.T) { go agg.Run() defer agg.Stop(context.Background()) - req := expectPublish(t, reqs) - metricsets := make([]*model.Metricset, len(req.Transformables)) - for i, tf := range req.Transformables { - ms := tf.(*model.Metricset) - require.NotZero(t, ms.Timestamp) - ms.Timestamp = time.Time{} - metricsets[i] = ms - } + batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) - assert.ElementsMatch(t, []*model.Metricset{{ - Metadata: model.Metadata{ - Service: model.Service{Name: "service-A"}, - }, - Event: model.MetricsetEventCategorization{ - Outcome: "success", - }, - Span: model.MetricsetSpan{ - DestinationService: model.DestinationService{Resource: &destinationX}, - }, - Samples: []model.Sample{ - {Name: "span.destination.service.response_time.count", Value: 100.0}, - {Name: "span.destination.service.response_time.sum.us", Value: 10000000.0}, - {Name: "metricset.period", Value: 10}, + assert.ElementsMatch(t, []model.APMEvent{{ + Agent: model.Agent{Name: "java"}, + Service: model.Service{Name: "service-A"}, + Event: model.Event{Outcome: "success"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: destinationX, + ResponseTime: model.AggregatedDuration{ + Count: 100, + Sum: 10 * time.Second, + }, + }, }, }, { - Metadata: model.Metadata{ - Service: model.Service{Name: "service-A"}, - }, - Event: model.MetricsetEventCategorization{ - Outcome: "failure", - }, - Span: model.MetricsetSpan{ - DestinationService: model.DestinationService{Resource: &destinationZ}, - }, - Samples: []model.Sample{ - {Name: "span.destination.service.response_time.count", Value: 100.0}, - {Name: "span.destination.service.response_time.sum.us", Value: 10000000.0}, - {Name: "metricset.period", Value: 10}, + Agent: model.Agent{Name: "java"}, + Service: model.Service{Name: "service-A"}, + Event: model.Event{Outcome: "failure"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: destinationZ, + ResponseTime: model.AggregatedDuration{ + Count: 100, + Sum: 10 * time.Second, + }, + }, }, }, { - Metadata: model.Metadata{ - Service: model.Service{Name: "service-A"}, - }, - Event: model.MetricsetEventCategorization{ - Outcome: "success", - }, - Span: model.MetricsetSpan{ - DestinationService: 
model.DestinationService{Resource: &destinationZ}, - }, - Samples: []model.Sample{ - {Name: "span.destination.service.response_time.count", Value: 300.0}, - {Name: "span.destination.service.response_time.sum.us", Value: 30000000.0}, - {Name: "metricset.period", Value: 10}, + Agent: model.Agent{Name: "java"}, + Service: model.Service{Name: "service-A"}, + Event: model.Event{Outcome: "success"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: destinationZ, + ResponseTime: model.AggregatedDuration{ + Count: 300, + Sum: 30 * time.Second, + }, + }, }, }, { - Metadata: model.Metadata{ - Service: model.Service{Name: "service-B"}, - }, - Event: model.MetricsetEventCategorization{ - Outcome: "success", - }, - Span: model.MetricsetSpan{ - DestinationService: model.DestinationService{Resource: &destinationZ}, - }, - Samples: []model.Sample{ - {Name: "span.destination.service.response_time.count", Value: 100.0}, - {Name: "span.destination.service.response_time.sum.us", Value: 10000000.0}, - {Name: "metricset.period", Value: 10}, + Agent: model.Agent{Name: "python"}, + Service: model.Service{Name: "service-B"}, + Event: model.Event{Outcome: "success"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: destinationZ, + ResponseTime: model.AggregatedDuration{ + Count: 100, + Sum: 10 * time.Second, + }, + }, }, }}, metricsets) select { - case <-reqs: + case <-batches: t.Fatal("unexpected publish") case <-time.After(100 * time.Millisecond): } } +func TestAggregateCompositeSpan(t *testing.T) { + batches := make(chan model.Batch, 1) + agg, err := NewAggregator(AggregatorConfig{ + BatchProcessor: makeChanBatchProcessor(batches), + Interval: 10 * time.Millisecond, + MaxGroups: 1000, + }) + require.NoError(t, err) + + span := makeSpan("service-A", "java", "final_destination", "success", time.Second, 2) + span.Span.Composite = &model.Composite{Count: 25, Sum: 700 /* milliseconds */} + err = agg.ProcessBatch(context.Background(), &model.Batch{span}) + require.NoError(t, err) + + // Start the aggregator after processing to ensure metrics are aggregated deterministically. + go agg.Run() + defer agg.Stop(context.Background()) + + batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) + + assert.Equal(t, []model.APMEvent{{ + Agent: model.Agent{Name: "java"}, + Service: model.Service{Name: "service-A"}, + Event: model.Event{Outcome: "success"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: "final_destination", + ResponseTime: model.AggregatedDuration{ + Count: 50, + Sum: 1400 * time.Millisecond, + }, + }, + }, + }}, metricsets) +} + func TestAggregatorOverflow(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) agg, err := NewAggregator(AggregatorConfig{ - Report: makeChanReporter(reqs), - Interval: 10 * time.Millisecond, - MaxGroups: 2, + BatchProcessor: makeChanBatchProcessor(batches), + Interval: 10 * time.Millisecond, + MaxGroups: 2, }) require.NoError(t, err) // The first two transaction groups will not require immediate publication, // as we have configured the spanmetrics with a maximum of two buckets. 
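// Overflow is detected in processSpan: storeOrUpdate reports whether the span's
// metrics were buffered, and once MaxGroups distinct aggregation keys are active
// any new key falls through to makeMetricset and is appended to the batch being
// processed.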
- var input []transform.Transformable - for i := 0; i < 10; i++ { - input = append(input, makeSpan("service", "destination1", "success", 100*time.Millisecond, 1)) - input = append(input, makeSpan("service", "destination2", "success", 100*time.Millisecond, 1)) + batch := make(model.Batch, 20) + for i := 0; i < len(batch); i += 2 { + batch[i] = makeSpan("service", "agent", "destination1", "success", 100*time.Millisecond, 1) + batch[i+1] = makeSpan("service", "agent", "destination2", "success", 100*time.Millisecond, 1) } - output := agg.ProcessTransformables(input) - assert.Equal(t, input, output) + err = agg.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + assert.Empty(t, batchMetricsets(t, batch)) // The third group will return a metricset for immediate publication. for i := 0; i < 2; i++ { - input = append(input, makeSpan("service", "destination3", "success", 100*time.Millisecond, 1)) + batch = append(batch, makeSpan("service", "agent", "destination3", "success", 100*time.Millisecond, 1)) } - output = agg.ProcessTransformables(input) - assert.Len(t, output, len(input)+2) - assert.Equal(t, input, output[:len(input)]) - - for _, tf := range output[len(input):] { - m, ok := tf.(*model.Metricset) - require.True(t, ok) - require.NotNil(t, m) - require.False(t, m.Timestamp.IsZero()) - - m.Timestamp = time.Time{} - assert.Equal(t, &model.Metricset{ - Metadata: model.Metadata{ - Service: model.Service{Name: "service"}, - }, - Event: model.MetricsetEventCategorization{ - Outcome: "success", - }, - Span: model.MetricsetSpan{ - DestinationService: model.DestinationService{Resource: newString("destination3")}, - }, - Samples: []model.Sample{ - {Name: "span.destination.service.response_time.count", Value: 1.0}, - {Name: "span.destination.service.response_time.sum.us", Value: 100000.0}, - // No metricset.period is recorded as these metrics are instantanous, not aggregated. 
+ err = agg.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + + metricsets := batchMetricsets(t, batch) + assert.Len(t, metricsets, 2) + + for _, m := range metricsets { + assert.Equal(t, model.APMEvent{ + Agent: model.Agent{Name: "agent"}, + Service: model.Service{Name: "service"}, + Event: model.Event{Outcome: "success"}, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{Name: "service_destination"}, + Span: &model.Span{ + DestinationService: &model.DestinationService{ + Resource: "destination3", + ResponseTime: model.AggregatedDuration{ + Count: 1, + Sum: 100 * time.Millisecond, + }, + }, }, }, m) } } func makeSpan( - serviceName string, destinationServiceResource, outcome string, + serviceName, agentName, destinationServiceResource, outcome string, duration time.Duration, count float64, -) *model.Span { - span := &model.Span{ - Metadata: model.Metadata{Service: model.Service{Name: serviceName}}, - Name: serviceName + ":" + destinationServiceResource, - Duration: duration.Seconds() * 1000, - RepresentativeCount: count, - Outcome: outcome, +) model.APMEvent { + event := model.APMEvent{ + Agent: model.Agent{Name: agentName}, + Service: model.Service{Name: serviceName}, + Event: model.Event{ + Outcome: outcome, + Duration: duration, + }, + Processor: model.SpanProcessor, + Span: &model.Span{ + Name: serviceName + ":" + destinationServiceResource, + RepresentativeCount: count, + }, } if destinationServiceResource != "" { - span.DestinationService = &model.DestinationService{ - Resource: &destinationServiceResource, + event.Span.DestinationService = &model.DestinationService{ + Resource: destinationServiceResource, } } - return span + return event } -func makeErrReporter(err error) publish.Reporter { - return func(context.Context, publish.PendingReq) error { return err } +func makeErrBatchProcessor(err error) model.BatchProcessor { + return model.ProcessBatchFunc(func(context.Context, *model.Batch) error { return err }) } -func makeChanReporter(ch chan<- publish.PendingReq) publish.Reporter { - return func(ctx context.Context, req publish.PendingReq) error { +func makeChanBatchProcessor(ch chan<- model.Batch) model.BatchProcessor { + return model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { select { case <-ctx.Done(): return ctx.Err() - case ch <- req: + case ch <- *batch: return nil } - } + }) } -func expectPublish(t *testing.T, ch <-chan publish.PendingReq) publish.PendingReq { +func expectBatch(t *testing.T, ch <-chan model.Batch) model.Batch { t.Helper() select { - case req := <-ch: - return req + case batch := <-ch: + return batch case <-time.After(time.Second * 5): t.Fatal("expected publish") } panic("unreachable") } -func newString(s string) *string { - return &s +func batchMetricsets(t testing.TB, batch model.Batch) []model.APMEvent { + var metricsets []model.APMEvent + for _, event := range batch { + if event.Metricset == nil { + continue + } + require.NotZero(t, event.Timestamp) + event.Timestamp = time.Time{} + metricsets = append(metricsets, event) + } + return metricsets } diff --git a/x-pack/apm-server/aggregation/txmetrics/aggregator.go b/x-pack/apm-server/aggregation/txmetrics/aggregator.go index 383d938a552..d88594dcf07 100644 --- a/x-pack/apm-server/aggregation/txmetrics/aggregator.go +++ b/x-pack/apm-server/aggregation/txmetrics/aggregator.go @@ -10,18 +10,18 @@ import ( "math" "strings" "sync" + "sync/atomic" "time" "github.com/cespare/xxhash/v2" "github.com/pkg/errors" 
"github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/go-hdrhistogram" logs "github.com/elastic/apm-server/log" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" ) const ( @@ -36,6 +36,12 @@ const ( // we will record a count of 5000 (2 * 2.5 * histogramCountScale). When we // publish metrics, we will scale down to 5 (5000 / histogramCountScale). histogramCountScale = 1000 + + // tooManyGroupsLoggerRateLimit is the maximum frequency at which + // "too many groups" log messages are logged. + tooManyGroupsLoggerRateLimit = time.Minute + + metricsetName = "transaction" ) // Aggregator aggregates transaction durations, periodically publishing histogram metrics. @@ -44,17 +50,22 @@ type Aggregator struct { stopping chan struct{} stopped chan struct{} - config AggregatorConfig - userAgentLookup *userAgentLookup + config AggregatorConfig + metrics *aggregatorMetrics // heap-allocated for 64-bit alignment + tooManyGroupsLogger *logp.Logger mu sync.RWMutex active, inactive *metrics } +type aggregatorMetrics struct { + overflowed int64 +} + // AggregatorConfig holds configuration for creating an Aggregator. type AggregatorConfig struct { - // Report is a publish.Reporter for reporting metrics documents. - Report publish.Reporter + // BatchProcessor is a model.BatchProcessor for asynchronously processing metrics documents. + BatchProcessor model.BatchProcessor // Logger is the logger for logging histogram aggregation/publishing. // @@ -76,16 +87,12 @@ type AggregatorConfig struct { // to maintain in the HDR Histograms. HDRHistogramSignificantFigures // must be in the range [1,5]. HDRHistogramSignificantFigures int - - // RUMUserAgentLRUSize is the size of the LRU cache for mapping RUM - // page-load User-Agent strings to browser names. - RUMUserAgentLRUSize int } // Validate validates the aggregator config. 
func (config AggregatorConfig) Validate() error { - if config.Report == nil { - return errors.New("Report unspecified") + if config.BatchProcessor == nil { + return errors.New("BatchProcessor unspecified") } if config.MaxTransactionGroups <= 0 { return errors.New("MaxTransactionGroups unspecified or negative") @@ -96,9 +103,6 @@ func (config AggregatorConfig) Validate() error { if n := config.HDRHistogramSignificantFigures; n < 1 || n > 5 { return errors.Errorf("HDRHistogramSignificantFigures (%d) outside range [1,5]", n) } - if config.RUMUserAgentLRUSize <= 0 { - return errors.New("RUMUserAgentLRUSize unspecified or negative") - } return nil } @@ -110,17 +114,14 @@ func NewAggregator(config AggregatorConfig) (*Aggregator, error) { if config.Logger == nil { config.Logger = logp.NewLogger(logs.TransactionMetrics) } - ual, err := newUserAgentLookup(config.RUMUserAgentLRUSize) - if err != nil { - return nil, err - } return &Aggregator{ - stopping: make(chan struct{}), - stopped: make(chan struct{}), - config: config, - userAgentLookup: ual, - active: newMetrics(config.MaxTransactionGroups), - inactive: newMetrics(config.MaxTransactionGroups), + stopping: make(chan struct{}), + stopped: make(chan struct{}), + config: config, + metrics: &aggregatorMetrics{}, + tooManyGroupsLogger: config.Logger.WithOptions(logs.WithRateLimit(tooManyGroupsLoggerRateLimit)), + active: newMetrics(config.MaxTransactionGroups), + inactive: newMetrics(config.MaxTransactionGroups), }, nil } @@ -179,6 +180,25 @@ func (a *Aggregator) Stop(ctx context.Context) error { return nil } +// CollectMonitoring may be called to collect monitoring metrics from the +// aggregation. It is intended to be used with libbeat/monitoring.NewFunc. +// +// The metrics should be added to the "apm-server.aggregation.txmetrics" registry. +func (a *Aggregator) CollectMonitoring(_ monitoring.Mode, V monitoring.Visitor) { + V.OnRegistryStart() + defer V.OnRegistryFinished() + + a.mu.RLock() + defer a.mu.RUnlock() + + m := a.active + m.mu.RLock() + defer m.mu.RUnlock() + + monitoring.ReportInt(V, "active_groups", int64(m.entries)) + monitoring.ReportInt(V, "overflowed", atomic.LoadInt64(&a.metrics.overflowed)) +} + func (a *Aggregator) publish(ctx context.Context) error { // We hold a.mu only long enough to swap the metrics. This will // be blocked by metrics updates, which is OK, as we prefer not @@ -197,65 +217,66 @@ func (a *Aggregator) publish(ctx context.Context) error { // the specific time period (date_range) on the metrics documents. 
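// A registration sketch for CollectMonitoring above (hypothetical placement;
// the real wiring appears in the main.go change later in this diff): the
// aggregator's counters are exposed under the "apm-server.aggregation.txmetrics"
// registry via libbeat monitoring.
registry := monitoring.Default.NewRegistry("apm-server.aggregation")
monitoring.NewFunc(registry, "txmetrics", a.CollectMonitoring, monitoring.Report)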
now := time.Now() - metricsets := make([]transform.Transformable, 0, a.inactive.entries) + batch := make(model.Batch, 0, a.inactive.entries) for hash, entries := range a.inactive.m { for _, entry := range entries { - counts, values := entry.transactionMetrics.histogramBuckets() - metricset := makeMetricset(entry.transactionAggregationKey, hash, now, counts, values) - metricsets = append(metricsets, &metricset) + totalCount, counts, values := entry.transactionMetrics.histogramBuckets() + batch = append(batch, makeMetricset(entry.transactionAggregationKey, hash, now, totalCount, counts, values)) } delete(a.inactive.m, hash) } a.inactive.entries = 0 - a.config.Logger.Debugf("publishing %d metricsets", len(metricsets)) - return a.config.Report(ctx, publish.PendingReq{ - Transformables: metricsets, - Trace: true, - }) + a.config.Logger.Debugf("publishing %d metricsets", len(batch)) + return a.config.BatchProcessor.ProcessBatch(ctx, &batch) } -// ProcessTransformables aggregates all transactions contained in -// "in", returning the input with any metricsets requiring immediate -// publication appended. +// ProcessBatch aggregates all transactions contained in "b", appending to it any +// metricsets requiring immediate publication. // -// This method is expected to be used immediately prior to publishing -// the events, so that the metricsets requiring immediate publication -// can be included in the same batch. -func (a *Aggregator) ProcessTransformables(in []transform.Transformable) []transform.Transformable { - out := in - for _, tf := range in { - if tx, ok := tf.(*model.Transaction); ok { - if metricset := a.AggregateTransaction(tx); metricset != nil { - out = append(out, metricset) - } +// This method is expected to be used immediately prior to publishing the +// events, so that the metricsets requiring immediate publication can be +// included in the same batch. +func (a *Aggregator) ProcessBatch(ctx context.Context, b *model.Batch) error { + for _, event := range *b { + if event.Processor != model.TransactionProcessor { + continue + } + if metricsetEvent := a.AggregateTransaction(event); metricsetEvent.Metricset != nil { + *b = append(*b, metricsetEvent) + } } - return out + return nil } // AggregateTransaction aggregates transaction metrics. // // If the transaction cannot be aggregated due to the maximum number -// of transaction groups being exceeded, then a *model.Metricset will +// of transaction groups being exceeded, then a metricset APMEvent will // be returned which should be published immediately, along with the -// transaction. Otherwise, the returned metricset will be nil. -func (a *Aggregator) AggregateTransaction(tx *model.Transaction) *model.Metricset { - key := a.makeTransactionAggregationKey(tx) +// transaction. Otherwise, the returned event will be the zero value. +func (a *Aggregator) AggregateTransaction(event model.APMEvent) model.APMEvent { + if event.Transaction.RepresentativeCount <= 0 { + return model.APMEvent{} + } + + key := a.makeTransactionAggregationKey(event) hash := key.hash() - count := transactionCount(tx) - duration := time.Duration(tx.Duration * float64(time.Millisecond)) - if a.updateTransactionMetrics(key, hash, count, duration) { - return nil + count := transactionCount(event.Transaction) + if a.updateTransactionMetrics(key, hash, event.Transaction.RepresentativeCount, event.Event.Duration) { + return model.APMEvent{} } // Too many aggregation keys: could not update metrics, so immediately // publish a single-value metric document.
- // - // TODO(axw) log a warning with a rate-limit, increment a counter. + a.tooManyGroupsLogger.Warn(` +Transaction group limit reached, falling back to sending individual metric documents. +This is typically caused by ineffective transaction grouping, e.g. by creating many +unique transaction names.`[1:], + ) + atomic.AddInt64(&a.metrics.overflowed, 1) counts := []int64{int64(math.Round(count))} - values := []float64{float64(durationMicros(duration))} - metricset := makeMetricset(key, hash, time.Now(), counts, values) - return &metricset + values := []float64{float64(event.Event.Duration.Microseconds())} + return makeMetricset(key, hash, time.Now(), counts[0], counts, values) } func (a *Aggregator) updateTransactionMetrics(key transactionAggregationKey, hash uint64, count float64, duration time.Duration) bool { @@ -301,8 +322,8 @@ func (a *Aggregator) updateTransactionMetrics(key transactionAggregationKey, has entry.transactionAggregationKey = key if entry.transactionMetrics.histogram == nil { entry.transactionMetrics.histogram = hdrhistogram.New( - durationMicros(minDuration), - durationMicros(maxDuration), + minDuration.Microseconds(), + maxDuration.Microseconds(), a.config.HDRHistogramSignificantFigures, ) } else { @@ -315,85 +336,70 @@ func (a *Aggregator) updateTransactionMetrics(key transactionAggregationKey, has return true } -func (a *Aggregator) makeTransactionAggregationKey(tx *model.Transaction) transactionAggregationKey { - var userAgentName string - if tx.Type == "page-load" { - // The APM app in Kibana has a special case for "page-load" - // transaction types, rendering distributions by country and - // browser. We use the same logic to decide whether or not - // to include user_agent.name in the aggregation key. - userAgentName = a.userAgentLookup.getUserAgentName(tx.Metadata.UserAgent.Original) - } - +func (a *Aggregator) makeTransactionAggregationKey(event model.APMEvent) transactionAggregationKey { return transactionAggregationKey{ - traceRoot: tx.ParentID == "", - transactionName: tx.Name, - transactionOutcome: tx.Outcome, - transactionResult: tx.Result, - transactionType: tx.Type, - - agentName: tx.Metadata.Service.Agent.Name, - serviceEnvironment: tx.Metadata.Service.Environment, - serviceName: tx.Metadata.Service.Name, - serviceVersion: tx.Metadata.Service.Version, - - hostname: tx.Metadata.System.Hostname(), - containerID: tx.Metadata.System.Container.ID, - kubernetesPodName: tx.Metadata.System.Kubernetes.PodName, - - userAgentName: userAgentName, - - // TODO(axw) clientCountryISOCode, requires geoIP lookup in apm-server. + traceRoot: event.Parent.ID == "", + transactionName: event.Transaction.Name, + transactionResult: event.Transaction.Result, + transactionType: event.Transaction.Type, + eventOutcome: event.Event.Outcome, + + agentName: event.Agent.Name, + serviceEnvironment: event.Service.Environment, + serviceName: event.Service.Name, + serviceVersion: event.Service.Version, + + hostname: event.Host.Hostname, + containerID: event.Container.ID, + kubernetesPodName: event.Kubernetes.PodName, } } -// makeMetricset makes a Metricset from key, counts, and values, with timestamp ts. 
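// exampleRateLimitedLogger is a hypothetical helper showing the pattern used
// for tooManyGroupsLogger above: logs.WithRateLimit wraps a logger so the
// "too many groups" warning fires at most once per interval.
func exampleRateLimitedLogger(logger *logp.Logger) *logp.Logger {
    return logger.WithOptions(logs.WithRateLimit(tooManyGroupsLoggerRateLimit))
}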
-func makeMetricset(key transactionAggregationKey, hash uint64, ts time.Time, counts []int64, values []float64) model.Metricset { - out := model.Metricset{ - Timestamp: ts, - Metadata: model.Metadata{ - Service: model.Service{ - Name: key.serviceName, - Version: key.serviceVersion, - Environment: key.serviceEnvironment, - Agent: model.Agent{Name: key.agentName}, - }, - System: model.System{ - DetectedHostname: key.hostname, - Container: model.Container{ID: key.containerID}, - Kubernetes: model.Kubernetes{PodName: key.kubernetesPodName}, - }, - UserAgent: model.UserAgent{ - Name: key.userAgentName, - }, - // TODO(axw) include client.geo.country_iso_code somewhere +// makeMetricset makes a metricset event from key, counts, and values, with timestamp ts. +func makeMetricset( + key transactionAggregationKey, hash uint64, ts time.Time, totalCount int64, counts []int64, values []float64, +) model.APMEvent { + // Record a timeseries instance ID, which should uniquely identify the aggregation key. + var timeseriesInstanceID strings.Builder + timeseriesInstanceID.WriteString(key.serviceName) + timeseriesInstanceID.WriteRune(':') + timeseriesInstanceID.WriteString(key.transactionName) + timeseriesInstanceID.WriteRune(':') + timeseriesInstanceID.WriteString(fmt.Sprintf("%x", hash)) + + return model.APMEvent{ + Timestamp: ts, + Agent: model.Agent{Name: key.agentName}, + Container: model.Container{ID: key.containerID}, + Kubernetes: model.Kubernetes{PodName: key.kubernetesPodName}, + Service: model.Service{ + Name: key.serviceName, + Version: key.serviceVersion, + Environment: key.serviceEnvironment, }, - Event: model.MetricsetEventCategorization{ - Outcome: key.transactionOutcome, + Host: model.Host{ + Hostname: key.hostname, }, - Transaction: model.MetricsetTransaction{ + Event: model.Event{ + Outcome: key.eventOutcome, + }, + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Name: metricsetName, + DocCount: totalCount, + TimeseriesInstanceID: timeseriesInstanceID.String(), + }, + Transaction: &model.Transaction{ Name: key.transactionName, Type: key.transactionType, Result: key.transactionResult, Root: key.traceRoot, + DurationHistogram: model.Histogram{ + Counts: counts, + Values: values, + }, }, - Samples: []model.Sample{{ - Name: "transaction.duration.histogram", - Counts: counts, - Values: values, - }}, } - - // Record an timeseries instance ID, which should be uniquely identify the aggregation key. - var timeseriesInstanceID strings.Builder - timeseriesInstanceID.WriteString(key.serviceName) - timeseriesInstanceID.WriteRune(':') - timeseriesInstanceID.WriteString(key.transactionName) - timeseriesInstanceID.WriteRune(':') - timeseriesInstanceID.WriteString(fmt.Sprintf("%x", hash)) - out.TimeseriesInstanceID = timeseriesInstanceID.String() - - return out } type metrics struct { @@ -415,11 +421,10 @@ type metricsMapEntry struct { transactionMetrics } +// NOTE(axw) the dimensions should be kept in sync with docs/metricset-indices.asciidoc. type transactionAggregationKey struct { - traceRoot bool - agentName string - // TODO(axw) requires geoIP lookup in apm-server.
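// exampleTimeseriesInstanceID is a hypothetical helper showing the ID format
// built in makeMetricset above: "<service>:<transaction>:<hash as hex>". An
// empty service name, transaction name "baz" and hash 0xbc30224a3738a508
// yield ":baz:bc30224a3738a508", the value asserted in the tests below.
func exampleTimeseriesInstanceID() string {
    return fmt.Sprintf("%s:%s:%x", "", "baz", uint64(0xbc30224a3738a508))
}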
- //clientCountryISOCode string + traceRoot bool + agentName string containerID string hostname string kubernetesPodName string @@ -427,10 +432,9 @@ type transactionAggregationKey struct { serviceName string serviceVersion string transactionName string - transactionOutcome string transactionResult string transactionType string - userAgentName string + eventOutcome string } func (k *transactionAggregationKey) hash() uint64 { @@ -439,7 +443,6 @@ func (k *transactionAggregationKey) hash() uint64 { h.WriteString("1") } h.WriteString(k.agentName) - // TODO(axw) clientCountryISOCode h.WriteString(k.containerID) h.WriteString(k.hostname) h.WriteString(k.kubernetesPodName) @@ -447,10 +450,9 @@ func (k *transactionAggregationKey) hash() uint64 { h.WriteString(k.serviceName) h.WriteString(k.serviceVersion) h.WriteString(k.transactionName) - h.WriteString(k.transactionOutcome) h.WriteString(k.transactionResult) h.WriteString(k.transactionType) - h.WriteString(k.userAgentName) + h.WriteString(k.eventOutcome) return h.Sum64() } @@ -460,10 +462,10 @@ type transactionMetrics struct { func (m *transactionMetrics) recordDuration(d time.Duration, n float64) { count := int64(math.Round(n * histogramCountScale)) - m.histogram.RecordValuesAtomic(durationMicros(d), count) + m.histogram.RecordValuesAtomic(d.Microseconds(), count) } -func (m *transactionMetrics) histogramBuckets() (counts []int64, values []float64) { +func (m *transactionMetrics) histogramBuckets() (totalCount int64, counts []int64, values []float64) { // From https://www.elastic.co/guide/en/elasticsearch/reference/current/histogram.html: // // "For the High Dynamic Range (HDR) histogram mode, the values array represents @@ -476,11 +478,12 @@ func (m *transactionMetrics) histogramBuckets() (counts []int64, values []float6 if b.Count <= 0 { continue } - count := math.Round(float64(b.Count) / histogramCountScale) - counts = append(counts, int64(count)) + count := int64(math.Round(float64(b.Count) / histogramCountScale)) + counts = append(counts, count) values = append(values, float64(b.To)) + totalCount += count } - return counts, values + return totalCount, counts, values } func transactionCount(tx *model.Transaction) float64 { @@ -489,7 +492,3 @@ func transactionCount(tx *model.Transaction) float64 { } return 1 } - -func durationMicros(d time.Duration) int64 { - return int64(d / time.Microsecond) -} diff --git a/x-pack/apm-server/aggregation/txmetrics/aggregator_test.go b/x-pack/apm-server/aggregation/txmetrics/aggregator_test.go index 9ac726247f1..a07d178142b 100644 --- a/x-pack/apm-server/aggregation/txmetrics/aggregator_test.go +++ b/x-pack/apm-server/aggregation/txmetrics/aggregator_test.go @@ -19,14 +19,13 @@ import ( "go.uber.org/zap/zaptest/observer" "github.com/elastic/apm-server/model" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" "github.com/elastic/apm-server/x-pack/apm-server/aggregation/txmetrics" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" ) func TestNewAggregatorConfigInvalid(t *testing.T) { - report := makeErrReporter(nil) + batchProcessor := makeErrBatchProcessor(nil) type test struct { config txmetrics.AggregatorConfig @@ -35,34 +34,26 @@ func TestNewAggregatorConfigInvalid(t *testing.T) { for _, test := range []test{{ config: txmetrics.AggregatorConfig{}, - err: "Report unspecified", + err: "BatchProcessor unspecified", }, { config: txmetrics.AggregatorConfig{ - Report: report, + 
BatchProcessor: batchProcessor, }, err: "MaxTransactionGroups unspecified or negative", }, { config: txmetrics.AggregatorConfig{ - Report: report, + BatchProcessor: batchProcessor, MaxTransactionGroups: 1, }, err: "MetricsInterval unspecified or negative", }, { config: txmetrics.AggregatorConfig{ - Report: report, + BatchProcessor: batchProcessor, MaxTransactionGroups: 1, MetricsInterval: time.Nanosecond, HDRHistogramSignificantFigures: 6, }, err: "HDRHistogramSignificantFigures (6) outside range [1,5]", - }, { - config: txmetrics.AggregatorConfig{ - Report: report, - MaxTransactionGroups: 1, - MetricsInterval: time.Nanosecond, - HDRHistogramSignificantFigures: 5, - }, - err: "RUMUserAgentLRUSize unspecified or negative", }} { agg, err := txmetrics.NewAggregator(test.config) require.Error(t, err) @@ -72,115 +63,152 @@ func TestNewAggregatorConfigInvalid(t *testing.T) { } func TestProcessTransformablesOverflow(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) + + core, observed := observer.New(zapcore.DebugLevel) + logger := logp.NewLogger("foo", zap.WrapCore(func(in zapcore.Core) zapcore.Core { + return zapcore.NewTee(in, core) + })) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeChanReporter(reqs), + BatchProcessor: makeChanBatchProcessor(batches), MaxTransactionGroups: 2, MetricsInterval: time.Microsecond, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 1, + Logger: logger, }) require.NoError(t, err) // The first two transaction groups will not require immediate publication, // as we have configured the txmetrics with a maximum of two buckets. - var input []transform.Transformable - for i := 0; i < 10; i++ { - input = append(input, &model.Transaction{Name: "foo"}) - input = append(input, &model.Transaction{Name: "bar"}) + batch := make(model.Batch, 20) + for i := 0; i < len(batch); i += 2 { + batch[i] = model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{Name: "foo", RepresentativeCount: 1}, + } + batch[i+1] = model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{Name: "bar", RepresentativeCount: 1}, + } } - output := agg.ProcessTransformables(input) - assert.Equal(t, input, output) + err = agg.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + assert.Empty(t, batchMetricsets(t, batch)) // The third transaction group will return a metricset for immediate publication. 
for i := 0; i < 2; i++ { - input = append(input, &model.Transaction{ - Name: "baz", - Duration: float64(time.Minute / time.Millisecond), + batch = append(batch, model.APMEvent{ + Processor: model.TransactionProcessor, + Event: model.Event{Duration: time.Minute}, + Transaction: &model.Transaction{ + Name: "baz", + RepresentativeCount: 1, + }, }) } - output = agg.ProcessTransformables(input) - assert.Len(t, output, len(input)+2) - assert.Equal(t, input, output[:len(input)]) - - for _, tf := range output[len(input):] { - m, ok := tf.(*model.Metricset) - require.True(t, ok) - require.NotNil(t, m) - require.False(t, m.Timestamp.IsZero()) - - m.Timestamp = time.Time{} - assert.Equal(t, &model.Metricset{ - Metadata: model.Metadata{}, - Transaction: model.MetricsetTransaction{ + err = agg.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + metricsets := batchMetricsets(t, batch) + assert.Len(t, metricsets, 2) + + for _, m := range metricsets { + assert.Equal(t, model.APMEvent{ + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Name: "transaction", + TimeseriesInstanceID: ":baz:bc30224a3738a508", + DocCount: 1, + }, + Transaction: &model.Transaction{ Name: "baz", Root: true, + DurationHistogram: model.Histogram{ + Counts: []int64{1}, + Values: []float64{float64(time.Minute / time.Microsecond)}, + }, }, - Samples: []model.Sample{{ - Name: "transaction.duration.histogram", - Counts: []int64{1}, - Values: []float64{float64(time.Minute / time.Microsecond)}, - }}, - TimeseriesInstanceID: ":baz:bc30224a3738a508", }, m) } + + expectedMonitoring := monitoring.MakeFlatSnapshot() + expectedMonitoring.Ints["txmetrics.active_groups"] = 2 + expectedMonitoring.Ints["txmetrics.overflowed"] = 2 // third group is processed twice + + registry := monitoring.NewRegistry() + monitoring.NewFunc(registry, "txmetrics", agg.CollectMonitoring) + assert.Equal(t, expectedMonitoring, monitoring.CollectFlatSnapshot( + registry, + monitoring.Full, + false, // expvar + )) + + overflowLogEntries := observed.FilterMessageSnippet("Transaction group limit reached") + assert.Equal(t, 1, overflowLogEntries.Len()) // rate limited } func TestAggregatorRun(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeChanReporter(reqs), + BatchProcessor: makeChanBatchProcessor(batches), MaxTransactionGroups: 2, MetricsInterval: 10 * time.Millisecond, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 1, }) require.NoError(t, err) for i := 0; i < 1000; i++ { - metricset := agg.AggregateTransaction(&model.Transaction{Name: "T-1000"}) - require.Nil(t, metricset) + metricset := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{ + Name: "T-1000", + RepresentativeCount: 1, + }, + }) + require.Zero(t, metricset) } for i := 0; i < 800; i++ { - metricset := agg.AggregateTransaction(&model.Transaction{Name: "T-800"}) - require.Nil(t, metricset) + metricset := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{ + Name: "T-800", + RepresentativeCount: 1, + }, + }) + require.Zero(t, metricset) } go agg.Run() defer agg.Stop(context.Background()) - req := expectPublish(t, reqs) - require.Len(t, req.Transformables, 2) - metricsets := make([]*model.Metricset, len(req.Transformables)) - for i, tf := range req.Transformables { - metricsets[i] = tf.(*model.Metricset) - } 
+ batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) + require.Len(t, metricsets, 2) sort.Slice(metricsets, func(i, j int) bool { return metricsets[i].Transaction.Name < metricsets[j].Transaction.Name }) assert.Equal(t, "T-1000", metricsets[0].Transaction.Name) - assert.Equal(t, []int64{1000}, metricsets[0].Samples[0].Counts) + assert.Equal(t, []int64{1000}, metricsets[0].Transaction.DurationHistogram.Counts) assert.Equal(t, "T-800", metricsets[1].Transaction.Name) - assert.Equal(t, []int64{800}, metricsets[1].Samples[0].Counts) + assert.Equal(t, []int64{800}, metricsets[1].Transaction.DurationHistogram.Counts) select { - case <-reqs: + case <-batches: t.Fatal("unexpected publish") case <-time.After(100 * time.Millisecond): } } func TestAggregatorRunPublishErrors(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) - chanReporter := makeChanReporter(reqs) - reportErr := errors.New("report failed") - report := func(ctx context.Context, req publish.PendingReq) error { - if err := chanReporter(ctx, req); err != nil { + batches := make(chan model.Batch, 1) + chanBatchProcessor := makeChanBatchProcessor(batches) + processBatchErr := errors.New("report failed") + var batchProcessor model.ProcessBatchFunc = func(ctx context.Context, batch *model.Batch) error { + if err := chanBatchProcessor(ctx, batch); err != nil { return err } - return reportErr + return processBatchErr } core, observed := observer.New(zapcore.DebugLevel) @@ -189,11 +217,10 @@ func TestAggregatorRunPublishErrors(t *testing.T) { })) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: report, + BatchProcessor: batchProcessor, MaxTransactionGroups: 2, MetricsInterval: 10 * time.Millisecond, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 1, Logger: logger, }) require.NoError(t, err) @@ -202,9 +229,15 @@ func TestAggregatorRunPublishErrors(t *testing.T) { defer agg.Stop(context.Background()) for i := 0; i < 2; i++ { - metricset := agg.AggregateTransaction(&model.Transaction{Name: "T-1000"}) - require.Nil(t, metricset) - expectPublish(t, reqs) + metricset := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{ + Name: "T-1000", + RepresentativeCount: 1, + }, + }) + require.Zero(t, metricset) + expectBatch(t, batches) } // Wait for aggregator to stop before checking logs, to ensure we don't race with logging. @@ -215,35 +248,48 @@ func TestAggregatorRunPublishErrors(t *testing.T) { for _, record := range logs { require.Len(t, record.Context, 1) assert.Equal(t, "error", record.Context[0].Key) - assert.Equal(t, reportErr, record.Context[0].Interface) + assert.Equal(t, processBatchErr, record.Context[0].Interface) } } func TestAggregateRepresentativeCount(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) - + batches := make(chan model.Batch, 1) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeChanReporter(reqs), + BatchProcessor: makeChanBatchProcessor(batches), MaxTransactionGroups: 1, MetricsInterval: time.Microsecond, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 1, }) require.NoError(t, err) // Record a transaction group so subsequent calls yield immediate metricsets, // and to demonstrate that fractional transaction counts are accumulated. 
- agg.AggregateTransaction(&model.Transaction{Name: "fnord", RepresentativeCount: 1}) - agg.AggregateTransaction(&model.Transaction{Name: "fnord", RepresentativeCount: 1.5}) + agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{Name: "fnord", RepresentativeCount: 1}, + }) + agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{Name: "fnord", RepresentativeCount: 1.5}, + }) + + // For non-positive RepresentativeCounts, no metrics will be accumulated. + for _, representativeCount := range []float64{-1, 0} { + m := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{ + Name: "foo", + RepresentativeCount: representativeCount, + }, + }) + assert.Zero(t, m) + } for _, test := range []struct { representativeCount float64 expectedCount int64 }{{ - representativeCount: 0, - expectedCount: 1, - }, { - representativeCount: -1, + representativeCount: 1, expectedCount: 1, }, { representativeCount: 2, @@ -252,25 +298,31 @@ func TestAggregateRepresentativeCount(t *testing.T) { representativeCount: 1.50, // round half away from zero expectedCount: 2, }} { - m := agg.AggregateTransaction(&model.Transaction{ - Name: "foo", - RepresentativeCount: test.representativeCount, + m := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{ + Name: "foo", + RepresentativeCount: test.representativeCount, + }, }) - require.NotNil(t, m) + require.NotNil(t, m.Metricset) m.Timestamp = time.Time{} - assert.Equal(t, &model.Metricset{ - Metadata: model.Metadata{}, - TimeseriesInstanceID: ":foo:1db641f187113b17", - Transaction: model.MetricsetTransaction{ + assert.Equal(t, model.APMEvent{ + Processor: model.MetricsetProcessor, + Metricset: &model.Metricset{ + Name: "transaction", + TimeseriesInstanceID: ":foo:1db641f187113b17", + DocCount: test.expectedCount, + }, + Transaction: &model.Transaction{ Name: "foo", Root: true, + DurationHistogram: model.Histogram{ + Counts: []int64{test.expectedCount}, + Values: []float64{0}, + }, }, - Samples: []model.Sample{{ - Name: "transaction.duration.histogram", - Counts: []int64{test.expectedCount}, - Values: []float64{0}, - }}, }, m) } @@ -281,11 +333,13 @@ func TestAggregateRepresentativeCount(t *testing.T) { // group were accumulated with some degree of accuracy. i.e. we should // receive round(1+1.5)=3; the fractional values should not have been // truncated. 
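// A worked sketch of that accumulation, assuming histogramCountScale is 1000:
// representative counts of 1 and 1.5 are recorded as 1000 and 1500, and the
// accumulated 2500 publishes as round(2500/1000) = 3.
accumulated := int64(1*1000) + int64(1.5*1000)                   // 2500
publishedCount := int64(math.Round(float64(accumulated) / 1000)) // 3
_, _ = accumulated, publishedCount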
- req := expectPublish(t, reqs) - require.Len(t, req.Transformables, 1) - metricset := req.Transformables[0].(*model.Metricset) - require.Len(t, metricset.Samples, 1) - assert.Equal(t, []int64{3 /*round(1+1.5)*/}, metricset.Samples[0].Counts) + batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) + require.Len(t, metricsets, 1) + require.Nil(t, metricsets[0].Metricset.Samples) + require.NotNil(t, metricsets[0].Transaction) + durationHistogram := metricsets[0].Transaction.DurationHistogram + assert.Equal(t, []int64{3 /*round(1+1.5)*/}, durationHistogram.Counts) } func TestHDRHistogramSignificantFigures(t *testing.T) { @@ -298,20 +352,15 @@ func TestHDRHistogramSignificantFigures(t *testing.T) { func testHDRHistogramSignificantFigures(t *testing.T, sigfigs int) { t.Run(fmt.Sprintf("%d_sigfigs", sigfigs), func(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeChanReporter(reqs), + BatchProcessor: makeChanBatchProcessor(batches), MaxTransactionGroups: 2, MetricsInterval: 10 * time.Millisecond, HDRHistogramSignificantFigures: sigfigs, - RUMUserAgentLRUSize: 1, }) require.NoError(t, err) - durationMillis := func(d time.Duration) float64 { - return float64(d) / float64(time.Millisecond) - } - // The following values will be recorded in either 1, 2, 3, 4, or 5 // buckets according to the configured number of significant figures. for _, duration := range []time.Duration{ @@ -321,180 +370,182 @@ func testHDRHistogramSignificantFigures(t *testing.T, sigfigs int) { 101110 * time.Microsecond, 101111 * time.Microsecond, } { - metricset := agg.AggregateTransaction(&model.Transaction{ - Name: "T-1000", - Duration: durationMillis(duration), + metricset := agg.AggregateTransaction(model.APMEvent{ + Processor: model.TransactionProcessor, + Event: model.Event{Duration: duration}, + Transaction: &model.Transaction{ + Name: "T-1000", + RepresentativeCount: 1, + }, }) - require.Nil(t, metricset) + require.Zero(t, metricset) } go agg.Run() defer agg.Stop(context.Background()) - req := expectPublish(t, reqs) - require.Len(t, req.Transformables, 1) + batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) + require.Len(t, metricsets, 1) - metricset := req.Transformables[0].(*model.Metricset) - require.Len(t, metricset.Samples, 1) - assert.Len(t, metricset.Samples[0].Counts, len(metricset.Samples[0].Values)) - assert.Len(t, metricset.Samples[0].Counts, sigfigs) + require.Nil(t, metricsets[0].Metricset.Samples) + require.NotNil(t, metricsets[0].Transaction) + durationHistogram := metricsets[0].Transaction.DurationHistogram + assert.Len(t, durationHistogram.Counts, len(durationHistogram.Values)) + assert.Len(t, durationHistogram.Counts, sigfigs) }) } func TestAggregationFields(t *testing.T) { - reqs := make(chan publish.PendingReq, 1) + batches := make(chan model.Batch, 1) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeChanReporter(reqs), + BatchProcessor: makeChanBatchProcessor(batches), MaxTransactionGroups: 1000, MetricsInterval: 100 * time.Millisecond, HDRHistogramSignificantFigures: 1, - RUMUserAgentLRUSize: 1, }) require.NoError(t, err) go agg.Run() defer agg.Stop(context.Background()) - input := model.Transaction{RepresentativeCount: 1} + input := model.APMEvent{ + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{RepresentativeCount: 1}, + } inputFields := []*string{ - &input.Name, - 
&input.Outcome, - &input.Result, - &input.Type, - &input.Metadata.Service.Agent.Name, - &input.Metadata.Service.Environment, - &input.Metadata.Service.Name, - &input.Metadata.Service.Version, - &input.Metadata.System.Container.ID, - &input.Metadata.System.Kubernetes.PodName, + &input.Transaction.Name, + &input.Transaction.Result, + &input.Transaction.Type, + &input.Event.Outcome, + &input.Agent.Name, + &input.Service.Environment, + &input.Service.Name, + &input.Service.Version, + &input.Container.ID, + &input.Kubernetes.PodName, } - var expected []model.Metricset + var expected []model.APMEvent addExpectedCount := func(expectedCount int64) { - expected = append(expected, model.Metricset{ - Metadata: input.Metadata, - Event: model.MetricsetEventCategorization{ - Outcome: input.Outcome, - }, - Transaction: model.MetricsetTransaction{ - Name: input.Name, - Type: input.Type, - Result: input.Result, - Root: input.ParentID == "", - }, - Samples: []model.Sample{{ - Name: "transaction.duration.histogram", + expectedEvent := input + expectedEvent.Transaction = nil + expectedEvent.Event.Outcome = input.Event.Outcome + expectedEvent.Processor = model.MetricsetProcessor + expectedEvent.Metricset = &model.Metricset{ + Name: "transaction", + DocCount: expectedCount, + } + expectedEvent.Transaction = &model.Transaction{ + Name: input.Transaction.Name, + Type: input.Transaction.Type, + Result: input.Transaction.Result, + Root: input.Parent.ID == "", + DurationHistogram: model.Histogram{ Counts: []int64{expectedCount}, Values: []float64{0}, - }}, - }) + }, + } + expected = append(expected, expectedEvent) } for _, field := range inputFields { for _, value := range []string{"something", "anything"} { *field = value - assert.Nil(t, agg.AggregateTransaction(&input)) - assert.Nil(t, agg.AggregateTransaction(&input)) + assert.Zero(t, agg.AggregateTransaction(input)) + assert.Zero(t, agg.AggregateTransaction(input)) addExpectedCount(2) } } - // Hostname is complex: if any kubernetes fields are set, then - // it is taken from Kubernetes.Node.Name, and DetectedHostname - // is ignored. - input.Metadata.System.Kubernetes.PodName = "" - for _, value := range []string{"something", "anything"} { - input.Metadata.System.DetectedHostname = value - assert.Nil(t, agg.AggregateTransaction(&input)) - assert.Nil(t, agg.AggregateTransaction(&input)) - addExpectedCount(2) - } + if false { + // Hostname is complex: if any kubernetes fields are set, then + // it is taken from Kubernetes.Node.Name, and DetectedHostname + // is ignored. + input.Kubernetes.PodName = "" + for _, value := range []string{"something", "anything"} { + input.Host.Hostname = value + assert.Zero(t, agg.AggregateTransaction(input)) + assert.Zero(t, agg.AggregateTransaction(input)) + addExpectedCount(2) + } - // ParentID only impacts aggregation as far as grouping root and - // non-root traces. - for _, value := range []string{"something", "anything"} { - input.ParentID = value - assert.Nil(t, agg.AggregateTransaction(&input)) - assert.Nil(t, agg.AggregateTransaction(&input)) - } - addExpectedCount(4) - - var output []model.Metricset - req := expectPublish(t, reqs) - for _, tf := range req.Transformables { - ms := tf.(*model.Metricset) - ms.Timestamp = time.Time{} - ms.TimeseriesInstanceID = "" - output = append(output, *ms) + // Parent.ID only impacts aggregation as far as grouping root and + // non-root traces. 
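// Sketch of the derived dimension, assuming the key construction from
// makeTransactionAggregationKey earlier in this diff: only the emptiness of
// Parent.ID feeds the aggregation key (traceRoot: event.Parent.ID == "").
isRoot := input.Parent.ID == ""
_ = isRoot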
+ for _, value := range []string{"something", "anything"} { + input.Parent.ID = value + assert.Zero(t, agg.AggregateTransaction(input)) + assert.Zero(t, agg.AggregateTransaction(input)) + } + addExpectedCount(4) } - assert.ElementsMatch(t, expected, output) -} -func BenchmarkAggregateTransaction(b *testing.B) { - agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeErrReporter(nil), - MaxTransactionGroups: 1000, - MetricsInterval: time.Minute, - HDRHistogramSignificantFigures: 2, - RUMUserAgentLRUSize: 1, - }) - require.NoError(b, err) - - tx := &model.Transaction{ - Name: "T-1000", - Duration: 1, + batch := expectBatch(t, batches) + metricsets := batchMetricsets(t, batch) + for _, event := range metricsets { + event.Metricset.TimeseriesInstanceID = "" } - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - agg.AggregateTransaction(tx) - } - }) + assert.ElementsMatch(t, expected, metricsets) } -func BenchmarkAggregateTransactionUserAgent(b *testing.B) { +func BenchmarkAggregateTransaction(b *testing.B) { agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: makeErrReporter(nil), + BatchProcessor: makeErrBatchProcessor(nil), MaxTransactionGroups: 1000, MetricsInterval: time.Minute, HDRHistogramSignificantFigures: 2, - RUMUserAgentLRUSize: 1, }) require.NoError(b, err) - tx := &model.Transaction{ - Name: "T-1000", - Duration: 1, + event := model.APMEvent{ + Processor: model.TransactionProcessor, + Event: model.Event{Duration: time.Millisecond}, + Transaction: &model.Transaction{ + Name: "T-1000", + RepresentativeCount: 1, + }, } - tx.Metadata.UserAgent.Original = "Mozilla/5.0 (X11; Linux x86_64; rv:2.0) Gecko/20110408 conkeror/0.9.3" b.RunParallel(func(pb *testing.PB) { for pb.Next() { - agg.AggregateTransaction(tx) + agg.AggregateTransaction(event) } }) } -func makeErrReporter(err error) publish.Reporter { - return func(context.Context, publish.PendingReq) error { return err } +func makeErrBatchProcessor(err error) model.ProcessBatchFunc { + return func(context.Context, *model.Batch) error { return err } } -func makeChanReporter(ch chan<- publish.PendingReq) publish.Reporter { - return func(ctx context.Context, req publish.PendingReq) error { +func makeChanBatchProcessor(ch chan<- model.Batch) model.ProcessBatchFunc { + return func(ctx context.Context, batch *model.Batch) error { select { case <-ctx.Done(): return ctx.Err() - case ch <- req: + case ch <- *batch: return nil } } } -func expectPublish(t *testing.T, ch <-chan publish.PendingReq) publish.PendingReq { +func expectBatch(t *testing.T, ch <-chan model.Batch) model.Batch { t.Helper() select { - case req := <-ch: - return req + case batch := <-ch: + return batch case <-time.After(time.Second): t.Fatal("expected publish") } panic("unreachable") } + +func batchMetricsets(t testing.TB, batch model.Batch) []model.APMEvent { + var metricsets []model.APMEvent + for _, event := range batch { + if event.Metricset == nil { + continue + } + require.NotZero(t, event.Timestamp) + event.Timestamp = time.Time{} + metricsets = append(metricsets, event) + } + return metricsets +} diff --git a/x-pack/apm-server/aggregation/txmetrics/useragent.go b/x-pack/apm-server/aggregation/txmetrics/useragent.go deleted file mode 100644 index 151563b6378..00000000000 --- a/x-pack/apm-server/aggregation/txmetrics/useragent.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package txmetrics - -import ( - lru "github.com/hashicorp/golang-lru" - "github.com/ua-parser/uap-go/uaparser" -) - -// userAgentLookup provides support for parsing User-Agent strings, to enable -// aggregating "page-load" transactions by browser name. -type userAgentLookup struct { - lru *lru.Cache - parser *uaparser.Parser -} - -func newUserAgentLookup(lruSize int) (*userAgentLookup, error) { - lru, err := lru.New(lruSize) - if err != nil { - return nil, err - } - return &userAgentLookup{ - lru: lru, - // We use a static list of patterns. - parser: uaparser.NewFromSaved(), - }, nil -} - -// getUserAgentName returns the ECS `user_agent.name` value for -// the given User-Agent string. User-Agent parsing (pattern matching) -// is expensive, so we use an LRU cache to avoid it. -func (ual *userAgentLookup) getUserAgentName(userAgent string) string { - lruValue, ok := ual.lru.Get(userAgent) - if ok { - return lruValue.(string) - } - var userAgentName string - if ua := ual.parser.ParseUserAgent(userAgent); ua != nil { - userAgentName = ua.Family - } - ual.lru.Add(userAgent, userAgentName) - return userAgentName -} diff --git a/x-pack/apm-server/cmd/root.go b/x-pack/apm-server/cmd/root.go index b79745d62c2..4561f667a92 100644 --- a/x-pack/apm-server/cmd/root.go +++ b/x-pack/apm-server/cmd/root.go @@ -7,7 +7,7 @@ package cmd import ( "github.com/elastic/beats/v7/libbeat/beat" libbeatcmd "github.com/elastic/beats/v7/libbeat/cmd" - xpackcmd "github.com/elastic/beats/v7/x-pack/libbeat/cmd" + _ "github.com/elastic/beats/v7/x-pack/libbeat/include" // Fleet, processors "github.com/elastic/apm-server/cmd" _ "github.com/elastic/apm-server/x-pack/apm-server/include" // include assets @@ -15,8 +15,9 @@ import ( // NewXPackRootCommand returns the Elastic licensed "apm-server" root command. func NewXPackRootCommand(newBeat beat.Creator) *libbeatcmd.BeatsRootCmd { - rootCmd := cmd.NewRootCommand(newBeat) - xpackcmd.AddXPack(rootCmd, rootCmd.Name()) + settings := cmd.DefaultSettings() + settings.ElasticLicensed = true + rootCmd := cmd.NewRootCommand(newBeat, settings) if enrollCmd, _, err := rootCmd.Find([]string{"enroll"}); err == nil { // error is ok => enroll has already been removed rootCmd.RemoveCommand(enrollCmd) diff --git a/x-pack/apm-server/fields/_meta/fields.yml b/x-pack/apm-server/fields/_meta/fields.yml index 7aa1b9a3c59..090895f8457 100644 --- a/x-pack/apm-server/fields/_meta/fields.yml +++ b/x-pack/apm-server/fields/_meta/fields.yml @@ -25,7 +25,9 @@ short_config: true fields: - name: metricset.period + unit: ms type: long + description: Current data collection period for this event in milliseconds. - name: span type: group dynamic: false @@ -36,5 +38,8 @@ fields: - name: response_time.count type: long + description: Number of aggregated outgoing requests. - name: response_time.sum.us + unit: micros type: long + description: Aggregated duration of outgoing requests, in microseconds. diff --git a/x-pack/apm-server/include/fields.go b/x-pack/apm-server/include/fields.go index 082b2382346..594e4af7d9a 100644 --- a/x-pack/apm-server/include/fields.go +++ b/x-pack/apm-server/include/fields.go @@ -17,7 +17,7 @@ func init() { } // AssetXPackFields returns asset data. -// This is the base64 encoded gzipped contents of x-pack/apm-server. +// This is the base64 encoded zlib format compressed contents of x-pack/apm-server. 
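// decodeAssetSketch is a hypothetical helper showing how the asset below can
// be recovered, assuming the encoding named in the comment above (base64,
// then zlib) and the standard library packages encoding/base64, compress/zlib,
// bytes and io; errors are ignored for brevity.
func decodeAssetSketch() []byte {
    raw, _ := base64.StdEncoding.DecodeString(AssetXPackFields())
    zr, _ := zlib.NewReader(bytes.NewReader(raw))
    defer zr.Close()
    fieldsYAML, _ := io.ReadAll(zr)
    return fieldsYAML
}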
func AssetXPackFields() string { - return "eJyckkFunkAMhfecwso6cAAWlXqASJHafeQOD34rMDO1PU3/21cQSCClv9qyw2N/8zzv1fSMa0ucp9qVo3FwSbGe4CrB6p+Zw3NF5OIjWrr7/PhAX9/76OG1764i6mBBJc/llj5VRERz945KK/WeOHZnB7VlBOklUNaUoS6w+4Wk+F5EJQ40SkA0dNSDvSiMrIQLsZFfQBcxT4PyRL1g7MivGU1FZJek/hRS7GVoybWgotcWa5cLaoo8od2rWuq0IFoaNJW8Vrpr5ElCSz2PhrW4wdbfjdcV5R3sDHcUsh9+2+btZJs/OzkxYPseFTUPg2JgR7d7pdQffNjUWlNVu2RY5tuR+JL5b7IwczaviRVUFiOTzv68zO4qO2ZRs98wtyUpIztiuNI3+AsQSaK5lglx3sWgPyTA/snlVQO8yVBJ3cHqMcXh0D3L/r8wfMgCzCUuD9yssm/G4hT9e1reL1BYTtHw5DKhCalEP8Tgw363hq1MTbE/Tf8KAAD//0/aTyw=" + return "eJyck0Fv2zAMhe/+FUTPsX+ADwOKnTsU2O6FKj87RG1JI6l2+feDHLtxFqNbl1so6vGJ33NNLzi15NJUm7igzhvHUE8wYa/1r+T8S0VkbCNaurt/fKAflz56OPfdVUQd1AunUm7pS0VEVLo3qrSoHsiFbu+g1gTPPXtKEhPEGHqYlQQ/MwuHgUb2CIqOejjLAiXN/khOyY6gI6vFQdxEPWPsyE4JTUWkxyj25GPoeWjJJKOic4u284CagpvQbl3NdZolWhok5rRUulNwE/uWejcqluIqtvxd9bosbiO2J3dtZHv5/TXvJ+v9vZMdAOvvUVC7YRAMztBtthT7Kw6rW22qapMMTe7jSHxP7l+yUHRW1uQElGeQUQqft0JXnKGYKryhpnNSRmcI/kTPsDcgEAc1yRNCeYtCXtlDP0V58QBrEoRjtywrB7aWJr0iP8YwVDsb/ppFEIw6Z458HEecd3hWnF9lR1bCa+niQBOPIyt8DN3s9mKn7OX/0vZH2KDGYSbYLHv5MHe70rdxvAwQaIpB8WQ8ofExB7vK2c3Gdvb2LU/PkAJ5k8iYbYhzABbwzV+na56arFdzFn7sJeqnfd1f3KyfQTF54+xwZllmrCx/BwAA//9RTKg4" } diff --git a/x-pack/apm-server/main.go b/x-pack/apm-server/main.go index 986f0912c86..c9b1572fa1f 100644 --- a/x-pack/apm-server/main.go +++ b/x-pack/apm-server/main.go @@ -12,13 +12,23 @@ import ( "golang.org/x/sync/errgroup" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/beats/v7/libbeat/paths" "github.com/elastic/apm-server/beater" - "github.com/elastic/apm-server/publish" - "github.com/elastic/apm-server/transform" + "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/x-pack/apm-server/aggregation/spanmetrics" "github.com/elastic/apm-server/x-pack/apm-server/aggregation/txmetrics" "github.com/elastic/apm-server/x-pack/apm-server/cmd" + "github.com/elastic/apm-server/x-pack/apm-server/sampling" +) + +var ( + aggregationMonitoringRegistry = monitoring.Default.NewRegistry("apm-server.aggregation") + + // Note: this registry is created in github.com/elastic/apm-server/sampling. That package + // will hopefully disappear in the future, when agents no longer send unsampled transactions. 
+ samplingMonitoringRegistry = monitoring.Default.GetRegistry("apm-server.sampling") ) type namedProcessor struct { @@ -27,7 +37,7 @@ type namedProcessor struct { } type processor interface { - ProcessTransformables([]transform.Transformable) []transform.Transformable + model.BatchProcessor Run() error Stop(context.Context) error } @@ -40,33 +50,92 @@ func newProcessors(args beater.ServerParams) ([]namedProcessor, error) { const name = "transaction metrics aggregation" args.Logger.Infof("creating %s with config: %+v", name, args.Config.Aggregation.Transactions) agg, err := txmetrics.NewAggregator(txmetrics.AggregatorConfig{ - Report: args.Reporter, + BatchProcessor: args.BatchProcessor, MaxTransactionGroups: args.Config.Aggregation.Transactions.MaxTransactionGroups, MetricsInterval: args.Config.Aggregation.Transactions.Interval, HDRHistogramSignificantFigures: args.Config.Aggregation.Transactions.HDRHistogramSignificantFigures, - RUMUserAgentLRUSize: args.Config.Aggregation.Transactions.RUMUserAgentLRUSize, }) if err != nil { return nil, errors.Wrapf(err, "error creating %s", name) } processors = append(processors, namedProcessor{name: name, processor: agg}) + aggregationMonitoringRegistry.Remove("txmetrics") + monitoring.NewFunc(aggregationMonitoringRegistry, "txmetrics", agg.CollectMonitoring, monitoring.Report) } if args.Config.Aggregation.ServiceDestinations.Enabled { const name = "service destinations aggregation" args.Logger.Infof("creating %s with config: %+v", name, args.Config.Aggregation.ServiceDestinations) spanAggregator, err := spanmetrics.NewAggregator(spanmetrics.AggregatorConfig{ - Report: args.Reporter, - Interval: args.Config.Aggregation.ServiceDestinations.Interval, - MaxGroups: args.Config.Aggregation.ServiceDestinations.MaxGroups, + BatchProcessor: args.BatchProcessor, + Interval: args.Config.Aggregation.ServiceDestinations.Interval, + MaxGroups: args.Config.Aggregation.ServiceDestinations.MaxGroups, }) if err != nil { return nil, errors.Wrapf(err, "error creating %s", name) } processors = append(processors, namedProcessor{name: name, processor: spanAggregator}) } + if args.Config.Sampling.Tail.Enabled { + const name = "tail sampler" + sampler, err := newTailSamplingProcessor(args) + if err != nil { + return nil, errors.Wrapf(err, "error creating %s", name) + } + samplingMonitoringRegistry.Remove("tail") + monitoring.NewFunc(samplingMonitoringRegistry, "tail", sampler.CollectMonitoring, monitoring.Report) + processors = append(processors, namedProcessor{name: name, processor: sampler}) + } return processors, nil } +func newTailSamplingProcessor(args beater.ServerParams) (*sampling.Processor, error) { + if !args.Config.DataStreams.Enabled { + return nil, errors.New("tail-based sampling requires data streams") + } + + tailSamplingConfig := args.Config.Sampling.Tail + es, err := args.NewElasticsearchClient(tailSamplingConfig.ESConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create Elasticsearch client for tail-sampling") + } + + policies := make([]sampling.Policy, len(tailSamplingConfig.Policies)) + for i, in := range tailSamplingConfig.Policies { + policies[i] = sampling.Policy{ + PolicyCriteria: sampling.PolicyCriteria{ + ServiceName: in.Service.Name, + ServiceEnvironment: in.Service.Environment, + TraceName: in.Trace.Name, + TraceOutcome: in.Trace.Outcome, + }, + SampleRate: in.SampleRate, + } + } + return sampling.NewProcessor(sampling.Config{ + BeatID: args.Info.ID.String(), + BatchProcessor: args.BatchProcessor, + LocalSamplingConfig: 
sampling.LocalSamplingConfig{ + FlushInterval: tailSamplingConfig.Interval, + MaxDynamicServices: 1000, + Policies: policies, + IngestRateDecayFactor: tailSamplingConfig.IngestRateDecayFactor, + }, + RemoteSamplingConfig: sampling.RemoteSamplingConfig{ + Elasticsearch: es, + SampledTracesDataStream: sampling.DataStreamConfig{ + Type: "traces", + Dataset: "apm.sampled", + Namespace: args.Namespace, + }, + }, + StorageConfig: sampling.StorageConfig{ + StorageDir: paths.Resolve(paths.Data, tailSamplingConfig.StorageDir), + StorageGCInterval: tailSamplingConfig.StorageGCInterval, + TTL: tailSamplingConfig.TTL, + }, + }) +} + // runServerWithProcessors runs the APM Server and the given list of processors. // // newProcessors returns a list of processors which will process events in @@ -76,13 +145,11 @@ func runServerWithProcessors(ctx context.Context, runServer beater.RunServerFunc return runServer(ctx, args) } - origReport := args.Reporter - args.Reporter = func(ctx context.Context, req publish.PendingReq) error { - for _, p := range processors { - req.Transformables = p.ProcessTransformables(req.Transformables) - } - return origReport(ctx, req) + batchProcessors := make([]model.BatchProcessor, len(processors)) + for i, p := range processors { + batchProcessors[i] = p } + runServer = beater.WrapRunServerWithProcessors(runServer, batchProcessors...) g, ctx := errgroup.WithContext(ctx) for _, p := range processors { @@ -114,16 +181,18 @@ func runServerWithProcessors(ctx context.Context, runServer beater.RunServerFunc return g.Wait() } -var rootCmd = cmd.NewXPackRootCommand(beater.NewCreator(beater.CreatorParams{ - WrapRunServer: func(runServer beater.RunServerFunc) beater.RunServerFunc { - return func(ctx context.Context, args beater.ServerParams) error { - processors, err := newProcessors(args) - if err != nil { - return err - } - return runServerWithProcessors(ctx, runServer, args, processors...) +func wrapRunServer(runServer beater.RunServerFunc) beater.RunServerFunc { + return func(ctx context.Context, args beater.ServerParams) error { + processors, err := newProcessors(args) + if err != nil { + return err } - }, + return runServerWithProcessors(ctx, runServer, args, processors...) + } +} + +var rootCmd = cmd.NewXPackRootCommand(beater.NewCreator(beater.CreatorParams{ + WrapRunServer: wrapRunServer, })) func main() { diff --git a/x-pack/apm-server/main_test.go b/x-pack/apm-server/main_test.go index 967130b069c..7525988dfd9 100644 --- a/x-pack/apm-server/main_test.go +++ b/x-pack/apm-server/main_test.go @@ -7,8 +7,24 @@ package main // This file is mandatory as otherwise the apm-server.test binary is not generated correctly. import ( + "context" "flag" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/pkg/errors" + "go.elastic.co/apm/apmtest" + + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/beats/v7/libbeat/paths" + + "github.com/elastic/apm-server/beater" + "github.com/elastic/apm-server/beater/config" + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/apm-server/model/modelprocessor" ) var systemTest *bool @@ -28,3 +44,44 @@ func TestSystem(t *testing.T) { main() } } + +func TestMonitoring(t *testing.T) { + // samplingMonitoringRegistry will be nil, as under normal circumstances + // we rely on apm-server/sampling to create the registry. 
+ samplingMonitoringRegistry = monitoring.NewRegistry() + + var aggregationMonitoringSnapshot, tailSamplingMonitoringSnapshot monitoring.FlatSnapshot + runServerError := errors.New("runServer") + runServer := func(ctx context.Context, args beater.ServerParams) error { + aggregationMonitoringSnapshot = monitoring.CollectFlatSnapshot(aggregationMonitoringRegistry, monitoring.Full, false) + tailSamplingMonitoringSnapshot = monitoring.CollectFlatSnapshot(samplingMonitoringRegistry, monitoring.Full, false) + return runServerError + } + runServer = wrapRunServer(runServer) + + home := t.TempDir() + err := paths.InitPaths(&paths.Path{Home: home}) + require.NoError(t, err) + + cfg := config.DefaultConfig() + cfg.DataStreams.Enabled = true + cfg.Aggregation.Transactions.Enabled = true + cfg.Sampling.Tail.Enabled = true + cfg.Sampling.Tail.Policies = []config.TailSamplingPolicy{{SampleRate: 0.1}} + + // Call the wrapped runServer twice, to ensure metric registration does not panic. + for i := 0; i < 2; i++ { + err := runServer(context.Background(), beater.ServerParams{ + Config: cfg, + Logger: logp.NewLogger(""), + Tracer: apmtest.DiscardTracer, + BatchProcessor: modelprocessor.Nop{}, + Managed: true, + Namespace: "default", + NewElasticsearchClient: elasticsearch.NewClient, + }) + assert.Equal(t, runServerError, err) + assert.NotEqual(t, monitoring.MakeFlatSnapshot(), aggregationMonitoringSnapshot) + assert.NotEqual(t, monitoring.MakeFlatSnapshot(), tailSamplingMonitoringSnapshot) + } +} diff --git a/x-pack/apm-server/sampling/config.go b/x-pack/apm-server/sampling/config.go new file mode 100644 index 00000000000..60aaf6754ad --- /dev/null +++ b/x-pack/apm-server/sampling/config.go @@ -0,0 +1,233 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package sampling + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub" +) + +// Config holds configuration for Processor. +type Config struct { + // BeatID holds the unique ID of this apm-server. + BeatID string + + // BatchProcessor holds the model.BatchProcessor, for asynchronously processing + // tail-sampled trace events. + BatchProcessor model.BatchProcessor + + LocalSamplingConfig + RemoteSamplingConfig + StorageConfig +} + +// LocalSamplingConfig holds Processor configuration related to local reservoir sampling. +type LocalSamplingConfig struct { + // FlushInterval holds the local sampling interval. + // + // This controls how long it takes for servers to become aware of each other's + // sampled trace IDs, and so should be in the order of tens of seconds, or low + // minutes. In order not to lose sampled trace events, FlushInterval should be + // no greater than half of the TTL. + FlushInterval time.Duration + + // MaxDynamicServices holds the maximum number of dynamic services to track. + // + // Once MaxDynamicServices is reached, root transactions from a service that + // does not have an explicit policy defined may be dropped. + MaxDynamicServices int + + // Policies holds local tail-sampling policies. Policies are matched in the + // order provided. Policies should therefore be ordered from most to least + // specific. 
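+	//
+	// For example (service names and rates here are illustrative, not
+	// defaults), ordered from most to least specific:
+	//
+	//	[]Policy{
+	//		{PolicyCriteria: PolicyCriteria{ServiceName: "checkout", ServiceEnvironment: "production"}, SampleRate: 0.5},
+	//		{PolicyCriteria: PolicyCriteria{ServiceName: "checkout"}, SampleRate: 0.2},
+	//		{SampleRate: 0.1}, // catch-all policy with empty criteria
+	//	}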
+ // + // Policies must include at least one policy that matches all traces, to ensure + // that dropping non-matching traces is intentional. + Policies []Policy + + // IngestRateDecayFactor holds the ingest rate decay factor, used for calculating + // the exponentially weighted moving average (EWMA) ingest rate for each trace + // group. + IngestRateDecayFactor float64 +} + +// RemoteSamplingConfig holds Processor configuration related to publishing and +// subscribing to remote sampling decisions. +type RemoteSamplingConfig struct { + // Elasticsearch holds the Elasticsearch client to use for publishing + // and subscribing to remote sampling decisions. + Elasticsearch elasticsearch.Client + + // SampledTracesDataStream holds the identifiers for the Elasticsearch + // data stream for storing and searching sampled trace IDs. + SampledTracesDataStream DataStreamConfig +} + +// DataStreamConfig holds configuration to identify a data stream. +type DataStreamConfig struct { + // Type holds the data stream's type. + Type string + + // Dataset holds the data stream's dataset. + Dataset string + + // Namespace holds the data stream's namespace. + Namespace string +} + +// StorageConfig holds Processor configuration related to event storage. +type StorageConfig struct { + // StorageDir holds the directory in which event storage will be maintained. + StorageDir string + + // StorageGCInterval holds the amount of time between storage garbage collections. + StorageGCInterval time.Duration + + // TTL holds the amount of time before events and sampling decisions + // are expired from local storage. + TTL time.Duration + + // ValueLogFileSize holds the size for Badger value log files. + // If unspecified, then the default value of 128MB will be used. + ValueLogFileSize int64 +} + +// Policy holds a tail-sampling policy: criteria for matching root transactions, +// and the sampling parameters to apply to their traces. +type Policy struct { + PolicyCriteria + + // SampleRate holds the tail-based sample rate to use for traces that + // match this policy. + SampleRate float64 +} + +// PolicyCriteria holds the criteria for matching root transactions to a +// tail-sampling policy. +// +// All criteria are optional. If a field is empty, it will be excluded from +// the comparison. If none are specified, then the policy will match all +// transactions. +type PolicyCriteria struct { + // ServiceName holds the service name for which this policy applies. + // + // If unspecified, transactions from differing services will be + // grouped separately for sampling purposes. This can be used for + // defining a default/catch-all policy. + ServiceName string + + // ServiceEnvironment holds the service environment for which this + // policy applies. + // + // If unspecified, transactions from differing environments (but still + // from the same service *name*) will be grouped together for sampling + // purposes. + ServiceEnvironment string + + // TraceOutcome holds the root transaction outcome for which this + // policy applies. + // + // If unspecified, root transactions with differing outcomes will be + // grouped together for sampling purposes. + TraceOutcome string + + // TraceName holds the root transaction name for which this policy + // applies. + // + // If unspecified, root transactions with differing names (but still + // from the same service) will be grouped together for sampling purposes, + // similar to head-based sampling. + TraceName string +} + +// Validate validates the configuration. 
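+//
+// As a sketch (all values below are illustrative; esClient and
+// batchProcessor are assumed to exist elsewhere), a configuration that
+// passes validation looks like:
+//
+//	cfg := Config{
+//		BeatID:         "apm-server-01",
+//		BatchProcessor: batchProcessor,
+//		LocalSamplingConfig: LocalSamplingConfig{
+//			FlushInterval:         time.Minute,
+//			MaxDynamicServices:    1000,
+//			Policies:              []Policy{{SampleRate: 0.1}},
+//			IngestRateDecayFactor: 0.25,
+//		},
+//		RemoteSamplingConfig: RemoteSamplingConfig{
+//			Elasticsearch: esClient,
+//			SampledTracesDataStream: DataStreamConfig{
+//				Type:      "traces",
+//				Dataset:   "apm.sampled",
+//				Namespace: "default",
+//			},
+//		},
+//		StorageConfig: StorageConfig{
+//			StorageDir:        "tail_sampling",
+//			StorageGCInterval: 5 * time.Minute,
+//			TTL:               30 * time.Minute,
+//		},
+//	}
+//	err := cfg.Validate() // nil for the example above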
+func (config Config) Validate() error { + if config.BeatID == "" { + return errors.New("BeatID unspecified") + } + if config.BatchProcessor == nil { + return errors.New("BatchProcessor unspecified") + } + if err := config.LocalSamplingConfig.validate(); err != nil { + return errors.Wrap(err, "invalid local sampling config") + } + if err := config.RemoteSamplingConfig.validate(); err != nil { + return errors.Wrap(err, "invalid remote sampling config") + } + if err := config.StorageConfig.validate(); err != nil { + return errors.Wrap(err, "invalid storage config") + } + return nil +} + +func (config LocalSamplingConfig) validate() error { + if config.FlushInterval <= 0 { + return errors.New("FlushInterval unspecified or negative") + } + if config.MaxDynamicServices <= 0 { + return errors.New("MaxDynamicServices unspecified or negative") + } + if len(config.Policies) == 0 { + return errors.New("Policies unspecified") + } + var anyDefaultPolicy bool + for i, policy := range config.Policies { + if err := policy.validate(); err != nil { + return errors.Wrapf(err, "Policy %d invalid", i) + } + if policy.PolicyCriteria == (PolicyCriteria{}) { + anyDefaultPolicy = true + } + } + if !anyDefaultPolicy { + return errors.New("Policies does not contain a default (empty criteria) policy") + } + if config.IngestRateDecayFactor <= 0 || config.IngestRateDecayFactor > 1 { + return errors.New("IngestRateDecayFactor unspecified or out of range (0,1]") + } + return nil +} + +func (config RemoteSamplingConfig) validate() error { + if config.Elasticsearch == nil { + return errors.New("Elasticsearch unspecified") + } + if err := config.SampledTracesDataStream.validate(); err != nil { + return errors.New("SampledTracesDataStream unspecified or invalid") + } + return nil +} + +func (config DataStreamConfig) validate() error { + return pubsub.DataStreamConfig(config).Validate() +} + +func (config StorageConfig) validate() error { + if config.StorageDir == "" { + return errors.New("StorageDir unspecified") + } + if config.StorageGCInterval <= 0 { + return errors.New("StorageGCInterval unspecified or negative") + } + if config.TTL <= 0 { + return errors.New("TTL unspecified or negative") + } + return nil +} + +func (p Policy) validate() error { + // TODO(axw) allow sampling rate of 1.0 (100%), which would + // cause the root transaction to be indexed, and a sampling + // decision to be written to local storage, immediately. + if p.SampleRate < 0 || p.SampleRate >= 1 { + return errors.New("SampleRate unspecified or out of range [0,1)") + } + return nil +} diff --git a/x-pack/apm-server/sampling/config_test.go b/x-pack/apm-server/sampling/config_test.go new file mode 100644 index 00000000000..1349a2d7621 --- /dev/null +++ b/x-pack/apm-server/sampling/config_test.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package sampling_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/elasticsearch" + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/x-pack/apm-server/sampling" +) + +func TestNewProcessorConfigInvalid(t *testing.T) { + var config sampling.Config + assertInvalidConfigError := func(expectedError string) { + t.Helper() + agg, err := sampling.NewProcessor(config) + require.Error(t, err) + require.Nil(t, agg) + assert.EqualError(t, err, "invalid tail-sampling config: "+expectedError) + } + assertInvalidConfigError("BeatID unspecified") + config.BeatID = "beat" + + assertInvalidConfigError("BatchProcessor unspecified") + config.BatchProcessor = struct{ model.BatchProcessor }{} + + assertInvalidConfigError("invalid local sampling config: FlushInterval unspecified or negative") + config.FlushInterval = 1 + + assertInvalidConfigError("invalid local sampling config: MaxDynamicServices unspecified or negative") + config.MaxDynamicServices = 1 + + assertInvalidConfigError("invalid local sampling config: Policies unspecified") + config.Policies = []sampling.Policy{{ + PolicyCriteria: sampling.PolicyCriteria{ServiceName: "foo"}, + }} + assertInvalidConfigError("invalid local sampling config: Policies does not contain a default (empty criteria) policy") + config.Policies[0].PolicyCriteria = sampling.PolicyCriteria{} + for _, invalid := range []float64{-1, 1.0, 2.0} { + config.Policies[0].SampleRate = invalid + assertInvalidConfigError("invalid local sampling config: Policy 0 invalid: SampleRate unspecified or out of range [0,1)") + } + config.Policies[0].SampleRate = 0.5 + + for _, invalid := range []float64{-1, 0, 2.0} { + config.IngestRateDecayFactor = invalid + assertInvalidConfigError("invalid local sampling config: IngestRateDecayFactor unspecified or out of range (0,1]") + } + config.IngestRateDecayFactor = 0.5 + + assertInvalidConfigError("invalid remote sampling config: Elasticsearch unspecified") + var elasticsearchClient struct { + elasticsearch.Client + } + config.Elasticsearch = elasticsearchClient + + assertInvalidConfigError("invalid remote sampling config: SampledTracesDataStream unspecified or invalid") + config.SampledTracesDataStream = sampling.DataStreamConfig{ + Type: "traces", + Dataset: "sampled", + Namespace: "testing", + } + + assertInvalidConfigError("invalid storage config: StorageDir unspecified") + config.StorageDir = "tbs" + + assertInvalidConfigError("invalid storage config: StorageGCInterval unspecified or negative") + config.StorageGCInterval = 1 + + assertInvalidConfigError("invalid storage config: TTL unspecified or negative") + config.TTL = 1 +} diff --git a/x-pack/apm-server/sampling/eventstorage/codec.go b/x-pack/apm-server/sampling/eventstorage/codec.go deleted file mode 100644 index 64078d0743f..00000000000 --- a/x-pack/apm-server/sampling/eventstorage/codec.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package eventstorage diff --git a/x-pack/apm-server/sampling/eventstorage/jsoncodec.go b/x-pack/apm-server/sampling/eventstorage/jsoncodec.go index 4e040db1d2d..77d7f26b8d1 100644 --- a/x-pack/apm-server/sampling/eventstorage/jsoncodec.go +++ b/x-pack/apm-server/sampling/eventstorage/jsoncodec.go @@ -17,22 +17,12 @@ import ( // JSONCodec is an implementation of Codec, using JSON encoding. type JSONCodec struct{} -// DecodeSpan decodes data as JSON into span. -func (JSONCodec) DecodeSpan(data []byte, span *model.Span) error { - return jsoniter.ConfigFastest.Unmarshal(data, span) +// DecodeEvent decodes data as JSON into event. +func (JSONCodec) DecodeEvent(data []byte, event *model.APMEvent) error { + return jsoniter.ConfigFastest.Unmarshal(data, event) } -// DecodeTransaction decodes data as JSON into tx. -func (JSONCodec) DecodeTransaction(data []byte, tx *model.Transaction) error { - return jsoniter.ConfigFastest.Unmarshal(data, tx) -} - -// EncodeSpan encodes span as JSON. -func (JSONCodec) EncodeSpan(span *model.Span) ([]byte, error) { - return json.Marshal(span) -} - -// EncodeTransaction encodes tx as JSON. -func (JSONCodec) EncodeTransaction(tx *model.Transaction) ([]byte, error) { - return json.Marshal(tx) +// EncodeEvent encodes event as JSON. +func (JSONCodec) EncodeEvent(event *model.APMEvent) ([]byte, error) { + return json.Marshal(event) } diff --git a/x-pack/apm-server/sampling/eventstorage/sharded.go b/x-pack/apm-server/sampling/eventstorage/sharded.go index c0cb1089a75..f45f6c6f6a3 100644 --- a/x-pack/apm-server/sampling/eventstorage/sharded.go +++ b/x-pack/apm-server/sampling/eventstorage/sharded.go @@ -51,19 +51,14 @@ func (s *ShardedReadWriter) Flush() error { return result } -// ReadEvents calls Writer.ReadEvents, using a sharded, locked, Writer. -func (s *ShardedReadWriter) ReadEvents(traceID string, out *model.Batch) error { - return s.getWriter(traceID).ReadEvents(traceID, out) +// ReadTraceEvents calls Writer.ReadTraceEvents, using a sharded, locked, Writer. +func (s *ShardedReadWriter) ReadTraceEvents(traceID string, out *model.Batch) error { + return s.getWriter(traceID).ReadTraceEvents(traceID, out) } -// WriteTransaction calls Writer.WriteTransaction, using a sharded, locked, Writer. -func (s *ShardedReadWriter) WriteTransaction(tx *model.Transaction) error { - return s.getWriter(tx.TraceID).WriteTransaction(tx) -} - -// WriteSpan calls Writer.WriteSpan, using a sharded, locked, Writer. -func (s *ShardedReadWriter) WriteSpan(span *model.Span) error { - return s.getWriter(span.TraceID).WriteSpan(span) +// WriteTraceEvent calls Writer.WriteTraceEvent, using a sharded, locked, Writer. +func (s *ShardedReadWriter) WriteTraceEvent(traceID, id string, event *model.APMEvent) error { + return s.getWriter(traceID).WriteTraceEvent(traceID, id, event) } // WriteTraceSampled calls Writer.WriteTraceSampled, using a sharded, locked, Writer. @@ -76,6 +71,11 @@ func (s *ShardedReadWriter) IsTraceSampled(traceID string) (bool, error) { return s.getWriter(traceID).IsTraceSampled(traceID) } +// DeleteTraceEvent calls Writer.DeleteTraceEvent, using a sharded, locked, Writer. +func (s *ShardedReadWriter) DeleteTraceEvent(traceID, id string) error { + return s.getWriter(traceID).DeleteTraceEvent(traceID, id) +} + // getWriter returns an event storage writer for the given trace ID. 
// // This method is idempotent, which is necessary to avoid transaction @@ -104,22 +104,16 @@ func (rw *lockedReadWriter) Flush() error { return rw.rw.Flush() } -func (rw *lockedReadWriter) ReadEvents(traceID string, out *model.Batch) error { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.rw.ReadEvents(traceID, out) -} - -func (rw *lockedReadWriter) WriteTransaction(tx *model.Transaction) error { +func (rw *lockedReadWriter) ReadTraceEvents(traceID string, out *model.Batch) error { rw.mu.Lock() defer rw.mu.Unlock() - return rw.rw.WriteTransaction(tx) + return rw.rw.ReadTraceEvents(traceID, out) } -func (rw *lockedReadWriter) WriteSpan(s *model.Span) error { +func (rw *lockedReadWriter) WriteTraceEvent(traceID, id string, event *model.APMEvent) error { rw.mu.Lock() defer rw.mu.Unlock() - return rw.rw.WriteSpan(s) + return rw.rw.WriteTraceEvent(traceID, id, event) } func (rw *lockedReadWriter) WriteTraceSampled(traceID string, sampled bool) error { @@ -133,3 +127,9 @@ func (rw *lockedReadWriter) IsTraceSampled(traceID string) (bool, error) { defer rw.mu.Unlock() return rw.rw.IsTraceSampled(traceID) } + +func (rw *lockedReadWriter) DeleteTraceEvent(traceID, id string) error { + rw.mu.Lock() + defer rw.mu.Unlock() + return rw.rw.DeleteTraceEvent(traceID, id) +} diff --git a/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go b/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go index fd39b0dde15..86f15d80923 100644 --- a/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go +++ b/x-pack/apm-server/sampling/eventstorage/sharded_bench_test.go @@ -22,10 +22,12 @@ func BenchmarkShardedWriteTransactionUncontended(b *testing.B) { defer sharded.Close() b.RunParallel(func(pb *testing.PB) { - traceUUID := uuid.Must(uuid.NewV4()) - transaction := &model.Transaction{TraceID: traceUUID.String(), ID: traceUUID.String()} + traceID := uuid.Must(uuid.NewV4()).String() + transaction := &model.APMEvent{ + Transaction: &model.Transaction{ID: traceID}, + } for pb.Next() { - if err := sharded.WriteTransaction(transaction); err != nil { + if err := sharded.WriteTraceEvent(traceID, traceID, transaction); err != nil { b.Fatal(err) } } @@ -41,13 +43,15 @@ func BenchmarkShardedWriteTransactionContended(b *testing.B) { // Use a single trace ID, causing all events to go through // the same sharded writer, contending for a single lock. - traceUUID := uuid.Must(uuid.NewV4()) + traceID := uuid.Must(uuid.NewV4()).String() b.RunParallel(func(pb *testing.PB) { - transactionUUID := uuid.Must(uuid.NewV4()) - transaction := &model.Transaction{TraceID: traceUUID.String(), ID: transactionUUID.String()} + transactionID := uuid.Must(uuid.NewV4()).String() + transaction := &model.APMEvent{ + Transaction: &model.Transaction{ID: transactionID}, + } for pb.Next() { - if err := sharded.WriteTransaction(transaction); err != nil { + if err := sharded.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } diff --git a/x-pack/apm-server/sampling/eventstorage/storage.go b/x-pack/apm-server/sampling/eventstorage/storage.go index 1e6648f2d89..bcb8949d5d7 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage.go +++ b/x-pack/apm-server/sampling/eventstorage/storage.go @@ -18,8 +18,7 @@ const ( // over time, to avoid misinterpreting historical data. 
 	entryMetaTraceSampled   = 's'
 	entryMetaTraceUnsampled = 'u'
-	entryMetaTransaction    = 'T'
-	entryMetaSpan           = 'S'
+	entryMetaTraceEvent     = 'e'
 )
 
 // ErrNotFound is returned by the Storage.IsTraceSampled method,
@@ -36,10 +35,8 @@ type Storage struct {
 
 // Codec provides methods for encoding and decoding events.
 type Codec interface {
-	DecodeSpan([]byte, *model.Span) error
-	DecodeTransaction([]byte, *model.Transaction) error
-	EncodeSpan(*model.Span) ([]byte, error)
-	EncodeTransaction(*model.Transaction) ([]byte, error)
+	DecodeEvent([]byte, *model.APMEvent) error
+	EncodeEvent(*model.APMEvent) ([]byte, error)
 }
 
 // New returns a new Storage using db and codec.
@@ -133,34 +130,17 @@ func (rw *ReadWriter) IsTraceSampled(traceID string) (bool, error) {
 	return item.UserMeta() == entryMetaTraceSampled, nil
 }
 
-// WriteTransaction writes tx to storage.
+// WriteTraceEvent writes a trace event to storage.
 //
-// WriteTransaction may return before the write is committed to storage.
+// WriteTraceEvent may return before the write is committed to storage.
 // Call Flush to ensure the write is committed.
-func (rw *ReadWriter) WriteTransaction(tx *model.Transaction) error {
-	key := append(append([]byte(tx.TraceID), ':'), tx.ID...)
-	data, err := rw.s.codec.EncodeTransaction(tx)
+func (rw *ReadWriter) WriteTraceEvent(traceID string, id string, event *model.APMEvent) error {
+	key := append(append([]byte(traceID), ':'), id...)
+	data, err := rw.s.codec.EncodeEvent(event)
 	if err != nil {
 		return err
 	}
-	return rw.writeEvent(key[:], data, entryMetaTransaction)
-}
-
-// WriteSpan writes span to storage.
-//
-// WriteSpan may return before the write is committed to storage.
-// Call Flush to ensure the write is committed.
-func (rw *ReadWriter) WriteSpan(span *model.Span) error {
-	key := append(append([]byte(span.TraceID), ':'), span.ID...)
-	data, err := rw.s.codec.EncodeSpan(span)
-	if err != nil {
-		return err
-	}
-	return rw.writeEvent(key[:], data, entryMetaSpan)
-}
-
-func (rw *ReadWriter) writeEvent(key, value []byte, meta byte) error {
-	return rw.writeEntry(badger.NewEntry(key, value).WithMeta(meta).WithTTL(rw.s.ttl))
+	return rw.writeEntry(badger.NewEntry(key[:], data).WithMeta(entryMetaTraceEvent).WithTTL(rw.s.ttl))
 }
 
 func (rw *ReadWriter) writeEntry(e *badger.Entry) error {
@@ -175,13 +155,18 @@ func (rw *ReadWriter) writeEntry(e *badger.Entry) error {
 	return rw.txn.SetEntry(e)
 }
 
-// ReadEvents reads events with the given trace ID from storage into a batch.
+// DeleteTraceEvent deletes the trace event from storage.
+func (rw *ReadWriter) DeleteTraceEvent(traceID, id string) error {
+	key := append(append([]byte(traceID), ':'), id...)
+	return rw.txn.Delete(key)
+}
+
+// ReadTraceEvents reads trace events with the given trace ID from storage into out.
 //
-// ReadEvents may implicitly commit the current transaction when the number
-// of pending writes exceeds a threshold. This is due to how Badger internally
-// iterates over uncommitted writes, where it will sort keys for each new
-// iterator.
-func (rw *ReadWriter) ReadEvents(traceID string, out *model.Batch) error {
+// ReadTraceEvents may implicitly commit the current transaction when the number of
+// pending writes exceeds a threshold. This is due to how Badger internally iterates
+// over uncommitted writes, where it will sort keys for each new iterator.
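+//
+// A sketch of typical usage (the trace ID is an assumed example value):
+//
+//	var batch model.Batch
+//	if err := rw.ReadTraceEvents("0102030405060708090a0b0c0d0e0f10", &batch); err != nil {
+//		// handle error
+//	}
+//	// batch now holds every event stored under that trace ID.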
+func (rw *ReadWriter) ReadTraceEvents(traceID string, out *model.Batch) error { opts := badger.DefaultIteratorOptions rw.readKeyBuf = append(append(rw.readKeyBuf[:0], traceID...), ':') opts.Prefix = rw.readKeyBuf @@ -203,22 +188,14 @@ func (rw *ReadWriter) ReadEvents(traceID string, out *model.Batch) error { continue } switch item.UserMeta() { - case entryMetaTransaction: - var event model.Transaction - if err := item.Value(func(data []byte) error { - return rw.s.codec.DecodeTransaction(data, &event) - }); err != nil { - return err - } - out.Transactions = append(out.Transactions, &event) - case entryMetaSpan: - var event model.Span + case entryMetaTraceEvent: + var event model.APMEvent if err := item.Value(func(data []byte) error { - return rw.s.codec.DecodeSpan(data, &event) + return rw.s.codec.DecodeEvent(data, &event) }); err != nil { return err } - out.Spans = append(out.Spans, &event) + *out = append(*out, event) default: // Unknown entry meta: ignore. continue diff --git a/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go b/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go index 90a9e60a527..8a9241163be 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go +++ b/x-pack/apm-server/sampling/eventstorage/storage_bench_test.go @@ -25,16 +25,15 @@ func BenchmarkWriteTransaction(b *testing.B) { readWriter := store.NewReadWriter() defer readWriter.Close() - traceID := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - transactionID := []byte{1, 2, 3, 4, 5, 6, 7, 8} - transaction := &model.Transaction{ - TraceID: hex.EncodeToString(traceID), - ID: hex.EncodeToString(transactionID), + traceID := hex.EncodeToString([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) + transactionID := hex.EncodeToString([]byte{1, 2, 3, 4, 5, 6, 7, 8}) + transaction := &model.APMEvent{ + Transaction: &model.Transaction{ID: transactionID}, } b.ResetTimer() for i := 0; i < b.N; i++ { - if err := readWriter.WriteTransaction(transaction); err != nil { + if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } @@ -53,7 +52,7 @@ func BenchmarkWriteTransaction(b *testing.B) { } func BenchmarkReadEvents(b *testing.B) { - traceUUID := uuid.Must(uuid.NewV4()) + traceID := uuid.Must(uuid.NewV4()).String() test := func(b *testing.B, codec eventstorage.Codec) { // Test with varying numbers of events in the trace. 
@@ -67,12 +66,13 @@ func BenchmarkReadEvents(b *testing.B) { defer readWriter.Close() for i := 0; i < count; i++ { - transactionUUID := uuid.Must(uuid.NewV4()) - transaction := &model.Transaction{ - TraceID: traceUUID.String(), - ID: transactionUUID.String(), + transactionID := uuid.Must(uuid.NewV4()).String() + transaction := &model.APMEvent{ + Transaction: &model.Transaction{ + ID: transactionID, + }, } - if err := readWriter.WriteTransaction(transaction); err != nil { + if err := readWriter.WriteTraceEvent(traceID, transactionID, transaction); err != nil { b.Fatal(err) } } @@ -84,14 +84,14 @@ func BenchmarkReadEvents(b *testing.B) { b.ResetTimer() var batch model.Batch for i := 0; i < b.N; i++ { - batch.Reset() - if err := readWriter.ReadEvents(traceUUID.String(), &batch); err != nil { + batch = batch[:0] + if err := readWriter.ReadTraceEvents(traceID, &batch); err != nil { b.Fatal(err) } - if batch.Len() != count { + if len(batch) != count { panic(fmt.Errorf( "event count mismatch: expected %d, got %d", - count, batch.Len(), + count, len(batch), )) } } @@ -156,7 +156,5 @@ func BenchmarkIsTraceSampled(b *testing.B) { type nopCodec struct{} -func (nopCodec) DecodeSpan(data []byte, span *model.Span) error { return nil } -func (nopCodec) DecodeTransaction(data []byte, tx *model.Transaction) error { return nil } -func (nopCodec) EncodeSpan(*model.Span) ([]byte, error) { return nil, nil } -func (nopCodec) EncodeTransaction(*model.Transaction) ([]byte, error) { return nil, nil } +func (nopCodec) DecodeEvent(data []byte, event *model.APMEvent) error { return nil } +func (nopCodec) EncodeEvent(*model.APMEvent) ([]byte, error) { return nil, nil } diff --git a/x-pack/apm-server/sampling/eventstorage/storage_test.go b/x-pack/apm-server/sampling/eventstorage/storage_test.go index 8febfffd7f1..fd0f2f054f2 100644 --- a/x-pack/apm-server/sampling/eventstorage/storage_test.go +++ b/x-pack/apm-server/sampling/eventstorage/storage_test.go @@ -23,7 +23,7 @@ func TestWriteEvents(t *testing.T) { // - 1 transaction and 1 span // - 1 transaction and 100 spans // - // The latter test will cause ReadEvents to implicitly call flush. + // The latter test will cause ReadTraceEvents to implicitly call flush. 
t.Run("no_flush", func(t *testing.T) { testWriteEvents(t, 1) }) @@ -39,67 +39,67 @@ func testWriteEvents(t *testing.T, numSpans int) { readWriter := store.NewShardedReadWriter() defer readWriter.Close() - before := time.Now() - - traceUUID := uuid.Must(uuid.NewV4()) - transactionUUID := uuid.Must(uuid.NewV4()) - transaction := &model.Transaction{ - TraceID: traceUUID.String(), - ID: transactionUUID.String(), + beforeWrite := time.Now() + traceID := uuid.Must(uuid.NewV4()).String() + transactionID := uuid.Must(uuid.NewV4()).String() + transaction := model.APMEvent{ + Transaction: &model.Transaction{ID: transactionID}, } - assert.NoError(t, readWriter.WriteTransaction(transaction)) + assert.NoError(t, readWriter.WriteTraceEvent(traceID, transactionID, &transaction)) - var spans []*model.Span + var spanEvents []model.APMEvent for i := 0; i < numSpans; i++ { - spanUUID := uuid.Must(uuid.NewV4()) - span := &model.Span{ - TraceID: traceUUID.String(), - ID: spanUUID.String(), + spanID := uuid.Must(uuid.NewV4()).String() + span := model.APMEvent{ + Span: &model.Span{ID: spanID}, } - assert.NoError(t, readWriter.WriteSpan(span)) - spans = append(spans, span) + assert.NoError(t, readWriter.WriteTraceEvent(traceID, spanID, &span)) + spanEvents = append(spanEvents, span) } + afterWrite := time.Now() // We can read our writes without flushing. var batch model.Batch - assert.NoError(t, readWriter.ReadEvents(traceUUID.String(), &batch)) - assert.ElementsMatch(t, []*model.Transaction{transaction}, batch.Transactions) - assert.ElementsMatch(t, spans, batch.Spans) + assert.NoError(t, readWriter.ReadTraceEvents(traceID, &batch)) + assert.ElementsMatch(t, append(spanEvents, transaction), batch) // Flush in order for the writes to be visible to other readers. assert.NoError(t, readWriter.Flush()) - var recorded []interface{} + var recorded []model.APMEvent assert.NoError(t, db.View(func(txn *badger.Txn) error { iter := txn.NewIterator(badger.IteratorOptions{ - Prefix: []byte(traceUUID.String()), + Prefix: []byte(traceID), }) defer iter.Close() for iter.Rewind(); iter.Valid(); iter.Next() { item := iter.Item() expiresAt := item.ExpiresAt() expiryTime := time.Unix(int64(expiresAt), 0) + + // The expiry time should be somewhere between when we + // started and finished writing + the TTL. The expiry time + // is recorded as seconds since the Unix epoch, hence the + // truncation. 
+			lowerBound := beforeWrite.Add(ttl).Truncate(time.Second)
+			upperBound := afterWrite.Add(ttl).Truncate(time.Second)
 			assert.Condition(t, func() bool {
-				return !before.After(expiryTime) && !expiryTime.After(before.Add(ttl))
-			})
+				return !lowerBound.After(expiryTime)
+			}, "expiry time %s is before %s", expiryTime, lowerBound)
+			assert.Condition(t, func() bool {
+				return !expiryTime.After(upperBound)
+			}, "expiry time %s is after %s", expiryTime, upperBound)
 
-			var value interface{}
-			switch meta := item.UserMeta(); meta {
-			case 'T':
-				value = &model.Transaction{}
-			case 'S':
-				value = &model.Span{}
-			default:
-				t.Fatalf("invalid meta %q", meta)
-			}
+			var event model.APMEvent
+			require.Equal(t, "e", string(item.UserMeta()))
 			assert.NoError(t, item.Value(func(data []byte) error {
-				return json.Unmarshal(data, value)
+				return json.Unmarshal(data, &event)
 			}))
-			recorded = append(recorded, value)
+			recorded = append(recorded, event)
 		}
 		return nil
 	}))
-	assert.ElementsMatch(t, batch.Transformables(), recorded)
+	assert.ElementsMatch(t, batch, recorded)
 }
 
 func TestWriteTraceSampled(t *testing.T) {
@@ -152,7 +152,7 @@
 	}, sampled)
 }
 
-func TestReadEvents(t *testing.T) {
+func TestReadTraceEvents(t *testing.T) {
 	db := newBadgerDB(t, badgerOptions)
 	ttl := time.Minute
 	store := eventstorage.New(db, eventstorage.JSONCodec{}, ttl)
@@ -160,14 +160,14 @@
 	traceID := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
 	require.NoError(t, db.Update(func(txn *badger.Txn) error {
 		key := append(traceID[:], ":12345678"...)
-		value := []byte(`{"name":"transaction"}`)
-		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('T')); err != nil {
+		value := []byte(`{"transaction":{"name":"transaction"}}`)
+		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil {
 			return err
 		}
 
 		key = append(traceID[:], ":87654321"...)
-		value = []byte(`{"name":"span"}`)
-		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('S')); err != nil {
+		value = []byte(`{"span":{"name":"span"}}`)
+		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil {
 			return err
 		}
 
@@ -175,7 +175,7 @@
 		// separating colon, causing it to be ignored.
 		key = append(traceID[:], "nocolon"...)
 		value = []byte(`not-json`)
-		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('S')); err != nil {
+		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil {
 			return err
 		}
 
@@ -192,12 +192,14 @@
 	defer reader.Close()
 
 	var events model.Batch
-	assert.NoError(t, reader.ReadEvents(string(traceID[:]), &events))
-	assert.Equal(t, []*model.Transaction{{Name: "transaction"}}, events.Transactions)
-	assert.Equal(t, []*model.Span{{Name: "span"}}, events.Spans)
+	assert.NoError(t, reader.ReadTraceEvents(string(traceID[:]), &events))
+	assert.Equal(t, model.Batch{
+		{Transaction: &model.Transaction{Name: "transaction"}},
+		{Span: &model.Span{Name: "span"}},
+	}, events)
 }
 
-func TestReadEventsDecodeError(t *testing.T) {
+func TestReadTraceEventsDecodeError(t *testing.T) {
 	db := newBadgerDB(t, badgerOptions)
 	ttl := time.Minute
 	store := eventstorage.New(db, eventstorage.JSONCodec{}, ttl)
@@ -206,7 +208,7 @@
 	require.NoError(t, db.Update(func(txn *badger.Txn) error {
 		key := append(traceID[:], ":12345678"...)
 		value := []byte(`wat`)
-		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('T')); err != nil {
+		if err := txn.SetEntry(badger.NewEntry(key, value).WithMeta('e')); err != nil {
 			return err
 		}
 		return nil
@@ -216,7 +218,7 @@
 	defer reader.Close()
 
 	var events model.Batch
-	err := reader.ReadEvents(string(traceID[:]), &events)
+	err := reader.ReadTraceEvents(string(traceID[:]), &events)
 	assert.Error(t, err)
 }
diff --git a/x-pack/apm-server/sampling/groups.go b/x-pack/apm-server/sampling/groups.go
new file mode 100644
index 00000000000..b9b0bb23e0a
--- /dev/null
+++ b/x-pack/apm-server/sampling/groups.go
@@ -0,0 +1,234 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sampling
+
+import (
+	"errors"
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/elastic/apm-server/model"
+)
+
+const minReservoirSize = 1000
+
+var (
+	errTooManyTraceGroups = errors.New("too many trace groups")
+	errNoMatchingPolicy   = errors.New("no matching policy")
+)
+
+// traceGroups maintains a collection of trace groups.
+type traceGroups struct {
+	// ingestRateDecayFactor is λ, the decay factor used for calculating the
+	// exponentially weighted moving average ingest rate for each trace group.
+	ingestRateDecayFactor float64
+
+	// maxDynamicServiceGroups holds the maximum number of dynamic service groups
+	// to maintain. Once this is reached, no new dynamic service groups will
+	// be created, and events may be dropped.
+	maxDynamicServiceGroups int
+
+	mu                      sync.RWMutex
+	policyGroups            []policyGroup
+	numDynamicServiceGroups int
+}
+
+type policyGroup struct {
+	policy  Policy
+	g       *traceGroup            // nil for catch-all
+	dynamic map[string]*traceGroup // nil for static
+}
+
+func (g *policyGroup) match(transactionEvent *model.APMEvent) bool {
+	if g.policy.ServiceName != "" && g.policy.ServiceName != transactionEvent.Service.Name {
+		return false
+	}
+	if g.policy.ServiceEnvironment != "" && g.policy.ServiceEnvironment != transactionEvent.Service.Environment {
+		return false
+	}
+	if g.policy.TraceOutcome != "" && g.policy.TraceOutcome != transactionEvent.Event.Outcome {
+		return false
+	}
+	if g.policy.TraceName != "" && g.policy.TraceName != transactionEvent.Transaction.Name {
+		return false
+	}
+	return true
+}
+
+func newTraceGroups(
+	policies []Policy,
+	maxDynamicServiceGroups int,
+	ingestRateDecayFactor float64,
+) *traceGroups {
+	groups := &traceGroups{
+		ingestRateDecayFactor:   ingestRateDecayFactor,
+		maxDynamicServiceGroups: maxDynamicServiceGroups,
+		policyGroups:            make([]policyGroup, len(policies)),
+	}
+	for i, policy := range policies {
+		pg := policyGroup{policy: policy}
+		if policy.ServiceName != "" {
+			pg.g = newTraceGroup(policy.SampleRate)
+		} else {
+			pg.dynamic = make(map[string]*traceGroup)
+		}
+		groups.policyGroups[i] = pg
+	}
+	return groups
+}
+
+// traceGroup represents a single trace group, including a measurement of the
+// observed ingest rate and a trace ID-weighted random sampling reservoir.
+type traceGroup struct {
+	// samplingFraction holds the configured fraction of traces in this
+	// trace group to sample, as a fraction in the range (0,1).
+	samplingFraction float64
+
+	mu sync.Mutex
+	// reservoir holds a random sample of root transactions observed
+	// for this trace group, weighted by duration.
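+	// Weighting by duration biases retention toward slower traces: all
+	// else being equal, a root transaction that took twice as long is
+	// roughly twice as likely to be kept (an interpretation of the
+	// weighting, not a guarantee of the reservoir implementation).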
+ reservoir *weightedRandomSample + // total holds the total number of root transactions observed for + // this trace group, including those that are not added to the + // reservoir. This is used to update ingestRate. + total int + // ingestRate holds the exponentially weighted moving average number + // of root transactions observed for this trace group per tail + // sampling interval. This is read and written only by the periodic + // finalizeSampledTraces calls. + ingestRate float64 +} + +func newTraceGroup(samplingFraction float64) *traceGroup { + return &traceGroup{ + samplingFraction: samplingFraction, + reservoir: newWeightedRandomSample( + rand.New(rand.NewSource(time.Now().UnixNano())), + minReservoirSize, + ), + } +} + +// sampleTrace will return true if the root transaction is admitted to +// the in-memory sampling reservoir, and false otherwise. +// +// If the transaction is not admitted due to the transaction group limit +// having been reached, sampleTrace will return errTooManyTraceGroups. +func (g *traceGroups) sampleTrace(transactionEvent *model.APMEvent) (bool, error) { + group, err := g.getTraceGroup(transactionEvent) + if err != nil { + return false, err + } + return group.sampleTrace(transactionEvent) +} + +func (g *traceGroups) getTraceGroup(transactionEvent *model.APMEvent) (*traceGroup, error) { + var pg *policyGroup + for i := range g.policyGroups { + if g.policyGroups[i].match(transactionEvent) { + pg = &g.policyGroups[i] + break + } + } + if pg == nil { + return nil, errNoMatchingPolicy + } + if pg.g != nil { + return pg.g, nil + } + + g.mu.Lock() + defer g.mu.Unlock() + + group, ok := pg.dynamic[transactionEvent.Service.Name] + if !ok { + if g.numDynamicServiceGroups == g.maxDynamicServiceGroups { + return nil, errTooManyTraceGroups + } + g.numDynamicServiceGroups++ + group = newTraceGroup(pg.policy.SampleRate) + pg.dynamic[transactionEvent.Service.Name] = group + } + return group, nil +} + +func (g *traceGroup) sampleTrace(transactionEvent *model.APMEvent) (bool, error) { + if g.samplingFraction == 0 { + return false, nil + } + g.mu.Lock() + defer g.mu.Unlock() + g.total++ + return g.reservoir.Sample( + transactionEvent.Event.Duration.Seconds(), + transactionEvent.Trace.ID, + ), nil +} + +// finalizeSampledTraces locks the groups, appends their current trace IDs to +// traceIDs, and returns the extended slice. On return the groups' sampling +// reservoirs will be reset. +// +// If the maximum number of groups has been reached, then any dynamically +// created groups with the minimum reservoir size (low ingest or sampling rate) +// may be removed. These groups may also be removed if they have seen no +// activity in this interval. +func (g *traceGroups) finalizeSampledTraces(traceIDs []string) []string { + g.mu.Lock() + defer g.mu.Unlock() + maxDynamicServiceGroupsReached := g.numDynamicServiceGroups == g.maxDynamicServiceGroups + for _, pg := range g.policyGroups { + if pg.g != nil { + traceIDs = pg.g.finalizeSampledTraces(traceIDs, g.ingestRateDecayFactor) + continue + } + for serviceName, group := range pg.dynamic { + total := group.total + traceIDs = group.finalizeSampledTraces(traceIDs, g.ingestRateDecayFactor) + if (maxDynamicServiceGroupsReached || total == 0) && group.reservoir.Size() == minReservoirSize { + g.numDynamicServiceGroups-- + delete(pg.dynamic, serviceName) + } + } + } + return traceIDs +} + +// finalizeSampledTraces appends the group's current trace IDs to traceIDs, and +// returns the extended slice. 
On return the group's sampling reservoir will be
+// reset.
+func (g *traceGroup) finalizeSampledTraces(traceIDs []string, ingestRateDecayFactor float64) []string {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if g.ingestRate == 0 {
+		g.ingestRate = float64(g.total)
+	} else {
+		g.ingestRate *= 1 - ingestRateDecayFactor
+		g.ingestRate += ingestRateDecayFactor * float64(g.total)
+	}
+	desiredTotal := int(math.Round(g.samplingFraction * float64(g.total)))
+	g.total = 0
+
+	for n := g.reservoir.Len(); n > desiredTotal; n-- {
+		// The reservoir is larger than the desired fraction of the
+		// observed total number of traces in this interval. Pop the
+		// lowest weighted traces to limit to the desired total.
+		g.reservoir.Pop()
+	}
+	traceIDs = append(traceIDs, g.reservoir.Values()...)
+
+	// Resize the reservoir, so that it can hold the desired fraction of
+	// the observed ingest rate.
+	newReservoirSize := int(math.Round(g.samplingFraction * g.ingestRate))
+	if newReservoirSize < minReservoirSize {
+		newReservoirSize = minReservoirSize
+	}
+	g.reservoir.Reset()
+	g.reservoir.Resize(newReservoirSize)
+	return traceIDs
+}
diff --git a/x-pack/apm-server/sampling/groups_test.go b/x-pack/apm-server/sampling/groups_test.go
new file mode 100644
index 00000000000..906b38869ed
--- /dev/null
+++ b/x-pack/apm-server/sampling/groups_test.go
@@ -0,0 +1,287 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sampling
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/gofrs/uuid"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/apm-server/model"
+)
+
+func TestTraceGroupsPolicies(t *testing.T) {
+	makeTransaction := func(serviceName, serviceEnvironment, traceOutcome, traceName string) *model.APMEvent {
+		return &model.APMEvent{
+			Service: model.Service{
+				Name:        serviceName,
+				Environment: serviceEnvironment,
+			},
+			Event: model.Event{
+				Outcome: traceOutcome,
+			},
+			Processor: model.TransactionProcessor,
+			Trace:     model.Trace{ID: uuid.Must(uuid.NewV4()).String()},
+			Transaction: &model.Transaction{
+				Name: traceName,
+				ID:   uuid.Must(uuid.NewV4()).String(),
+			},
+		}
+	}
+	makePolicy := func(sampleRate float64, serviceName, serviceEnvironment, traceOutcome, traceName string) Policy {
+		return Policy{
+			SampleRate: sampleRate,
+			PolicyCriteria: PolicyCriteria{
+				ServiceName:        serviceName,
+				ServiceEnvironment: serviceEnvironment,
+				TraceName:          traceName,
+				TraceOutcome:       traceOutcome,
+			},
+		}
+	}
+
+	const (
+		staticServiceName  = "static-service"
+		dynamicServiceName = "dynamic-service"
+	)
+	policies := []Policy{
+		makePolicy(0.4, staticServiceName, "production", "error", "GET /healthcheck"),
+		makePolicy(0.3, staticServiceName, "production", "error", ""),
+		makePolicy(0.2, staticServiceName, "production", "", ""),
+		makePolicy(0.1, staticServiceName, "", "", ""),
+	}
+
+	// Clone the policies without ServiceName set, so we have identical catch-all policies
+	// that will match the dynamic service name.
+ for _, policy := range policies[:] { + policy.ServiceName = "" + policies = append(policies, policy) + } + groups := newTraceGroups(policies, 1000, 1.0) + + assertSampleRate := func(sampleRate float64, serviceName, serviceEnvironment, traceOutcome, traceName string) { + tx := makeTransaction(serviceName, serviceEnvironment, traceOutcome, traceName) + const N = 1000 + for i := 0; i < N; i++ { + if _, err := groups.sampleTrace(tx); err != nil { + t.Fatal(err) + } + } + sampled := groups.finalizeSampledTraces(nil) + assert.Len(t, sampled, int(sampleRate*N)) + } + + for _, serviceName := range []string{staticServiceName, dynamicServiceName} { + assertSampleRate(0.1, serviceName, "testing", "error", "GET /healthcheck") + assertSampleRate(0.2, serviceName, "production", "success", "GET /healthcheck") + assertSampleRate(0.3, serviceName, "production", "error", "GET /") + assertSampleRate(0.4, serviceName, "production", "error", "GET /healthcheck") + } +} + +func TestTraceGroupsMax(t *testing.T) { + const ( + maxDynamicServices = 100 + ingestRateCoefficient = 1.0 + ) + policies := []Policy{{SampleRate: 1.0}} + groups := newTraceGroups(policies, maxDynamicServices, ingestRateCoefficient) + + for i := 0; i < maxDynamicServices; i++ { + serviceName := fmt.Sprintf("service_group_%d", i) + for i := 0; i < minReservoirSize; i++ { + admitted, err := groups.sampleTrace(&model.APMEvent{ + Service: model.Service{ + Name: serviceName, + }, + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: uuid.Must(uuid.NewV4()).String()}, + Transaction: &model.Transaction{ + Name: "whatever", + ID: uuid.Must(uuid.NewV4()).String(), + }, + }) + require.NoError(t, err) + assert.True(t, admitted) + } + } + + admitted, err := groups.sampleTrace(&model.APMEvent{ + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: uuid.Must(uuid.NewV4()).String()}, + Transaction: &model.Transaction{ + Name: "overflow", + ID: uuid.Must(uuid.NewV4()).String(), + }, + }) + assert.Equal(t, errTooManyTraceGroups, err) + assert.False(t, admitted) +} + +func TestTraceGroupReservoirResize(t *testing.T) { + const ( + maxDynamicServices = 1 + ingestRateCoefficient = 0.75 + ) + policies := []Policy{{SampleRate: 0.2}} + groups := newTraceGroups(policies, maxDynamicServices, ingestRateCoefficient) + + sendTransactions := func(n int) { + for i := 0; i < n; i++ { + groups.sampleTrace(&model.APMEvent{ + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: "0102030405060708090a0b0c0d0e0f10"}, + Transaction: &model.Transaction{ID: "0102030405060708"}, + }) + } + } + + // All groups start out with a reservoir size of 1000. + sendTransactions(10000) + assert.Len(t, groups.finalizeSampledTraces(nil), 1000) // initial reservoir size + + // We sent 10000 initially, and we send 20000 each subsequent iteration. + // The number of sampled trace IDs will converge on 4000 (0.2*20000). + for i, expected := range []int{ + 2000, // 0.2 * 10000 (initial ingest rate) + 3500, // 0.2 * (0.25*10000 + 0.75*20000) + 3875, // 0.2 * (0.25*17500 + 0.75*20000) + 3969, // etc. 
+ 3992, + 3998, + 4000, + 4000, + } { + sendTransactions(20000) + assert.Len(t, groups.finalizeSampledTraces(nil), expected, fmt.Sprintf("iteration %d", i)) + } +} + +func TestTraceGroupReservoirResizeMinimum(t *testing.T) { + const ( + maxDynamicServices = 1 + ingestRateCoefficient = 1.0 + ) + policies := []Policy{{SampleRate: 0.1}} + groups := newTraceGroups(policies, maxDynamicServices, ingestRateCoefficient) + + sendTransactions := func(n int) { + for i := 0; i < n; i++ { + groups.sampleTrace(&model.APMEvent{ + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: "0102030405060708090a0b0c0d0e0f10"}, + Transaction: &model.Transaction{ID: "0102030405060708"}, + }) + } + } + + sendTransactions(10000) + assert.Len(t, groups.finalizeSampledTraces(nil), 1000) // initial reservoir size + + // The reservoir would normally be resized to fit the desired sampling + // rate, but will never be resized to less than the minimum (1000). + sendTransactions(1000) + assert.Len(t, groups.finalizeSampledTraces(nil), 100) + + sendTransactions(10000) + assert.Len(t, groups.finalizeSampledTraces(nil), 1000) // min reservoir size +} + +func TestTraceGroupsRemoval(t *testing.T) { + const ( + maxDynamicServices = 2 + ingestRateCoefficient = 1.0 + ) + policies := []Policy{ + {PolicyCriteria: PolicyCriteria{ServiceName: "defined"}, SampleRate: 0.5}, + {SampleRate: 0.5}, + {PolicyCriteria: PolicyCriteria{ServiceName: "defined_later"}, SampleRate: 0.5}, + } + groups := newTraceGroups(policies, maxDynamicServices, ingestRateCoefficient) + + for i := 0; i < 10000; i++ { + _, err := groups.sampleTrace(&model.APMEvent{ + Service: model.Service{Name: "many"}, + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{}, + }) + assert.NoError(t, err) + } + _, err := groups.sampleTrace(&model.APMEvent{ + Service: model.Service{Name: "few"}, + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{}, + }) + assert.NoError(t, err) + + _, err = groups.sampleTrace(&model.APMEvent{ + Service: model.Service{Name: "another"}, + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{}, + }) + assert.Equal(t, errTooManyTraceGroups, err) + + // When there is a policy with an explicitly defined service name, that + // will not be affected by the limit... + _, err = groups.sampleTrace(&model.APMEvent{ + Service: model.Service{Name: "defined"}, + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{}, + }) + assert.NoError(t, err) + + // ...unless the policy with an explicitly defined service name comes after + // a matching dynamic policy. + _, err = groups.sampleTrace(&model.APMEvent{ + Service: model.Service{Name: "defined_later"}, + Processor: model.TransactionProcessor, + Transaction: &model.Transaction{}, + }) + assert.Equal(t, errTooManyTraceGroups, err) + + // Finalizing should remove the "few" trace group, since its reservoir + // size is at the minimum, and the number of groups is at the maximum. + groups.finalizeSampledTraces(nil) + + // We should now be able to add another trace group. 
+	_, err = groups.sampleTrace(&model.APMEvent{
+		Service:     model.Service{Name: "another"},
+		Processor:   model.TransactionProcessor,
+		Transaction: &model.Transaction{},
+	})
+	assert.NoError(t, err)
+}
+
+func BenchmarkTraceGroups(b *testing.B) {
+	const (
+		maxDynamicServices    = 1000
+		ingestRateCoefficient = 1.0
+	)
+	policies := []Policy{{SampleRate: 1.0}}
+	groups := newTraceGroups(policies, maxDynamicServices, ingestRateCoefficient)
+
+	b.RunParallel(func(pb *testing.PB) {
+		// Transaction identifiers are different for each goroutine, simulating
+		// multiple agents. This should demonstrate low contention.
+		//
+		// Duration is non-zero to ensure transactions have a non-zero chance of
+		// being sampled.
+		tx := model.APMEvent{
+			Processor: model.TransactionProcessor,
+			Event:     model.Event{Duration: time.Second},
+			Transaction: &model.Transaction{
+				Name: uuid.Must(uuid.NewV4()).String(),
+			},
+		}
+		for pb.Next() {
+			groups.sampleTrace(&tx)
+			tx.Event.Duration += time.Second
+		}
+	})
}
diff --git a/x-pack/apm-server/sampling/processor.go b/x-pack/apm-server/sampling/processor.go
new file mode 100644
index 00000000000..7bd3731eb67
--- /dev/null
+++ b/x-pack/apm-server/sampling/processor.go
@@ -0,0 +1,534 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sampling
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/dgraph-io/badger/v2"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+
+	logs "github.com/elastic/apm-server/log"
+	"github.com/elastic/apm-server/model"
+	"github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage"
+	"github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub"
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/libbeat/monitoring"
+)
+
+const (
+	badgerValueLogFileSize = 128 * 1024 * 1024
+
+	// subscriberPositionFile holds the file name used for persisting
+	// the subscriber position across server restarts.
+	subscriberPositionFile = "subscriber_position.json"
+
+	// tooManyGroupsLoggerRateLimit is the maximum frequency at which
+	// "too many groups" log messages are logged.
+	tooManyGroupsLoggerRateLimit = time.Minute
+)
+
+// ErrStopped is returned when calling ProcessBatch on a stopped Processor.
+var ErrStopped = errors.New("processor is stopped")
+
+// Processor is a tail-sampling event processor.
+type Processor struct {
+	config              Config
+	logger              *logp.Logger
+	tooManyGroupsLogger *logp.Logger
+	groups              *traceGroups
+
+	storageMu    sync.RWMutex
+	db           *badger.DB
+	storage      *eventstorage.ShardedReadWriter
+	eventMetrics *eventMetrics // heap-allocated for 64-bit alignment
+
+	stopMu   sync.Mutex
+	stopping chan struct{}
+	stopped  chan struct{}
+}
+
+type eventMetrics struct {
+	processed int64
+	dropped   int64
+	stored    int64
+}
+
+// NewProcessor returns a new Processor, for tail-sampling trace events.
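+//
+// A minimal lifecycle sketch (illustrative; cfg is assumed to be a valid
+// Config, and error handling is elided):
+//
+//	p, err := NewProcessor(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	go func() { _ = p.Run() }() // runs until a fatal error, or Stop is called
+//	defer p.Stop(context.Background())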
+func NewProcessor(config Config) (*Processor, error) { + if err := config.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid tail-sampling config") + } + + logger := logp.NewLogger(logs.Sampling) + badgerOpts := badger.DefaultOptions(config.StorageDir) + badgerOpts.ValueLogFileSize = config.ValueLogFileSize + if badgerOpts.ValueLogFileSize == 0 { + badgerOpts.ValueLogFileSize = badgerValueLogFileSize + } + badgerOpts.Logger = eventstorage.LogpAdaptor{Logger: logger} + db, err := badger.Open(badgerOpts) + if err != nil { + return nil, err + } + + eventCodec := eventstorage.JSONCodec{} + storage := eventstorage.New(db, eventCodec, config.TTL) + readWriter := storage.NewShardedReadWriter() + + p := &Processor{ + config: config, + logger: logger, + tooManyGroupsLogger: logger.WithOptions(logs.WithRateLimit(tooManyGroupsLoggerRateLimit)), + groups: newTraceGroups(config.Policies, config.MaxDynamicServices, config.IngestRateDecayFactor), + db: db, + storage: readWriter, + eventMetrics: &eventMetrics{}, + stopping: make(chan struct{}), + stopped: make(chan struct{}), + } + return p, nil +} + +// CollectMonitoring may be called to collect monitoring metrics related to +// tail-sampling. It is intended to be used with libbeat/monitoring.NewFunc. +// +// The metrics should be added to the "apm-server.sampling.tail" registry. +func (p *Processor) CollectMonitoring(_ monitoring.Mode, V monitoring.Visitor) { + V.OnRegistryStart() + defer V.OnRegistryFinished() + + // TODO(axw) it might be nice to also report some metrics about: + // + // - The time between receiving events and when they are indexed. + // This could be accomplished by recording the time when the + // payload was received in the ECS field `event.created`. The + // final metric would ideally be a distribution, which is not + // currently an option in libbeat/monitoring. + + p.groups.mu.RLock() + numDynamicGroups := p.groups.numDynamicServiceGroups + p.groups.mu.RUnlock() + monitoring.ReportInt(V, "dynamic_service_groups", int64(numDynamicGroups)) + + monitoring.ReportNamespace(V, "storage", func() { + p.storageMu.RLock() + defer p.storageMu.RUnlock() + lsmSize, valueLogSize := p.db.Size() + monitoring.ReportInt(V, "lsm_size", int64(lsmSize)) + monitoring.ReportInt(V, "value_log_size", int64(valueLogSize)) + }) + monitoring.ReportNamespace(V, "events", func() { + monitoring.ReportInt(V, "processed", atomic.LoadInt64(&p.eventMetrics.processed)) + monitoring.ReportInt(V, "dropped", atomic.LoadInt64(&p.eventMetrics.dropped)) + monitoring.ReportInt(V, "stored", atomic.LoadInt64(&p.eventMetrics.stored)) + }) +} + +// ProcessBatch tail-samples transactions and spans. +// +// Any events remaining in the batch after the processor returns +// will be published immediately. This includes: +// +// - Non-trace events (errors, metricsets) +// - Trace events which are already known to have been tail-sampled +// - Transactions which are head-based unsampled +// +// All other trace events will either be dropped (e.g. known to not +// be tail-sampled), or stored for possible later publication. 
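+//
+// For example (a hypothetical batch): given one error event, one
+// head-unsampled transaction, and one root transaction with no sampling
+// decision yet,
+//
+//	batch := model.Batch{errorEvent, unsampledTx, rootTx}
+//	err := p.ProcessBatch(ctx, &batch)
+//	// On success len(batch) == 2: the error event and the unsampled
+//	// transaction remain, while rootTx has left the batch; it is either
+//	// stored pending a decision or dropped by the reservoir.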
+func (p *Processor) ProcessBatch(ctx context.Context, batch *model.Batch) error { + p.storageMu.RLock() + defer p.storageMu.RUnlock() + if p.storage == nil { + return ErrStopped + } + events := *batch + for i := 0; i < len(events); i++ { + event := &events[i] + var report, stored bool + switch event.Processor { + case model.TransactionProcessor: + var err error + atomic.AddInt64(&p.eventMetrics.processed, 1) + report, stored, err = p.processTransaction(event) + if err != nil { + return err + } + case model.SpanProcessor: + var err error + atomic.AddInt64(&p.eventMetrics.processed, 1) + report, stored, err = p.processSpan(event) + if err != nil { + return err + } + } + if !report { + // We shouldn't report this event, so remove it from the slice. + n := len(events) + events[i], events[n-1] = events[n-1], events[i] + events = events[:n-1] + i-- + } + p.updateProcessorMetrics(report, stored) + } + *batch = events + return nil +} + +func (p *Processor) updateProcessorMetrics(report, stored bool) { + if stored { + atomic.AddInt64(&p.eventMetrics.stored, 1) + } else if !report { + // We only increment the "dropped" counter if + // we neither reported nor stored the event, so + // we can track how many events are definitely + // dropped and indexing isn't just deferred until + // later. + // + // The counter does not include events that are + // implicitly dropped, i.e. stored and never + // indexed. + atomic.AddInt64(&p.eventMetrics.dropped, 1) + } +} + +func (p *Processor) processTransaction(event *model.APMEvent) (report, stored bool, _ error) { + if !event.Transaction.Sampled { + // (Head-based) unsampled transactions are passed through + // by the tail sampler. + return true, false, nil + } + + traceSampled, err := p.storage.IsTraceSampled(event.Trace.ID) + switch err { + case nil: + // Tail-sampling decision has been made: report the transaction + // if it was sampled. + report := traceSampled + return report, false, nil + case eventstorage.ErrNotFound: + // Tail-sampling decision has not yet been made. + break + default: + return false, false, err + } + + if event.Parent.ID != "" { + // Non-root transaction: write to local storage while we wait + // for a sampling decision. + return false, true, p.storage.WriteTraceEvent( + event.Trace.ID, event.Transaction.ID, event, + ) + } + + // Root transaction: apply reservoir sampling. + reservoirSampled, err := p.groups.sampleTrace(event) + if err == errTooManyTraceGroups { + // Too many trace groups, drop the transaction. + p.tooManyGroupsLogger.Warn(` +Tail-sampling service group limit reached, discarding trace events. +This is caused by having many unique service names while relying on +sampling policies without service name specified. +`[1:]) + return false, false, nil + } else if err != nil { + return false, false, err + } + + if !reservoirSampled { + // Write the non-sampling decision to storage to avoid further + // writes for the trace ID, and then drop the transaction. + // + // This is a local optimisation only. To avoid creating network + // traffic and load on Elasticsearch for uninteresting root + // transactions, we do not propagate this to other APM Servers. + return false, false, p.storage.WriteTraceSampled(event.Trace.ID, false) + } + + // The root transaction was admitted to the sampling reservoir, so we + // can proceed to write the transaction to storage; we may index it later, + // after finalising the sampling decision. 
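+	//
+	// The (report=false, stored=true) result here means the event leaves
+	// the current batch and may be re-published from local storage once
+	// the trace is tail-sampled.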
+ return false, true, p.storage.WriteTraceEvent( + event.Trace.ID, event.Transaction.ID, event, + ) +} + +func (p *Processor) processSpan(event *model.APMEvent) (report, stored bool, _ error) { + traceSampled, err := p.storage.IsTraceSampled(event.Trace.ID) + if err != nil { + if err == eventstorage.ErrNotFound { + // Tail-sampling decision has not yet been made, write event to local storage. + return false, true, p.storage.WriteTraceEvent( + event.Trace.ID, event.Span.ID, event, + ) + } + return false, false, err + } + // Tail-sampling decision has been made, report or drop the event. + if !traceSampled { + return false, false, nil + } + return true, false, nil +} + +// Stop stops the processor, flushing and closing the event storage. +func (p *Processor) Stop(ctx context.Context) error { + p.stopMu.Lock() + if p.storage == nil { + // Already fully stopped. + p.stopMu.Unlock() + return nil + } + select { + case <-p.stopping: + // already stopping + default: + close(p.stopping) + } + p.stopMu.Unlock() + + // Wait for Run to return. + select { + case <-ctx.Done(): + return ctx.Err() + case <-p.stopped: + } + + // Lock storage before stopping, to prevent closing storage while + // ProcessBatch is using it. + p.storageMu.Lock() + defer p.storageMu.Unlock() + + if err := p.storage.Flush(); err != nil { + return err + } + p.storage.Close() + if err := p.db.Close(); err != nil { + return err + } + p.storage = nil + return nil +} + +// Run runs the tail-sampling processor. This method is responsible for: +// +// - periodically making, and then publishing, local sampling decisions +// - subscribing to remote sampling decisions +// - reacting to both local and remote sampling decisions by reading +// related events from local storage, and then reporting them +// +// Run returns when a fatal error occurs or the Stop method is invoked. +func (p *Processor) Run() error { + p.storageMu.RLock() + defer p.storageMu.RUnlock() + defer func() { + p.stopMu.Lock() + defer p.stopMu.Unlock() + select { + case <-p.stopped: + default: + close(p.stopped) + } + }() + + // NOTE(axw) the user can configure the tail-sampling flush interval, + // but cannot directly control the bulk indexing flush interval. The + // bulk indexing is expected to complete soon after the tail-sampling + // flush interval. + bulkIndexerFlushInterval := 5 * time.Second + if bulkIndexerFlushInterval > p.config.FlushInterval { + bulkIndexerFlushInterval = p.config.FlushInterval + } + + initialSubscriberPosition, err := readSubscriberPosition(p.config.StorageDir) + if err != nil { + return err + } + subscriberPositions := make(chan pubsub.SubscriberPosition) + pubsub, err := pubsub.New(pubsub.Config{ + BeatID: p.config.BeatID, + Client: p.config.Elasticsearch, + DataStream: pubsub.DataStreamConfig(p.config.SampledTracesDataStream), + Logger: p.logger, + + // Issue pubsub subscriber search requests at twice the frequency + // of publishing, so each server observes each other's sampled + // trace IDs soon after they are published. 
+ SearchInterval: p.config.FlushInterval / 2, + FlushInterval: bulkIndexerFlushInterval, + }) + if err != nil { + return err + } + + remoteSampledTraceIDs := make(chan string) + localSampledTraceIDs := make(chan string) + publishSampledTraceIDs := make(chan string) + g, ctx := errgroup.WithContext(context.Background()) + g.Go(func() error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-p.stopping: + return context.Canceled + } + }) + g.Go(func() error { + // This goroutine is responsible for periodically garbage + // collecting the Badger value log, using the recommended + // discard ratio of 0.5. + ticker := time.NewTicker(p.config.StorageGCInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + const discardRatio = 0.5 + if err := p.db.RunValueLogGC(discardRatio); err != nil && err != badger.ErrNoRewrite { + return err + } + } + } + }) + g.Go(func() error { + defer close(subscriberPositions) + return pubsub.SubscribeSampledTraceIDs(ctx, initialSubscriberPosition, remoteSampledTraceIDs, subscriberPositions) + }) + g.Go(func() error { + return pubsub.PublishSampledTraceIDs(ctx, publishSampledTraceIDs) + }) + g.Go(func() error { + ticker := time.NewTicker(p.config.FlushInterval) + defer ticker.Stop() + var traceIDs []string + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + p.logger.Debug("finalizing local sampling reservoirs") + traceIDs = p.groups.finalizeSampledTraces(traceIDs) + if len(traceIDs) == 0 { + continue + } + var g errgroup.Group + g.Go(func() error { return sendTraceIDs(ctx, publishSampledTraceIDs, traceIDs) }) + g.Go(func() error { return sendTraceIDs(ctx, localSampledTraceIDs, traceIDs) }) + if err := g.Wait(); err != nil { + return err + } + traceIDs = traceIDs[:0] + } + } + }) + g.Go(func() error { + // TODO(axw) pace the publishing over the flush interval? + // Alternatively we can rely on backpressure from the reporter, + // removing the artificial one second timeout from publisher code + // and just waiting as long as it takes here. + for { + var remoteDecision bool + var traceID string + select { + case <-ctx.Done(): + return ctx.Err() + case traceID = <-remoteSampledTraceIDs: + p.logger.Debug("received remotely sampled trace ID") + remoteDecision = true + case traceID = <-localSampledTraceIDs: + } + if err := p.storage.WriteTraceSampled(traceID, true); err != nil { + return err + } + var events model.Batch + if err := p.storage.ReadTraceEvents(traceID, &events); err != nil { + return err + } + if n := len(events); n > 0 { + p.logger.Debugf("reporting %d events", n) + if remoteDecision { + // Remote decisions may be received multiple times, + // e.g. if this server restarts and resubscribes to + // remote sampling decisions before they have been + // deleted. We delete events from local storage so + // we don't publish duplicates; delivery is therefore + // at-most-once, not guaranteed. 
+ for _, event := range events { + switch event.Processor { + case model.TransactionProcessor: + if err := p.storage.DeleteTraceEvent(event.Trace.ID, event.Transaction.ID); err != nil { + return errors.Wrap(err, "failed to delete transaction from local storage") + } + case model.SpanProcessor: + if err := p.storage.DeleteTraceEvent(event.Trace.ID, event.Span.ID); err != nil { + return errors.Wrap(err, "failed to delete span from local storage") + } + } + } + } + if err := p.config.BatchProcessor.ProcessBatch(ctx, &events); err != nil { + p.logger.With(logp.Error(err)).Warn("failed to report events") + } + } + } + }) + g.Go(func() error { + // Write subscriber position to a file on disk, to support resuming + // on apm-server restart without reprocessing all indices. + for { + select { + case <-ctx.Done(): + return ctx.Err() + case pos := <-subscriberPositions: + if err := writeSubscriberPosition(p.config.StorageDir, pos); err != nil { + return err + } + } + } + }) + if err := g.Wait(); err != nil && err != context.Canceled { + return err + } + return nil +} + +func readSubscriberPosition(storageDir string) (pubsub.SubscriberPosition, error) { + var pos pubsub.SubscriberPosition + data, err := ioutil.ReadFile(filepath.Join(storageDir, subscriberPositionFile)) + if errors.Is(err, os.ErrNotExist) { + return pos, nil + } else if err != nil { + return pos, err + } + return pos, json.Unmarshal(data, &pos) +} + +func writeSubscriberPosition(storageDir string, pos pubsub.SubscriberPosition) error { + data, err := json.Marshal(pos) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(storageDir, subscriberPositionFile), data, 0644) +} + +func sendTraceIDs(ctx context.Context, out chan<- string, traceIDs []string) error { + for _, traceID := range traceIDs { + select { + case <-ctx.Done(): + return ctx.Err() + case out <- traceID: + } + } + return nil +} diff --git a/x-pack/apm-server/sampling/processor_bench_test.go b/x-pack/apm-server/sampling/processor_bench_test.go new file mode 100644 index 00000000000..5ddca5fc3a8 --- /dev/null +++ b/x-pack/apm-server/sampling/processor_bench_test.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package sampling_test + +import ( + "context" + cryptorand "crypto/rand" + "encoding/binary" + "encoding/hex" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/x-pack/apm-server/sampling" +) + +func BenchmarkProcess(b *testing.B) { + processor, err := sampling.NewProcessor(newTempdirConfig(b)) + require.NoError(b, err) + go processor.Run() + defer processor.Stop(context.Background()) + + b.RunParallel(func(pb *testing.PB) { + var seed int64 + err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed) + assert.NoError(b, err) + rng := rand.New(rand.NewSource(seed)) + + var traceID [16]byte + for pb.Next() { + binary.LittleEndian.PutUint64(traceID[:8], rng.Uint64()) + binary.LittleEndian.PutUint64(traceID[8:], rng.Uint64()) + transactionID := traceID[:8] + spanID := traceID[8:] + trace := model.Trace{ID: hex.EncodeToString(traceID[:])} + transaction := &model.Transaction{ + ID: hex.EncodeToString(transactionID), + } + spanParent := model.Parent{ + ID: hex.EncodeToString(transactionID), + } + span := &model.Span{ + ID: hex.EncodeToString(spanID), + } + batch := model.Batch{ + {Trace: trace, Transaction: transaction}, + {Trace: trace, Span: span, Parent: spanParent}, + {Trace: trace, Span: span, Parent: spanParent}, + {Trace: trace, Span: span, Parent: spanParent}, + } + if err := processor.ProcessBatch(context.Background(), &batch); err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/x-pack/apm-server/sampling/processor_test.go b/x-pack/apm-server/sampling/processor_test.go new file mode 100644 index 00000000000..648bfc2b1b4 --- /dev/null +++ b/x-pack/apm-server/sampling/processor_test.go @@ -0,0 +1,772 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package sampling_test + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "github.com/dgraph-io/badger/v2" + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/apm-server/model" + "github.com/elastic/apm-server/x-pack/apm-server/sampling" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/eventstorage" + "github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub/pubsubtest" + "github.com/elastic/beats/v7/libbeat/monitoring" +) + +func TestProcessUnsampled(t *testing.T) { + processor, err := sampling.NewProcessor(newTempdirConfig(t)) + require.NoError(t, err) + go processor.Run() + defer processor.Stop(context.Background()) + + in := model.Batch{{ + Processor: model.TransactionProcessor, + Trace: model.Trace{ + ID: "0102030405060708090a0b0c0d0e0f10", + }, + Transaction: &model.Transaction{ + ID: "0102030405060708", + Sampled: false, + }, + }} + out := in[:] + err = processor.ProcessBatch(context.Background(), &out) + require.NoError(t, err) + + // Unsampled transaction should be reported immediately. 
+	assert.Equal(t, in, out)
+}
+
+func TestProcessAlreadyTailSampled(t *testing.T) {
+	config := newTempdirConfig(t)
+
+	// Seed event storage with tail-sampling decisions, to show that
+	// subsequent events in the trace will be reported immediately.
+	trace1 := model.Trace{ID: "0102030405060708090a0b0c0d0e0f10"}
+	trace2 := model.Trace{ID: "0102030405060708090a0b0c0d0e0f11"}
+	withBadger(t, config.StorageDir, func(db *badger.DB) {
+		storage := eventstorage.New(db, eventstorage.JSONCodec{}, time.Minute)
+		writer := storage.NewReadWriter()
+		defer writer.Close()
+		assert.NoError(t, writer.WriteTraceSampled(trace1.ID, true))
+		assert.NoError(t, writer.Flush())
+
+		storage = eventstorage.New(db, eventstorage.JSONCodec{}, -1) // expire immediately
+		writer = storage.NewReadWriter()
+		defer writer.Close()
+		assert.NoError(t, writer.WriteTraceSampled(trace2.ID, true))
+		assert.NoError(t, writer.Flush())
+	})
+
+	processor, err := sampling.NewProcessor(config)
+	require.NoError(t, err)
+	go processor.Run()
+	defer processor.Stop(context.Background())
+
+	transaction1 := model.APMEvent{
+		Processor: model.TransactionProcessor,
+		Trace:     trace1,
+		Transaction: &model.Transaction{
+			ID:      "0102030405060708",
+			Sampled: true,
+		},
+	}
+	span1 := model.APMEvent{
+		Processor: model.SpanProcessor,
+		Trace:     trace1,
+		Span: &model.Span{
+			ID: "0102030405060709",
+		},
+	}
+	transaction2 := model.APMEvent{
+		Processor: model.TransactionProcessor,
+		Trace:     trace2,
+		Transaction: &model.Transaction{
+			ID:      "0102030405060710",
+			Sampled: true,
+		},
+	}
+	span2 := model.APMEvent{
+		Processor: model.SpanProcessor,
+		Trace:     trace2,
+		Span: &model.Span{
+			ID: "0102030405060711",
+		},
+	}
+
+	batch := model.Batch{transaction1, transaction2, span1, span2}
+	err = processor.ProcessBatch(context.Background(), &batch)
+	require.NoError(t, err)
+
+	// Tail sampling decision already made. The first transaction and span should be
+	// reported immediately, whereas the second ones should be written to storage since
+	// they were received after the trace sampling entry expired.
+	assert.Equal(t, model.Batch{transaction1, span1}, batch)
+
+	expectedMonitoring := monitoring.MakeFlatSnapshot()
+	expectedMonitoring.Ints["sampling.events.processed"] = 4
+	expectedMonitoring.Ints["sampling.events.stored"] = 2
+	expectedMonitoring.Ints["sampling.events.dropped"] = 0
+	assertMonitoring(t, processor, expectedMonitoring, `sampling.events.*`)
+
+	// Stop the processor so we can access the database.
+ assert.NoError(t, processor.Stop(context.Background())) + withBadger(t, config.StorageDir, func(db *badger.DB) { + storage := eventstorage.New(db, eventstorage.JSONCodec{}, time.Minute) + reader := storage.NewReadWriter() + defer reader.Close() + + var batch model.Batch + err := reader.ReadTraceEvents(trace1.ID, &batch) + assert.NoError(t, err) + assert.Zero(t, batch) + + err = reader.ReadTraceEvents(trace2.ID, &batch) + assert.NoError(t, err) + assert.Equal(t, model.Batch{transaction2, span2}, batch) + }) +} + +func TestProcessLocalTailSampling(t *testing.T) { + config := newTempdirConfig(t) + config.Policies = []sampling.Policy{{SampleRate: 0.5}} + config.FlushInterval = 10 * time.Millisecond + published := make(chan string) + config.Elasticsearch = pubsubtest.Client(pubsubtest.PublisherChan(published), nil) + + processor, err := sampling.NewProcessor(config) + require.NoError(t, err) + + trace1 := model.Trace{ID: "0102030405060708090a0b0c0d0e0f10"} + trace2 := model.Trace{ID: "0102030405060708090a0b0c0d0e0f11"} + trace1Events := model.Batch{{ + Processor: model.TransactionProcessor, + Trace: trace1, + Event: model.Event{Duration: 123 * time.Millisecond}, + Transaction: &model.Transaction{ + ID: "0102030405060708", + Sampled: true, + }, + }, { + Processor: model.SpanProcessor, + Trace: trace1, + Event: model.Event{Duration: 123 * time.Millisecond}, + Span: &model.Span{ + ID: "0102030405060709", + }, + }} + trace2Events := model.Batch{{ + Processor: model.TransactionProcessor, + Trace: trace2, + Event: model.Event{Duration: 456 * time.Millisecond}, + Transaction: &model.Transaction{ + ID: "0102030405060710", + Sampled: true, + }, + }, { + Processor: model.SpanProcessor, + Trace: trace2, + Event: model.Event{Duration: 456 * time.Millisecond}, + Span: &model.Span{ + ID: "0102030405060711", + }, + }} + + in := append(trace1Events[:], trace2Events...) + err = processor.ProcessBatch(context.Background(), &in) + require.NoError(t, err) + assert.Empty(t, in) + + // Start periodic tail-sampling. We start the processor after processing + // events to ensure all events are processed before any local sampling + // decisions are made, such that we have a single tail-sampling decision + // to check. + go processor.Run() + defer processor.Stop(context.Background()) + + // We have configured 50% tail-sampling, so we expect a single trace ID + // to be published. Sampling is non-deterministic (weighted random), so + // we can't anticipate a specific trace ID. + + var sampledTraceID string + select { + case sampledTraceID = <-published: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for publication") + } + select { + case <-published: + t.Fatal("unexpected publication") + case <-time.After(50 * time.Millisecond): + } + + unsampledTraceID := trace2.ID + sampledTraceEvents := trace1Events + unsampledTraceEvents := trace2Events + if sampledTraceID == trace2.ID { + unsampledTraceID = trace1.ID + unsampledTraceEvents = trace1Events + sampledTraceEvents = trace2Events + } + + expectedMonitoring := monitoring.MakeFlatSnapshot() + expectedMonitoring.Ints["sampling.events.processed"] = 4 + expectedMonitoring.Ints["sampling.events.stored"] = 4 + expectedMonitoring.Ints["sampling.events.dropped"] = 0 + assertMonitoring(t, processor, expectedMonitoring, `sampling.events.*`) + + // Stop the processor so we can access the database. 
+	assert.NoError(t, processor.Stop(context.Background()))
+	withBadger(t, config.StorageDir, func(db *badger.DB) {
+		storage := eventstorage.New(db, eventstorage.JSONCodec{}, time.Minute)
+		reader := storage.NewReadWriter()
+		defer reader.Close()
+
+		sampled, err := reader.IsTraceSampled(sampledTraceID)
+		assert.NoError(t, err)
+		assert.True(t, sampled)
+
+		sampled, err = reader.IsTraceSampled(unsampledTraceID)
+		assert.Equal(t, eventstorage.ErrNotFound, err)
+		assert.False(t, sampled)
+
+		var batch model.Batch
+		err = reader.ReadTraceEvents(sampledTraceID, &batch)
+		assert.NoError(t, err)
+		assert.Equal(t, sampledTraceEvents, batch)
+
+		// Even though the trace is unsampled, the events will be
+		// available in storage until the TTL expires, as they're
+		// written there first.
+		batch = batch[:0]
+		err = reader.ReadTraceEvents(unsampledTraceID, &batch)
+		assert.NoError(t, err)
+		assert.Equal(t, unsampledTraceEvents, batch)
+	})
+}
+
+func TestProcessLocalTailSamplingUnsampled(t *testing.T) {
+	config := newTempdirConfig(t)
+	config.FlushInterval = time.Minute
+	processor, err := sampling.NewProcessor(config)
+	require.NoError(t, err)
+	go processor.Run()
+	defer processor.Stop(context.Background())
+
+	// Process enough root transactions that at least one is rejected
+	// by reservoir sampling.
+	traceIDs := make([]string, 10000)
+	for i := range traceIDs {
+		traceID := uuid.Must(uuid.NewV4()).String()
+		traceIDs[i] = traceID
+		batch := model.Batch{{
+			Processor: model.TransactionProcessor,
+			Trace:     model.Trace{ID: traceID},
+			Event:     model.Event{Duration: time.Millisecond},
+			Transaction: &model.Transaction{
+				ID:      traceID,
+				Sampled: true,
+			},
+		}}
+		err := processor.ProcessBatch(context.Background(), &batch)
+		require.NoError(t, err)
+		assert.Empty(t, batch)
+	}
+
+	// Stop the processor so we can access the database.
+	assert.NoError(t, processor.Stop(context.Background()))
+	withBadger(t, config.StorageDir, func(db *badger.DB) {
+		storage := eventstorage.New(db, eventstorage.JSONCodec{}, time.Minute)
+		reader := storage.NewReadWriter()
+		defer reader.Close()
+
+		var anyUnsampled bool
+		for _, traceID := range traceIDs {
+			sampled, err := reader.IsTraceSampled(traceID)
+			if err == eventstorage.ErrNotFound {
+				// No sampling decision made yet.
+			} else {
+				assert.NoError(t, err)
+				assert.False(t, sampled)
+				anyUnsampled = true
+				break
+			}
+		}
+		assert.True(t, anyUnsampled)
+	})
+}
+
+func TestProcessLocalTailSamplingPolicyOrder(t *testing.T) {
+	config := newTempdirConfig(t)
+	config.Policies = []sampling.Policy{{
+		PolicyCriteria: sampling.PolicyCriteria{TraceName: "trace_name"},
+		SampleRate:     0.5,
+	}, {
+		PolicyCriteria: sampling.PolicyCriteria{ServiceName: "service_name"},
+		SampleRate:     0.1,
+	}, {
+		PolicyCriteria: sampling.PolicyCriteria{},
+		SampleRate:     0,
+	}}
+	config.FlushInterval = 10 * time.Millisecond
+	published := make(chan string)
+	config.Elasticsearch = pubsubtest.Client(pubsubtest.PublisherChan(published), nil)
+
+	processor, err := sampling.NewProcessor(config)
+	require.NoError(t, err)
+
+	// Send transactions that would match any of the policies defined above.
+ rng := rand.New(rand.NewSource(0)) + service := model.Service{Name: "service_name"} + numTransactions := 100 + events := make(model.Batch, numTransactions) + for i := range events { + var traceIDBytes [16]byte + _, err := rng.Read(traceIDBytes[:]) + require.NoError(t, err) + events[i] = model.APMEvent{ + Service: service, + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: fmt.Sprintf("%x", traceIDBytes[:])}, + Event: model.Event{Duration: 123 * time.Millisecond}, + Transaction: &model.Transaction{ + Name: "trace_name", + ID: fmt.Sprintf("%x", traceIDBytes[8:]), + Sampled: true, + }, + } + } + + err = processor.ProcessBatch(context.Background(), &events) + require.NoError(t, err) + assert.Empty(t, events) + + // Start periodic tail-sampling. We start the processor after processing + // events to ensure all events are processed before any local sampling + // decisions are made, such that we have a single tail-sampling decision + // to check. + go processor.Run() + defer processor.Stop(context.Background()) + + // The first matching policy should win, and sample 50%. + for i := 0; i < numTransactions/2; i++ { + select { + case <-published: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for publication") + } + } + select { + case <-published: + t.Fatal("unexpected publication") + case <-time.After(50 * time.Millisecond): + } +} + +func TestProcessRemoteTailSampling(t *testing.T) { + config := newTempdirConfig(t) + config.Policies = []sampling.Policy{{SampleRate: 0.5}} + config.FlushInterval = 10 * time.Millisecond + + var published []string + var publisher pubsubtest.PublisherFunc = func(ctx context.Context, traceID string) error { + published = append(published, traceID) + return nil + } + subscriberChan := make(chan string) + subscriber := pubsubtest.SubscriberChan(subscriberChan) + config.Elasticsearch = pubsubtest.Client(publisher, subscriber) + + reported := make(chan model.Batch) + config.BatchProcessor = model.ProcessBatchFunc(func(ctx context.Context, batch *model.Batch) error { + select { + case <-ctx.Done(): + return ctx.Err() + case reported <- *batch: + return nil + } + }) + + processor, err := sampling.NewProcessor(config) + require.NoError(t, err) + go processor.Run() + defer processor.Stop(context.Background()) + + traceID1 := "0102030405060708090a0b0c0d0e0f10" + traceID2 := "0102030405060708090a0b0c0d0e0f11" + trace1Events := model.Batch{{ + Processor: model.SpanProcessor, + Trace: model.Trace{ID: traceID1}, + Event: model.Event{Duration: 123 * time.Millisecond}, + Span: &model.Span{ + ID: "0102030405060709", + }, + }} + + in := trace1Events[:] + err = processor.ProcessBatch(context.Background(), &in) + require.NoError(t, err) + assert.Empty(t, in) + + // Simulate receiving remote sampling decisions multiple times, + // to show that we don't report duplicate events. + subscriberChan <- traceID2 + subscriberChan <- traceID1 + subscriberChan <- traceID2 + subscriberChan <- traceID1 + + var events model.Batch + select { + case events = <-reported: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for reporting") + } + select { + case <-reported: + t.Fatal("unexpected reporting") + case <-time.After(50 * time.Millisecond): + } + + // Stop the processor so we can access the database. 
+ assert.NoError(t, processor.Stop(context.Background())) + assert.Empty(t, published) // remote decisions don't get republished + + expectedMonitoring := monitoring.MakeFlatSnapshot() + expectedMonitoring.Ints["sampling.events.processed"] = 1 + expectedMonitoring.Ints["sampling.events.stored"] = 1 + expectedMonitoring.Ints["sampling.events.dropped"] = 0 + assertMonitoring(t, processor, expectedMonitoring, `sampling.events.*`) + + assert.Equal(t, trace1Events, events) + + withBadger(t, config.StorageDir, func(db *badger.DB) { + storage := eventstorage.New(db, eventstorage.JSONCodec{}, time.Minute) + reader := storage.NewReadWriter() + defer reader.Close() + + sampled, err := reader.IsTraceSampled(traceID1) + assert.NoError(t, err) + assert.True(t, sampled) + + sampled, err = reader.IsTraceSampled(traceID2) + assert.NoError(t, err) + assert.True(t, sampled) + + var batch model.Batch + err = reader.ReadTraceEvents(traceID1, &batch) + assert.NoError(t, err) + assert.Zero(t, batch) // events are deleted from local storage + + batch = model.Batch{} + err = reader.ReadTraceEvents(traceID2, &batch) + assert.NoError(t, err) + assert.Empty(t, batch) + }) +} + +func TestGroupsMonitoring(t *testing.T) { + config := newTempdirConfig(t) + config.MaxDynamicServices = 5 + config.FlushInterval = time.Minute + config.Policies[0].SampleRate = 0.99 + + processor, err := sampling.NewProcessor(config) + require.NoError(t, err) + go processor.Run() + defer processor.Stop(context.Background()) + + for i := 0; i < config.MaxDynamicServices+1; i++ { + err := processor.ProcessBatch(context.Background(), &model.Batch{{ + Service: model.Service{Name: fmt.Sprintf("service_%d", i)}, + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: uuid.Must(uuid.NewV4()).String()}, + Event: model.Event{Duration: 123 * time.Millisecond}, + Transaction: &model.Transaction{ + ID: "0102030405060709", + Sampled: true, + }, + }}) + require.NoError(t, err) + } + + expectedMonitoring := monitoring.MakeFlatSnapshot() + expectedMonitoring.Ints["sampling.dynamic_service_groups"] = int64(config.MaxDynamicServices) + expectedMonitoring.Ints["sampling.events.processed"] = int64(config.MaxDynamicServices) + 1 + expectedMonitoring.Ints["sampling.events.stored"] = int64(config.MaxDynamicServices) + expectedMonitoring.Ints["sampling.events.dropped"] = 1 // final event dropped, after service limit reached + assertMonitoring(t, processor, expectedMonitoring, `sampling.events.*`, `sampling.dynamic_service_groups`) +} + +func TestStorageMonitoring(t *testing.T) { + config := newTempdirConfig(t) + + processor, err := sampling.NewProcessor(config) + require.NoError(t, err) + go processor.Run() + defer processor.Stop(context.Background()) + for i := 0; i < 100; i++ { + traceID := uuid.Must(uuid.NewV4()).String() + batch := model.Batch{{ + Processor: model.TransactionProcessor, + Trace: model.Trace{ID: traceID}, + Event: model.Event{Duration: 123 * time.Millisecond}, + Transaction: &model.Transaction{ + ID: traceID, + Sampled: true, + }, + }} + err := processor.ProcessBatch(context.Background(), &batch) + require.NoError(t, err) + assert.Empty(t, batch) + } + + // Stop the processor and create a new one, which will reopen storage + // and calculate the storage size. Otherwise we must wait for a minute + // (hard-coded in badger) for storage metrics to be updated. 
+	processor.Stop(context.Background())
+	processor, err = sampling.NewProcessor(config)
+	require.NoError(t, err)
+
+	metrics := collectProcessorMetrics(processor)
+	assert.NotZero(t, metrics.Ints["sampling.storage.lsm_size"])
+	assert.NotZero(t, metrics.Ints["sampling.storage.value_log_size"])
+}
+
+func TestStorageGC(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping slow test")
+	}
+
+	config := newTempdirConfig(t)
+	config.TTL = 10 * time.Millisecond
+	config.FlushInterval = 10 * time.Millisecond
+	config.ValueLogFileSize = 1024 * 1024
+
+	writeBatch := func(n int) {
+		config.StorageGCInterval = time.Minute // effectively disable
+		processor, err := sampling.NewProcessor(config)
+		require.NoError(t, err)
+		go processor.Run()
+		defer processor.Stop(context.Background())
+		for i := 0; i < n; i++ {
+			traceID := uuid.Must(uuid.NewV4()).String()
+			batch := model.Batch{{
+				Processor: model.SpanProcessor,
+				Trace:     model.Trace{ID: traceID},
+				Event:     model.Event{Duration: 123 * time.Millisecond},
+				Span: &model.Span{
+					ID: traceID,
+				},
+			}}
+			err := processor.ProcessBatch(context.Background(), &batch)
+			require.NoError(t, err)
+			assert.Empty(t, batch)
+		}
+	}
+
+	vlogFilenames := func() []string {
+		dir, _ := os.Open(config.StorageDir)
+		defer dir.Close()
+		names, _ := dir.Readdirnames(-1)
+
+		var vlogs []string
+		for _, name := range names {
+			if strings.HasSuffix(name, ".vlog") {
+				vlogs = append(vlogs, name)
+			}
+		}
+		sort.Strings(vlogs)
+		return vlogs
+	}
+
+	// Process spans until more than one value log file has been created;
+	// the loop further below then waits for the first file to be removed
+	// by garbage collection.
+	for len(vlogFilenames()) < 2 {
+		writeBatch(50000)
+	}
+
+	config.StorageGCInterval = 10 * time.Millisecond
+	processor, err := sampling.NewProcessor(config)
+	require.NoError(t, err)
+	go processor.Run()
+	defer processor.Stop(context.Background())
+
+	deadline := time.Now().Add(10 * time.Second)
+	for time.Now().Before(deadline) {
+		vlogs := vlogFilenames()
+		if len(vlogs) == 0 || vlogs[0] != "000000.vlog" {
+			// garbage collected
+			return
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+	t.Fatal("timed out waiting for value log garbage collection")
+}
+
+func TestProcessRemoteTailSamplingPersistence(t *testing.T) {
+	config := newTempdirConfig(t)
+	config.Policies = []sampling.Policy{{SampleRate: 0.5}}
+	config.FlushInterval = 10 * time.Millisecond
+
+	subscriberChan := make(chan string)
+	subscriber := pubsubtest.SubscriberChan(subscriberChan)
+	config.Elasticsearch = pubsubtest.Client(nil, subscriber)
+
+	processor, err := sampling.NewProcessor(config)
+	require.NoError(t, err)
+	go processor.Run()
+	defer processor.Stop(context.Background())
+
+	// Wait for subscriber_position.json to be written to the storage directory.
+	subscriberPositionFile := filepath.Join(config.StorageDir, "subscriber_position.json")
+	data, info := waitFileModified(t, subscriberPositionFile, time.Time{})
+	assert.Equal(t, "{}", string(data))
+
+	subscriberChan <- "0102030405060708090a0b0c0d0e0f10"
+	data, _ = waitFileModified(t, subscriberPositionFile, info.ModTime())
+	assert.Equal(t, `{"index_name":1}`, string(data))
+}
+
+func withBadger(tb testing.TB, storageDir string, f func(db *badger.DB)) {
+	badgerOpts := badger.DefaultOptions(storageDir)
+	badgerOpts.Logger = nil
+	db, err := badger.Open(badgerOpts)
+	require.NoError(tb, err)
+	f(db)
+	assert.NoError(tb, db.Close())
+}
+
+func newTempdirConfig(tb testing.TB) sampling.Config {
+	tempdir, err := ioutil.TempDir("", "samplingtest")
+	require.NoError(tb, err)
+	tb.Cleanup(func() { os.RemoveAll(tempdir) })
+	return sampling.Config{
+		BeatID:         "local-apm-server",
+		BatchProcessor: model.ProcessBatchFunc(func(context.Context, *model.Batch) error { return nil }),
+		LocalSamplingConfig: sampling.LocalSamplingConfig{
+			FlushInterval:         time.Second,
+			MaxDynamicServices:    1000,
+			IngestRateDecayFactor: 0.9,
+			Policies: []sampling.Policy{
+				{SampleRate: 0.1},
+			},
+		},
+		RemoteSamplingConfig: sampling.RemoteSamplingConfig{
+			Elasticsearch: pubsubtest.Client(nil, nil),
+			SampledTracesDataStream: sampling.DataStreamConfig{
+				Type:      "traces",
+				Dataset:   "sampled",
+				Namespace: "testing",
+			},
+		},
+		StorageConfig: sampling.StorageConfig{
+			StorageDir:        tempdir,
+			StorageGCInterval: time.Second,
+			TTL:               30 * time.Minute,
+		},
+	}
+}
+
+func assertMonitoring(t testing.TB, p *sampling.Processor, expected monitoring.FlatSnapshot, matches ...string) {
+	t.Helper()
+	actual := collectProcessorMetrics(p)
+	matchAny := func(k string) bool { return true }
+	if len(matches) > 0 {
+		matchAny = func(k string) bool {
+			for _, pattern := range matches {
+				matched, err := path.Match(pattern, k)
+				if err != nil {
+					panic(err)
+				}
+				if matched {
+					return true
+				}
+			}
+			return false
+		}
+	}
+	for k := range actual.Bools {
+		if !matchAny(k) {
+			delete(actual.Bools, k)
+		}
+	}
+	for k := range actual.Ints {
+		if !matchAny(k) {
+			delete(actual.Ints, k)
+		}
+	}
+	for k := range actual.Floats {
+		if !matchAny(k) {
+			delete(actual.Floats, k)
+		}
+	}
+	for k := range actual.Strings {
+		if !matchAny(k) {
+			delete(actual.Strings, k)
+		}
+	}
+	for k := range actual.StringSlices {
+		if !matchAny(k) {
+			delete(actual.StringSlices, k)
+		}
+	}
+	assert.Equal(t, expected, actual)
+}
+
+func collectProcessorMetrics(p *sampling.Processor) monitoring.FlatSnapshot {
+	registry := monitoring.NewRegistry()
+	monitoring.NewFunc(registry, "sampling", p.CollectMonitoring)
+	return monitoring.CollectFlatSnapshot(
+		registry,
+		monitoring.Full,
+		false, // expvar
+	)
+}
+
+// waitFileModified waits up to 10 seconds for filename to exist and for its
+// modification time to be greater than "after", and returns the file content
+// and file info (including modification time).
+func waitFileModified(tb testing.TB, filename string, after time.Time) ([]byte, os.FileInfo) {
+	timeout := time.NewTimer(10 * time.Second)
+	defer timeout.Stop()
+	ticker := time.NewTicker(50 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			info, err := os.Stat(filename)
+			if errors.Is(err, os.ErrNotExist) {
+				continue
+			} else if err != nil {
+				tb.Fatal(err)
+			}
+			if info.ModTime().After(after) {
+				data, err := ioutil.ReadFile(filename)
+				if err != nil {
+					tb.Fatal(err)
+				}
+				return data, info
+			}
+		case <-timeout.C:
+			tb.Fatalf("timed out waiting for %q to be modified", filename)
+		}
+	}
+}
diff --git a/x-pack/apm-server/sampling/pubsub/checkpoints.go b/x-pack/apm-server/sampling/pubsub/checkpoints.go
new file mode 100644
index 00000000000..aac74af6eff
--- /dev/null
+++ b/x-pack/apm-server/sampling/pubsub/checkpoints.go
@@ -0,0 +1,86 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package pubsub
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/go-elasticsearch/v7/esapi"
+
+	"github.com/elastic/apm-server/elasticsearch"
+)
+
+// getGlobalCheckpoints returns the current global checkpoint for each index
+// underlying dataStream. Each index is required to have a single (primary) shard.
+func getGlobalCheckpoints(
+	ctx context.Context,
+	client elasticsearch.Client,
+	dataStream string,
+) (map[string]int64, error) {
+	indexGlobalCheckpoints := make(map[string]int64)
+	resp, err := esapi.IndicesStatsRequest{
+		Index: []string{dataStream},
+		Level: "shards",
+		// By default all metrics are returned; query just the "get" metric,
+		// which is very cheap.
+		Metric: []string{"get"},
+	}.Do(ctx, client)
+	if err != nil {
+		return nil, errors.Wrap(err, "index stats request failed")
+	}
+	defer resp.Body.Close()
+	if resp.IsError() {
+		switch resp.StatusCode {
+		case http.StatusNotFound:
+			// Data stream does not yet exist.
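+			// With no backing indices there are no checkpoints to compare;
+			// returning the empty map causes the subscriber to simply try
+			// again on the next search interval.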
+ return indexGlobalCheckpoints, nil + } + message, _ := ioutil.ReadAll(resp.Body) + return nil, fmt.Errorf("index stats request failed: %s", message) + } + + var stats dataStreamStats + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return nil, err + } + + for index, indexStats := range stats.Indices { + if n := len(indexStats.Shards); n > 1 { + return nil, fmt.Errorf("expected 1 shard, got %d for index %q", n, index) + } + for _, shardStats := range indexStats.Shards { + for _, shardStats := range shardStats { + if shardStats.Routing.Primary { + indexGlobalCheckpoints[index] = shardStats.SeqNo.GlobalCheckpoint + break + } + } + } + } + return indexGlobalCheckpoints, nil +} + +type dataStreamStats struct { + Indices map[string]indexStats `json:"indices"` +} + +type indexStats struct { + Shards map[string][]shardStats `json:"shards"` +} + +type shardStats struct { + Routing struct { + Primary bool `json:"primary"` + } `json:"routing"` + SeqNo struct { + GlobalCheckpoint int64 `json:"global_checkpoint"` + } `json:"seq_no"` +} diff --git a/x-pack/apm-server/sampling/pubsub/config.go b/x-pack/apm-server/sampling/pubsub/config.go index e2ce4def791..b97e5abc8dd 100644 --- a/x-pack/apm-server/sampling/pubsub/config.go +++ b/x-pack/apm-server/sampling/pubsub/config.go @@ -5,22 +5,24 @@ package pubsub import ( + "fmt" "time" "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/go-elasticsearch/v7" + + "github.com/elastic/apm-server/elasticsearch" ) // Config holds configuration for Pubsub. type Config struct { // Client holds an Elasticsearch client, for indexing and searching for // trace ID observations. - Client *elasticsearch.Client + Client elasticsearch.Client - // Index holds the index name. - Index string + // DataStream holds the data stream. + DataStream DataStreamConfig // BeatID holds the APM Server's unique ID, used for filtering out // local observations in the subscriber. @@ -47,13 +49,25 @@ type Config struct { Logger *logp.Logger } +// DataStreamConfig holds data stream configuration for Pubsub. +type DataStreamConfig struct { + // Type holds the data stream's type. + Type string + + // Dataset holds the data stream's dataset. + Dataset string + + // Namespace holds the data stream's namespace. + Namespace string +} + // Validate validates the configuration. func (config Config) Validate() error { if config.Client == nil { return errors.New("Client unspecified") } - if config.Index == "" { - return errors.New("Index unspecified") + if err := config.DataStream.Validate(); err != nil { + return errors.Wrap(err, "DataStream unspecified or invalid") } if config.BeatID == "" { return errors.New("BeatID unspecified") @@ -66,3 +80,22 @@ func (config Config) Validate() error { } return nil } + +// Validate validates the configuration. +func (config DataStreamConfig) Validate() error { + if config.Type == "" { + return errors.New("Type unspecified") + } + if config.Dataset == "" { + return errors.New("Dataset unspecified") + } + if config.Namespace == "" { + return errors.New("Namespace unspecified") + } + return nil +} + +// String returns the data stream as a combined string. 
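+// For example, a DataStreamConfig with Type "traces", Dataset "sampled"
+// and Namespace "default" renders as "traces-sampled-default".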
+func (config DataStreamConfig) String() string {
+	return fmt.Sprintf("%s-%s-%s", config.Type, config.Dataset, config.Namespace)
+}
diff --git a/x-pack/apm-server/sampling/pubsub/config_test.go b/x-pack/apm-server/sampling/pubsub/config_test.go
index e7834a469b0..36cca74911d 100644
--- a/x-pack/apm-server/sampling/pubsub/config_test.go
+++ b/x-pack/apm-server/sampling/pubsub/config_test.go
@@ -11,12 +11,15 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/elastic/go-elasticsearch/v7"
-
+	"github.com/elastic/apm-server/elasticsearch"
 	"github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub"
 )
 
 func TestConfigInvalid(t *testing.T) {
+	var elasticsearchClient struct {
+		elasticsearch.Client
+	}
+
 	type test struct {
 		config pubsub.Config
 		err    string
@@ -27,26 +30,55 @@ func TestConfigInvalid(t *testing.T) {
 		err: "Client unspecified",
 	}, {
 		config: pubsub.Config{
-			Client: &elasticsearch.Client{},
+			Client: elasticsearchClient,
+		},
+		err: "DataStream unspecified or invalid: Type unspecified",
+	}, {
+		config: pubsub.Config{
+			Client: elasticsearchClient,
+			DataStream: pubsub.DataStreamConfig{
+				Type: "type",
+			},
+		},
+		err: "DataStream unspecified or invalid: Dataset unspecified",
+	}, {
+		config: pubsub.Config{
+			Client: elasticsearchClient,
+			DataStream: pubsub.DataStreamConfig{
+				Type:    "type",
+				Dataset: "dataset",
+			},
 		},
-		err: "Index unspecified",
+		err: "DataStream unspecified or invalid: Namespace unspecified",
 	}, {
 		config: pubsub.Config{
-			Client: &elasticsearch.Client{},
-			Index:  "index",
+			Client: elasticsearchClient,
+			DataStream: pubsub.DataStreamConfig{
+				Type:      "type",
+				Dataset:   "dataset",
+				Namespace: "namespace",
+			},
 		},
 		err: "BeatID unspecified",
 	}, {
 		config: pubsub.Config{
-			Client: &elasticsearch.Client{},
-			Index:  "index",
+			Client: elasticsearchClient,
+			DataStream: pubsub.DataStreamConfig{
+				Type:      "type",
+				Dataset:   "dataset",
+				Namespace: "namespace",
+			},
 			BeatID: "beat_id",
 		},
 		err: "SearchInterval unspecified or negative",
 	}, {
 		config: pubsub.Config{
-			Client: &elasticsearch.Client{},
-			Index:  "index",
+			Client: elasticsearchClient,
+			DataStream: pubsub.DataStreamConfig{
+				Type:      "type",
+				Dataset:   "dataset",
+				Namespace: "namespace",
+			},
 			BeatID:         "beat_id",
 			SearchInterval: time.Second,
 		},
diff --git a/x-pack/apm-server/sampling/pubsub/position.go b/x-pack/apm-server/sampling/pubsub/position.go
new file mode 100644
index 00000000000..085d9dd83c9
--- /dev/null
+++ b/x-pack/apm-server/sampling/pubsub/position.go
@@ -0,0 +1,35 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package pubsub
+
+import "encoding/json"
+
+// SubscriberPosition holds information for the subscriber to resume after the
+// recently observed sampled trace IDs.
+//
+// The zero value is valid, and can be used to subscribe to all sampled trace IDs.
+type SubscriberPosition struct {
+	// observedSeqnos maps each index name to its greatest observed _seq_no.
+	observedSeqnos map[string]int64
+}
+
+// MarshalJSON marshals the subscriber position as JSON, for persistence.
+func (p SubscriberPosition) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.observedSeqnos)
+}
+
+// UnmarshalJSON unmarshals the subscriber position from JSON.
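+// The persisted form is a flat JSON object mapping index name to _seq_no,
+// for example {"index_name": 1}.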
+func (p *SubscriberPosition) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.observedSeqnos) +} + +func copyPosition(pos SubscriberPosition) SubscriberPosition { + observedSeqnos := make(map[string]int64, len(pos.observedSeqnos)) + for index, seqno := range pos.observedSeqnos { + observedSeqnos[index] = seqno + } + pos.observedSeqnos = observedSeqnos + return pos +} diff --git a/x-pack/apm-server/sampling/pubsub/pubsub.go b/x-pack/apm-server/sampling/pubsub/pubsub.go index 80f1f2ac2a7..dc1a80e8614 100644 --- a/x-pack/apm-server/sampling/pubsub/pubsub.go +++ b/x-pack/apm-server/sampling/pubsub/pubsub.go @@ -9,31 +9,40 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" + "sync" "time" "github.com/pkg/errors" "go.elastic.co/fastjson" + "golang.org/x/sync/errgroup" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/elastic/go-elasticsearch/v7/esutil" + "github.com/elastic/apm-server/elasticsearch" logs "github.com/elastic/apm-server/log" ) +// ErrClosed may be returned by Pubsub methods after the Close method is called. +var ErrClosed = errors.New("pubsub closed") + +var errIndexNotFound = errors.New("index not found") + // Pubsub provides a means of publishing and subscribing to sampled trace IDs, // using Elasticsearch for temporary storage. // // An independent process will periodically reap old documents in the index. type Pubsub struct { - config Config - indexer esutil.BulkIndexer + config Config } // New returns a new Pubsub which can publish and subscribe sampled trace IDs, -// using Elasticsearch for storage. +// using Elasticsearch for storage. The Pubsub.Close method must be called when +// it is no longer needed. // // Documents are expected to be indexed through a pipeline which sets the // `event.ingested` timestamp field. Another process will periodically reap @@ -45,160 +54,296 @@ func New(config Config) (*Pubsub, error) { if config.Logger == nil { config.Logger = logp.NewLogger(logs.Sampling) } - indexer, err := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{ - Client: config.Client, - Index: config.Index, - FlushInterval: config.FlushInterval, + return &Pubsub{config: config}, nil +} + +// PublishSampledTraceIDs receives trace IDs from the traceIDs channel, +// indexing them into Elasticsearch. PublishSampledTraceIDs returns when +// ctx is canceled. +func (p *Pubsub) PublishSampledTraceIDs(ctx context.Context, traceIDs <-chan string) error { + indexer, err := p.config.Client.NewBulkIndexer(elasticsearch.BulkIndexerConfig{ + Index: p.config.DataStream.String(), + FlushInterval: p.config.FlushInterval, OnError: func(ctx context.Context, err error) { - config.Logger.With(logp.Error(err)).Debug("publishing sampled trace IDs failed") + p.config.Logger.With(logp.Error(err)).Debug("publishing sampled trace IDs failed") }, }) if err != nil { - return nil, err + return err } - return &Pubsub{config: config, indexer: indexer}, nil -} -// PublishSampledTraceIDs bulk indexes traceIDs into Elasticsearch. 
-func (p *Pubsub) PublishSampledTraceIDs(ctx context.Context, traceID ...string) error { - for _, id := range traceID { - var doc traceIDDocument - doc.Observer.ID = p.config.BeatID - doc.Trace.ID = id + var closeIndexerOnce sync.Once + var closeIndexerErr error + closeIndexer := func() error { + closeIndexerOnce.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), p.config.FlushInterval) + defer cancel() + closeIndexerErr = indexer.Close(ctx) + }) + return closeIndexerErr + } + defer closeIndexer() - var json fastjson.Writer - if err := doc.MarshalFastJSON(&json); err != nil { - return err - } - if err := p.indexer.Add(ctx, esutil.BulkIndexerItem{ - Action: "index", - Body: bytes.NewReader(json.Bytes()), - OnFailure: p.onBulkIndexerItemFailure, - }); err != nil { - return err + for { + select { + case <-ctx.Done(): + if err := ctx.Err(); err != context.Canceled { + return err + } + return closeIndexer() + case id := <-traceIDs: + var json fastjson.Writer + p.marshalTraceIDDocument(&json, id, time.Now(), p.config.DataStream) + if err := indexer.Add(ctx, elasticsearch.BulkIndexerItem{ + Action: "create", + Body: bytes.NewReader(json.Bytes()), + OnFailure: p.onBulkIndexerItemFailure, + }); err != nil { + return err + } } } - return nil } -func (p *Pubsub) onBulkIndexerItemFailure(ctx context.Context, item esutil.BulkIndexerItem, resp esutil.BulkIndexerResponseItem, err error) { - p.config.Logger.With(logp.Error(err)).Debug("publishing sampled trace ID failed") +func (p *Pubsub) onBulkIndexerItemFailure(ctx context.Context, item elasticsearch.BulkIndexerItem, resp elasticsearch.BulkIndexerResponseItem, err error) { + p.config.Logger.With(logp.Error(err)).Debug("publishing sampled trace ID failed", resp.Error) } -// SubscribeSampledTraceIDs subscribes to new sampled trace IDs, sending them to the -// traceIDs channel. -func (p *Pubsub) SubscribeSampledTraceIDs(ctx context.Context, traceIDs chan<- string) error { +// SubscribeSampledTraceIDs subscribes to sampled trace IDs after the given position, +// sending them to the traceIDs channel, and sending the most recently observed position +// (on change) to the positions channel. +func (p *Pubsub) SubscribeSampledTraceIDs( + ctx context.Context, + pos SubscriberPosition, + traceIDs chan<- string, + positions chan<- SubscriberPosition, +) error { ticker := time.NewTicker(p.config.SearchInterval) defer ticker.Stop() - // NOTE(axw) we should use the Changes API when it is implemented: - // https://github.com/elastic/elasticsearch/issues/1242 + // Only send positions on change. + var positionsOut chan<- SubscriberPosition + positionsOut = positions - var lastSeqNo int64 = 0 - var lastPrimaryTerm int64 = -1 + // Copy pos because it may be mutated by p.searchTraceIDs. + pos = copyPosition(pos) for { select { case <-ctx.Done(): return ctx.Err() + case positionsOut <- pos: + // Copy pos because it may be mutated by p.searchTraceIDs. + pos = copyPosition(pos) + positionsOut = nil case <-ticker.C: - } - for { - // Keep searching until there are no more new trace IDs. - n, err := p.searchTraceIDs(ctx, traceIDs, &lastSeqNo, &lastPrimaryTerm) + changed, err := p.searchTraceIDs(ctx, traceIDs, pos.observedSeqnos) if err != nil { // Errors may occur due to rate limiting, or while the index is // still being created, so just log and continue. p.config.Logger.With(logp.Error(err)).Debug("error searching for trace IDs") - break + continue } - if n == 0 { - // No more results, go back to sleep. 
-				break
+			if changed {
+				positionsOut = positions
 			}
 		}
 	}
 }
 
-// searchTraceIDs searches for new sampled trace IDs (after lastPrimaryTerm and lastSeqNo),
-// sending them to the out channel and returning the number of trace IDs sent.
-func (p *Pubsub) searchTraceIDs(ctx context.Context, out chan<- string, lastSeqNo, lastPrimaryTerm *int64) (int, error) {
-	searchBody := map[string]interface{}{
-		"size":                1000,
-		"seq_no_primary_term": true,
-
-		// Search from the most recently observed sequence number,
-		// in case _primary_term has increased and _seq_no is reused.
-		"sort":         []interface{}{map[string]interface{}{"_seq_no": "asc"}},
-		"search_after": []interface{}{*lastSeqNo - 1},
-
-		"query": map[string]interface{}{
-			// Filter out local observations.
-			"bool": map[string]interface{}{
-				"must_not": map[string]interface{}{
-					"term": map[string]interface{}{
-						"observer.id": map[string]interface{}{
-							"value": p.config.BeatID,
-						},
-					},
-				},
-			},
-		},
+// searchTraceIDs searches the configured data stream for new sampled trace IDs, sending them to the out channel.
+//
+// searchTraceIDs works by fetching the global checkpoint for each index backing the data stream, and comparing
+// this to the most recently observed sequence number for the indices. If the global checkpoint is greater, then
+// we search through every document with a sequence number greater than the most recently observed, and less than
+// or equal to the global checkpoint.
+//
+// Immediately after observing an updated global checkpoint we will force-refresh indices to ensure all documents
+// up to the global checkpoint are visible in subsequent searches.
+func (p *Pubsub) searchTraceIDs(ctx context.Context, out chan<- string, observedSeqnos map[string]int64) (bool, error) {
+	globalCheckpoints, err := getGlobalCheckpoints(ctx, p.config.Client, p.config.DataStream.String())
+	if err != nil {
+		return false, err
 	}
-	req := esapi.SearchRequest{
-		Index: []string{p.config.Index},
-		Body:  esutil.NewJSONReader(searchBody),
+	// Remove old indices from the observed _seq_no map.
+	for index := range observedSeqnos {
+		if _, ok := globalCheckpoints[index]; !ok {
+			delete(observedSeqnos, index)
+		}
 	}
-	resp, err := req.Do(ctx, p.config.Client)
+
+	// Force-refresh the indices with updated global checkpoints.
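+	// For example, if an index's observed _seq_no is 10 and its global
+	// checkpoint is now 15, the per-index search below covers _seq_no
+	// values in (10, 15].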
+	indices := make([]string, 0, len(globalCheckpoints))
+	for index, globalCheckpoint := range globalCheckpoints {
+		observedSeqno, ok := observedSeqnos[index]
+		if ok && globalCheckpoint <= observedSeqno {
+			delete(globalCheckpoints, index)
+			continue
+		}
+		indices = append(indices, index)
+	}
+	if err := p.refreshIndices(ctx, indices); err != nil {
+		return false, err
+	}
+
+	var changed bool
+	var observedSeqnosMu sync.Mutex
+	g, ctx := errgroup.WithContext(ctx)
+	for _, index := range indices {
+		globalCheckpoint := globalCheckpoints[index]
+		observedSeqno, ok := observedSeqnos[index]
+		if !ok {
+			observedSeqno = -1
+		}
+		index := index // copy for closure
+		g.Go(func() error {
+			maxSeqno, err := p.searchIndexTraceIDs(ctx, out, index, observedSeqno, globalCheckpoint)
+			if err != nil {
+				return err
+			}
+			if maxSeqno > observedSeqno {
+				observedSeqnosMu.Lock()
+				observedSeqno = maxSeqno
+				observedSeqnos[index] = observedSeqno
+				changed = true
+				observedSeqnosMu.Unlock()
+			}
+			return nil
+		})
+	}
+	return changed, g.Wait()
+}
+
+func (p *Pubsub) refreshIndices(ctx context.Context, indices []string) error {
+	if len(indices) == 0 {
+		return nil
+	}
+	ignoreUnavailable := true
+	resp, err := esapi.IndicesRefreshRequest{
+		Index:             indices,
+		IgnoreUnavailable: &ignoreUnavailable,
+	}.Do(ctx, p.config.Client)
 	if err != nil {
-		return 0, err
+		return err
 	}
 	defer resp.Body.Close()
 	if resp.IsError() {
-		switch resp.StatusCode {
-		case http.StatusNotFound:
-			return 0, nil
-		}
 		message, _ := ioutil.ReadAll(resp.Body)
-		return 0, fmt.Errorf("search request failed: %s", message)
+		return fmt.Errorf("index refresh request failed: %s", message)
 	}
+	return nil
+}
 
-	var result struct {
-		Hits struct {
-			Hits []struct {
-				SeqNo       int64           `json:"_seq_no,omitempty"`
-				PrimaryTerm int64           `json:"_primary_term,omitempty"`
-				Source      traceIDDocument `json:"_source"`
-			}
+// searchIndexTraceIDs searches index for sampled trace IDs whose documents have a _seq_no
+// greater than minSeqno and less than or equal to maxSeqno, and returns the greatest
+// observed _seq_no. Sampled trace IDs are sent to out.
+func (p *Pubsub) searchIndexTraceIDs(ctx context.Context, out chan<- string, index string, minSeqno, maxSeqno int64) (int64, error) {
+	var maxObservedSeqno int64 = -1
+	for maxObservedSeqno < maxSeqno {
+		// Include only documents after the old global checkpoint,
+		// and up to and including the new global checkpoint.
+		filters := []map[string]interface{}{{
+			"range": map[string]interface{}{
+				"_seq_no": map[string]interface{}{
+					"lte": maxSeqno,
+				},
+			},
+		}}
+		if minSeqno >= 0 {
+			filters = append(filters, map[string]interface{}{
+				"range": map[string]interface{}{
+					"_seq_no": map[string]interface{}{
+						"gt": minSeqno,
+					},
+				},
+			})
 		}
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-		return 0, err
-	}
-	if len(result.Hits.Hits) == 0 {
-		return 0, nil
-	}
-	var n int
-	maxPrimaryTerm := *lastPrimaryTerm
-	for _, hit := range result.Hits.Hits {
-		if hit.SeqNo < *lastSeqNo || (hit.SeqNo == *lastSeqNo && hit.PrimaryTerm <= *lastPrimaryTerm) {
-			continue
+		searchBody := map[string]interface{}{
+			"size":                1000,
+			"sort":                []interface{}{map[string]interface{}{"_seq_no": "asc"}},
+			"seq_no_primary_term": true,
+			"track_total_hits":    false,
+			"query": map[string]interface{}{
+				"bool": map[string]interface{}{
+					// Filter out local observations.
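+					// The assembled query is roughly:
+					//   {"bool": {"must_not": {"term": {"observer.id": ...}},
+					//             "filter": [{"range": {"_seq_no": {"gt": <min>, "lte": <max>}}}]}}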
+ "must_not": map[string]interface{}{ + "term": map[string]interface{}{ + "observer.id": map[string]interface{}{ + "value": p.config.BeatID, + }, + }, + }, + "filter": filters, + }, + }, } - select { - case <-ctx.Done(): - return n, ctx.Err() - case out <- hit.Source.Trace.ID: - n++ + + var result struct { + Hits struct { + Hits []struct { + Seqno int64 `json:"_seq_no"` + Source traceIDDocument `json:"_source"` + Sort []interface{} `json:"sort"` + } + } + } + if err := p.doSearchRequest(ctx, index, esutil.NewJSONReader(searchBody), &result); err != nil { + if err == errIndexNotFound { + // Index was deleted. + break + } + return -1, err + } + if len(result.Hits.Hits) == 0 { + break } - if hit.PrimaryTerm > maxPrimaryTerm { - maxPrimaryTerm = hit.PrimaryTerm + for _, hit := range result.Hits.Hits { + select { + case <-ctx.Done(): + return -1, ctx.Err() + case out <- hit.Source.Trace.ID: + } } + maxObservedSeqno = result.Hits.Hits[len(result.Hits.Hits)-1].Seqno } - // we sort by hit.SeqNo, but not _primary_term (you can't?) - *lastSeqNo = result.Hits.Hits[len(result.Hits.Hits)-1].SeqNo - *lastPrimaryTerm = maxPrimaryTerm - return n, nil + return maxObservedSeqno, nil +} + +func (p *Pubsub) doSearchRequest(ctx context.Context, index string, body io.Reader, out interface{}) error { + resp, err := esapi.SearchRequest{ + Index: []string{index}, + Body: body, + }.Do(ctx, p.config.Client) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.IsError() { + if resp.StatusCode == http.StatusNotFound { + return errIndexNotFound + } + message, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf("search request failed: %s", message) + } + return json.NewDecoder(resp.Body).Decode(out) +} + +func (p *Pubsub) marshalTraceIDDocument(w *fastjson.Writer, traceID string, timestamp time.Time, dataStream DataStreamConfig) { + w.RawString(`{"@timestamp":"`) + w.Time(timestamp.UTC(), time.RFC3339Nano) + w.RawString(`","data_stream.type":`) + w.String(dataStream.Type) + w.RawString(`,"data_stream.dataset":`) + w.String(dataStream.Dataset) + w.RawString(`,"data_stream.namespace":`) + w.String(dataStream.Namespace) + w.RawString(`,"observer":{"id":`) + w.String(p.config.BeatID) + w.RawString(`},`) + w.RawString(`"trace":{"id":`) + w.String(traceID) + w.RawString(`}}`) } type traceIDDocument struct { @@ -216,13 +361,3 @@ type traceIDDocument struct { ID string `json:"id"` } `json:"trace"` } - -func (d *traceIDDocument) MarshalFastJSON(w *fastjson.Writer) error { - w.RawString(`{"observer":{"id":`) - w.String(d.Observer.ID) - w.RawString(`},`) - w.RawString(`"trace":{"id":`) - w.String(d.Trace.ID) - w.RawString(`}}`) - return nil -} diff --git a/x-pack/apm-server/sampling/pubsub/pubsub_integration_test.go b/x-pack/apm-server/sampling/pubsub/pubsub_integration_test.go index 2887f08ccd2..6cfea4b53ae 100644 --- a/x-pack/apm-server/sampling/pubsub/pubsub_integration_test.go +++ b/x-pack/apm-server/sampling/pubsub/pubsub_integration_test.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "net" - "net/url" "os" "strings" "testing" @@ -20,8 +19,9 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/elastic/apm-server/elasticsearch" "github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub" ) @@ -35,28 +35,47 @@ const ( func TestElasticsearchIntegration_PublishSampledTraceIDs(t *testing.T) { const ( localBeatID = "local_beat_id" - 
indexName = "apm-testing-sampled-traces" ) - client := newElasticsearchClient(t) - recreateIndex(t, client, indexName) - - var input []string - for i := 0; i < 50; i++ { - input = append(input, uuid.Must(uuid.NewV4()).String()) + dataStream := pubsub.DataStreamConfig{ + Type: "apm", + Dataset: "sampled_traces", + Namespace: "testing", } + client := newElasticsearchClient(t) + recreateDataStream(t, client, dataStream) + es, err := pubsub.New(pubsub.Config{ Client: client, - Index: indexName, + DataStream: dataStream, BeatID: localBeatID, FlushInterval: 100 * time.Millisecond, SearchInterval: time.Minute, }) require.NoError(t, err) - err = es.PublishSampledTraceIDs(context.Background(), input...) - assert.NoError(t, err) + var input []string + for i := 0; i < 50; i++ { + input = append(input, uuid.Must(uuid.NewV4()).String()) + } + ids := make(chan string, len(input)) + for _, id := range input { + ids <- id + } + + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + return es.PublishSampledTraceIDs(ctx, ids) + }) + defer func() { + err := g.Wait() + assert.NoError(t, err) + }() + defer cancel() + + //input...) var result struct { Hits struct { @@ -74,10 +93,11 @@ func TestElasticsearchIntegration_PublishSampledTraceIDs(t *testing.T) { } for { - resp, err := client.Search( - client.Search.WithIndex(indexName), - client.Search.WithSize(len(input)+1), - ) + size := len(input) + 1 + resp, err := esapi.SearchRequest{ + Index: []string{dataStream.String()}, + Size: &size, + }.Do(context.Background(), client) require.NoError(t, err) if resp.IsError() { resp.Body.Close() @@ -104,15 +124,20 @@ func TestElasticsearchIntegration_SubscribeSampledTraceIDs(t *testing.T) { const ( localBeatID = "local_observer_id" remoteBeatID = "remote_observer_id" - indexName = "apm-testing-sampled-traces" ) + dataStream := pubsub.DataStreamConfig{ + Type: "apm", + Dataset: "sampled_traces", + Namespace: "testing", + } + client := newElasticsearchClient(t) - recreateIndex(t, client, indexName) + recreateDataStream(t, client, dataStream) es, err := pubsub.New(pubsub.Config{ Client: client, - Index: indexName, + DataStream: dataStream, BeatID: localBeatID, FlushInterval: time.Minute, SearchInterval: 100 * time.Millisecond, @@ -124,7 +149,7 @@ func TestElasticsearchIntegration_SubscribeSampledTraceIDs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() g.Go(func() error { - return es.SubscribeSampledTraceIDs(ctx, out) + return es.SubscribeSampledTraceIDs(ctx, pubsub.SubscriberPosition{}, out, nil) }) assert.NoError(t, err) @@ -152,7 +177,10 @@ func TestElasticsearchIntegration_SubscribeSampledTraceIDs(t *testing.T) { assert.NoError(t, enc.Encode(indexAction{})) assert.NoError(t, enc.Encode(&doc)) } - resp, err := client.Bulk(&body, client.Bulk.WithIndex(indexName)) + resp, err := esapi.BulkRequest{ + Index: dataStream.String(), + Body: &body, + }.Do(context.Background(), client) require.NoError(t, err) assert.False(t, resp.IsError()) resp.Body.Close() @@ -184,8 +212,11 @@ func TestElasticsearchIntegration_SubscribeSampledTraceIDs(t *testing.T) { } } -func recreateIndex(tb testing.TB, client *elasticsearch.Client, indexName string) { +func recreateDataStream(tb testing.TB, client elasticsearch.Client, dataStream pubsub.DataStreamConfig) { body := strings.NewReader(`{ + "settings": { + "index.number_of_shards": 1 + }, "mappings": { "properties": { "event.ingested": {"type": "date"}, @@ -202,40 +233,43 @@ func recreateIndex(tb 
@@ -202,40 +233,43 @@ func recreateIndex(tb testing.TB, client *elasticsearch.Client, indexName string
 			}
 		}
 	}`)
-	resp, err := client.Indices.Delete([]string{indexName})
+
+	// NOTE(aww) we cheat and create an index, rather than a
+	// data stream. System tests will test with data streams,
+	// and will pick up any resulting discrepancies.
+
+	name := dataStream.String()
+	resp, err := esapi.IndicesDeleteRequest{
+		Index: []string{dataStream.String()},
+	}.Do(context.Background(), client)
 	require.NoError(tb, err)
 	resp.Body.Close()
 
-	resp, err = client.Indices.Create(indexName, client.Indices.Create.WithBody(body))
+	resp, err = esapi.IndicesCreateRequest{
+		Index: name,
+		Body:  body,
+	}.Do(context.Background(), client)
 	require.NoError(tb, err)
-	assert.False(tb, resp.IsError())
+	require.False(tb, resp.IsError())
 	resp.Body.Close()
 }
 
-func newElasticsearchClient(tb testing.TB) *elasticsearch.Client {
+func newElasticsearchClient(tb testing.TB) elasticsearch.Client {
 	switch strings.ToLower(os.Getenv("INTEGRATION_TESTS")) {
 	case "1", "true":
 	default:
 		tb.Skip("Skipping integration test, export INTEGRATION_TESTS=1 to run")
 	}
-	esURL := url.URL{Scheme: "http", Host: net.JoinHostPort(
+	esHost := net.JoinHostPort(
 		getenvDefault("ES_HOST", defaultElasticsearchHost),
 		getenvDefault("ES_PORT", defaultElasticsearchPort),
-	)}
-	cfg := elasticsearch.Config{
-		Addresses: []string{esURL.String()},
-		RetryBackoff: func(attempt int) time.Duration {
-			backoff := time.Duration(attempt*100) * time.Millisecond
-			if backoff > time.Second {
-				backoff = time.Second
-			}
-			return backoff
-		},
-	}
-	cfg.Username = getenvDefault("ES_USER", defaultElasticsearchUser)
-	cfg.Password = getenvDefault("ES_PASS", defaultElasticsearchPass)
-	client, err := elasticsearch.NewClient(cfg)
+	)
+	client, err := elasticsearch.NewClient(&elasticsearch.Config{
+		Hosts:    []string{esHost},
+		Username: getenvDefault("ES_USER", defaultElasticsearchUser),
+		Password: getenvDefault("ES_PASS", defaultElasticsearchPass),
+	})
 	require.NoError(tb, err)
 	return client
 }
diff --git a/x-pack/apm-server/sampling/pubsub/pubsub_test.go b/x-pack/apm-server/sampling/pubsub/pubsub_test.go
index 469e551559a..815b0e46a84 100644
--- a/x-pack/apm-server/sampling/pubsub/pubsub_test.go
+++ b/x-pack/apm-server/sampling/pubsub/pubsub_test.go
@@ -10,10 +10,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"strings"
+	"sync"
 	"testing"
 	"time"
@@ -22,71 +22,68 @@ import (
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/elastic/go-elasticsearch/v7"
-
+	"github.com/elastic/apm-server/elasticsearch"
 	"github.com/elastic/apm-server/x-pack/apm-server/sampling/pubsub"
 )
 
-func TestPublishSampledTraceIDs(t *testing.T) {
-	const (
-		indexName = "trace-ids"
-		beatID    = "beat_id"
-	)
-
-	requests := make(chan *http.Request, 1)
-	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		var buf bytes.Buffer
-		if _, err := io.Copy(&buf, r.Body); err != nil {
-			panic(err)
-		}
-		r.Body = ioutil.NopCloser(&buf)
+const (
+	beatID = "beat_id"
+)
+
+var (
+	dataStream = pubsub.DataStreamConfig{
+		Type:      "traces",
+		Dataset:   "sampled",
+		Namespace: "testing",
+	}
+)
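+
+// The tests below exercise the publisher and subscriber against a stub
+// Elasticsearch (see newMockElasticsearchServer), intercepting _bulk,
+// _search and _stats requests through the mock's on* hooks.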
+func TestPublishSampledTraceIDs(t *testing.T) {
+	requestBodies := make(chan string)
+	ms := newMockElasticsearchServer(t)
+	ms.onBulk = func(r *http.Request) {
 		select {
 		case <-r.Context().Done():
-		case requests <- r:
+		case requestBodies <- readBody(r):
 		}
-	}))
-	defer srv.Close()
-
-	client, err := elasticsearch.NewClient(elasticsearch.Config{
-		Addresses: []string{srv.URL},
-	})
-	require.NoError(t, err)
-
-	pub, err := pubsub.New(pubsub.Config{
-		Client:         client,
-		Index:          indexName,
-		BeatID:         beatID,
-		FlushInterval:  time.Millisecond,
-		SearchInterval: time.Minute,
-	})
-	require.NoError(t, err)
+	}
+	pub := newPubsub(t, ms.srv, time.Millisecond, time.Minute)
 
-	var ids []string
-	for i := 0; i < 20; i++ {
-		ids = append(ids, uuid.Must(uuid.NewV4()).String())
+	input := make([]string, 20)
+	for i := 0; i < len(input); i++ {
+		input[i] = uuid.Must(uuid.NewV4()).String()
 	}
 
 	// Publish in a separate goroutine, as it may get blocked if we don't
 	// service bulk requests.
-	go func() {
-		for i := 0; i < len(ids); i += 2 {
-			err = pub.PublishSampledTraceIDs(context.Background(), ids[i], ids[i+1])
-			assert.NoError(t, err)
-			time.Sleep(10 * time.Millisecond) // sleep to force a new request
+	ids := make(chan string)
+	ctx, cancel := context.WithCancel(context.Background())
+	var g errgroup.Group
+	defer g.Wait()
+	defer cancel()
+	g.Go(func() error {
+		return pub.PublishSampledTraceIDs(ctx, ids)
+	})
+	g.Go(func() error {
+		for _, id := range input {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case ids <- id:
+			}
+			time.Sleep(10 * time.Millisecond) // sleep to force new requests
 		}
-	}()
+		return nil
+	})
 
 	var received []string
 	deadlineTimer := time.NewTimer(10 * time.Second)
-	for len(received) < len(ids) {
+	for len(received) < len(input) {
 		select {
 		case <-deadlineTimer.C:
 			t.Fatal("timed out waiting for events to be received by server")
-		case req := <-requests:
-			require.Equal(t, fmt.Sprintf("/%s/_bulk", indexName), req.URL.Path)
-
-			d := json.NewDecoder(req.Body)
+		case body := <-requestBodies:
+			d := json.NewDecoder(bytes.NewReader([]byte(body)))
 			for {
 				action := make(map[string]interface{})
 				err := d.Decode(&action)
@@ -94,11 +91,15 @@ func TestPublishSampledTraceIDs(t *testing.T) {
 					break
 				}
 				assert.NoError(t, err)
-				assert.Equal(t, map[string]interface{}{"index": map[string]interface{}{}}, action)
+				assert.Equal(t, map[string]interface{}{"create": map[string]interface{}{}}, action)
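+				// Elasticsearch data streams only accept the "create" bulk
+				// action, so the publisher emits "create" rather than "index".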
 
 				doc := make(map[string]interface{})
 				assert.NoError(t, d.Decode(&doc))
+				assert.Contains(t, doc, "@timestamp")
 				assert.Equal(t, map[string]interface{}{"id": beatID}, doc["observer"])
+				assert.Equal(t, dataStream.Type, doc["data_stream.type"])
+				assert.Equal(t, dataStream.Dataset, doc["data_stream.dataset"])
+				assert.Equal(t, dataStream.Namespace, doc["data_stream.namespace"])
 
 				trace := doc["trace"].(map[string]interface{})
 				traceID := trace["id"].(string)
@@ -106,8 +107,13 @@ func TestPublishSampledTraceIDs(t *testing.T) {
 				delete(trace, "id")
 				assert.Empty(t, trace) // no other fields in "trace"
 
+				delete(doc, "@timestamp")
+				delete(doc, "data_stream.type")
+				delete(doc, "data_stream.dataset")
+				delete(doc, "data_stream.namespace")
 				delete(doc, "observer")
 				delete(doc, "trace")
+				assert.Empty(t, doc) // no other fields in doc
 			}
 		}
 
@@ -115,134 +121,283 @@ func TestPublishSampledTraceIDs(t *testing.T) {
 	// The publisher uses an esutil.BulkIndexer, which may index items out
 	// of order due to having multiple goroutines picking items off a queue.
-	assert.ElementsMatch(t, ids, received)
+	assert.ElementsMatch(t, input, received)
 }
 
 func TestSubscribeSampledTraceIDs(t *testing.T) {
-	const (
-		indexName = "trace-ids"
-		beatID    = "beat_id"
-	)
-
-	var requests []*http.Request
-	responses := make(chan string)
-	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		var buf bytes.Buffer
-		if _, err := io.Copy(&buf, r.Body); err != nil {
-			panic(err)
+	ms := newMockElasticsearchServer(t)
+	ms.statsGlobalCheckpoint = 99
+
+	assertSearchQueryFilterEqual := func(filter, body string) {
+		expect := fmt.Sprintf(
+			`{"query":{"bool":{"filter":%s,"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}],"track_total_hits":false}`,
+			filter,
+		)
+		assert.Equal(t, expect, body)
+	}
+
+	var searchRequests int
+	ms.onSearch = func(r *http.Request) {
+		body := readBody(r)
+		searchRequests++
+		switch searchRequests {
+		case 1:
+			assertSearchQueryFilterEqual(`[{"range":{"_seq_no":{"lte":99}}}]`, body)
+			ms.searchResults = []searchHit{
+				newSearchHit(1, "trace_1"),
+				newSearchHit(2, "trace_2"),
+			}
+		case 2:
+			// The previous _search returned hits, and the greatest
+			// _seq_no was not equal to the global checkpoint: _search again
+			// after _seq_no 2.
+			assertSearchQueryFilterEqual(`[{"range":{"_seq_no":{"lte":99}}}]`, body)
+			ms.searchResults = []searchHit{
+				newSearchHit(3, "trace_3"),
+				newSearchHit(98, "trace_98"),
+			}
+		case 3:
+			// Again the previous _search returned hits, and the greatest
+			// _seq_no was not equal to the global checkpoint: _search again
+			// after _seq_no 98. This time we respond with no hits, so the
+			// subscriber goes back to sleep.
+			assertSearchQueryFilterEqual(`[{"range":{"_seq_no":{"lte":99}}}]`, body)
+			ms.searchResults = nil
+		case 4:
+			// The search now has an exclusive lower bound of the previously
+			// observed maximum _seq_no. When the global checkpoint is observed,
+			// the subscriber stops issuing search requests and goes back to sleep.
+			assertSearchQueryFilterEqual(`[{"range":{"_seq_no":{"lte":99}}},{"range":{"_seq_no":{"gt":98}}}]`, body)
+			ms.searchResults = []searchHit{
+				newSearchHit(99, "trace_99"),
+			}
+		case 5:
+			// After advancing the global checkpoint, a new search will be made
+			// with increased lower and upper bounds.
+			assertSearchQueryFilterEqual(`[{"range":{"_seq_no":{"lte":100}}},{"range":{"_seq_no":{"gt":99}}}]`, body)
+			ms.searchResults = []searchHit{
+				newSearchHit(100, "trace_100"),
+			}
 		}
-		r.Body = ioutil.NopCloser(&buf)
-		requests = append(requests, r)
+	}
+
+	ids, positions, closeSubscriber := newSubscriber(t, ms.srv)
+	assert.Equal(t, "trace_1", expectValue(t, ids))
+	assert.Equal(t, "trace_2", expectValue(t, ids))
+	assert.Equal(t, "trace_3", expectValue(t, ids))
+	assert.Equal(t, "trace_98", expectValue(t, ids))
+	assert.Equal(t, "trace_99", expectValue(t, ids))
+	expectNone(t, ids)
+
+	// Wait for the position to be reported. The position should be
+	// non-zero, and when used should resume subscription without
+	// returning already observed IDs.
+	var pos pubsub.SubscriberPosition
+	select {
+	case pos = <-positions:
+		assert.NotZero(t, pos)
+	case <-time.After(10 * time.Second):
+		t.Fatal("timed out waiting for position to be reported")
+	}
+
+	// close first subscriber, create a new one initialised with position
+	closeSubscriber()
+	ids, positions, _ = newSubscriberPosition(t, ms.srv, pos)
+	// Global checkpoint hasn't changed.
+	expectNone(t, ids)
+
+	// Advance global checkpoint, expect a new search and new position to be reported.
+	ms.statsGlobalCheckpointMu.Lock()
+	ms.statsGlobalCheckpoint = 100
+	ms.statsGlobalCheckpointMu.Unlock()
+	assert.Equal(t, "trace_100", expectValue(t, ids))
+	select {
+	case pos2 := <-positions:
+		assert.NotEqual(t, pos, pos2)
+	case <-time.After(10 * time.Second):
+		t.Fatal("timed out waiting for position to be reported")
+	}
+}
+
+func TestSubscribeSampledTraceIDsErrors(t *testing.T) {
+	statsRequests := make(chan struct{})
+	firstStats := true
+	m := newMockElasticsearchServer(t)
+	m.searchStatusCode = http.StatusNotFound
+	m.statsGlobalCheckpoint = 99
+	m.onStats = func(r *http.Request) {
 		select {
 		case <-r.Context().Done():
-		case resp := <-responses:
-			w.Write([]byte(resp))
+		case statsRequests <- struct{}{}:
+		}
+		if firstStats {
+			firstStats = false
+			return
+		}
+		m.statsStatusCode = http.StatusInternalServerError
+	}
+	newSubscriber(t, m.srv)
+
+	// Show that failed requests to Elasticsearch are not fatal, and
+	// that the subscriber will retry.
+	timeout := time.After(10 * time.Second)
+	for i := 0; i < 10; i++ {
+		select {
+		case <-statsRequests:
+		case <-timeout:
+			t.Fatal("timed out waiting for _stats request")
 		}
-	}))
-	defer srv.Close()
+	}
+}
+
+func newSubscriber(t testing.TB, srv *httptest.Server) (<-chan string, <-chan pubsub.SubscriberPosition, context.CancelFunc) {
+	return newSubscriberPosition(t, srv, pubsub.SubscriberPosition{})
+}
+
+func newSubscriberPosition(t testing.TB, srv *httptest.Server, pos pubsub.SubscriberPosition) (<-chan string, <-chan pubsub.SubscriberPosition, context.CancelFunc) {
+	sub := newPubsub(t, srv, time.Minute, time.Millisecond)
+	ids := make(chan string)
+	positions := make(chan pubsub.SubscriberPosition)
+	ctx, cancel := context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+	g.Go(func() error {
+		return sub.SubscribeSampledTraceIDs(ctx, pos, ids, positions)
+	})
+	cancelFunc := func() {
+		cancel()
+		g.Wait()
+	}
+	t.Cleanup(cancelFunc)
+	return ids, positions, cancelFunc
+}
 
-	client, err := elasticsearch.NewClient(elasticsearch.Config{
-		Addresses: []string{srv.URL},
+func newPubsub(t testing.TB, srv *httptest.Server, flushInterval, searchInterval time.Duration) *pubsub.Pubsub {
+	client, err := elasticsearch.NewClient(&elasticsearch.Config{
+		Hosts: []string{srv.Listener.Addr().String()},
 	})
 	require.NoError(t, err)
 
 	sub, err := pubsub.New(pubsub.Config{
 		Client:         client,
-		Index:          indexName,
+		DataStream:     dataStream,
 		BeatID:         beatID,
-		FlushInterval:  time.Minute,
-		SearchInterval: time.Millisecond,
+		FlushInterval:  flushInterval,
+		SearchInterval: searchInterval,
 	})
 	require.NoError(t, err)
+	return sub
+}
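+
+// newPubsub's interval arguments determine test behaviour: publisher tests
+// pass a short flush interval so _bulk requests happen promptly, while
+// subscriber tests pass a short search interval so polling happens promptly;
+// the unused interval is set high to keep it out of the way.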
 
-	ids := make(chan string)
-	ctx, cancel := context.WithCancel(context.Background())
-	g, ctx := errgroup.WithContext(ctx)
-	go g.Go(func() error {
-		return sub.SubscribeSampledTraceIDs(ctx, ids)
-	})
-	defer g.Wait()
-	defer cancel()
+type mockElasticsearchServer struct {
+	srv *httptest.Server
 
-	responses <- `{
-		"hits": {
-			"hits": [
-				{
-					"_seq_no": 1,
-					"_primary_term": 1,
-					"_source": {"trace": {"id": "trace_1"}, "observer": {"id": "another_beat_id"}}
-				},
-				{
-					"_seq_no": 2,
-					"_primary_term": 2,
-					"_source": {"trace": {"id": "trace_2"}, "observer": {"id": "another_beat_id"}}
-				}
-			]
-		}
-	}`
+	// statsGlobalCheckpoint is the shard seq_no global_checkpoint to respond with in
+	// the _stats/get handler.
+	statsGlobalCheckpointMu sync.RWMutex
+	statsGlobalCheckpoint   int
 
-	assert.Equal(t, "trace_1", expectValue(t, ids))
-	assert.Equal(t, "trace_2", expectValue(t, ids))
+	// statsStatusCode is the status code that the _stats/get handler responds with.
+	statsStatusCode int
 
-	responses <- "nonsense" // bad response, subscriber continues
-
-	// trace_2 is repeated, since we search for >= the last
-	// _seq_no, in case there's a new _primary_term.
-	responses <- `{
-		"hits": {
-			"hits": [
-				{
-					"_seq_no": 2,
-					"_primary_term": 2,
-					"_source": {"trace": {"id": "trace_2"}, "observer": {"id": "another_beat_id"}}
-				},
-				{
-					"_seq_no": 2,
-					"_primary_term": 3,
-					"_source": {"trace": {"id": "trace_2b"}, "observer": {"id": "another_beat_id"}}
-				},
-				{
-					"_seq_no": 99,
-					"_primary_term": 3,
-					"_source": {"trace": {"id": "trace_99"}, "observer": {"id": "another_beat_id"}}
-				}
-			]
-		}
-	}`
+	// searchResults is the search hits that the _search handler responds with.
+	searchResults []searchHit
 
-	assert.Equal(t, "trace_2b", expectValue(t, ids))
-	assert.Equal(t, "trace_99", expectValue(t, ids))
+	// searchStatusCode is the status code that the _search handler responds with.
+	searchStatusCode int
 
-	responses <- `{"hits":{"hits":[]}}` // no hits
-	expectNone(t, ids)
+	// onStats is a function that is invoked whenever a _stats/get request is received.
+	// This may be used to adjust the status code or global checkpoint that will be
+	// returned.
+	onStats func(r *http.Request)
 
-	cancel() // stop subscriber
-	srv.Close()
+	// onSearch is a function that is invoked whenever a _search request is received.
+	// This may be used to check the search query, and adjust the search results that
+	// will be returned.
+	onSearch func(r *http.Request)
 
-	var bodies []string
-	for _, r := range requests {
-		assert.Equal(t, fmt.Sprintf("/%s/_search", indexName), r.URL.Path)
+	// onBulk is a function that is invoked whenever a _bulk request is received.
+	// This may be used to check the publication of sampled trace IDs.
+	onBulk func(r *http.Request)
+}
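+
+// A typical test drives the mock like this (sketch):
+//
+//	ms := newMockElasticsearchServer(t)
+//	ms.statsGlobalCheckpoint = 99
+//	ms.onSearch = func(r *http.Request) {
+//		ms.searchResults = []searchHit{newSearchHit(1, "trace_1")}
+//	}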
 
-		var buf bytes.Buffer
-		io.Copy(&buf, r.Body)
-		bodies = append(bodies, strings.TrimSpace(buf.String()))
+func newMockElasticsearchServer(t testing.TB) *mockElasticsearchServer {
+	m := &mockElasticsearchServer{
+		statsStatusCode:  http.StatusOK,
+		searchStatusCode: http.StatusOK,
+		onStats:          func(*http.Request) {},
+		onSearch:         func(*http.Request) {},
+		onBulk:           func(*http.Request) {},
 	}
-	assert.Equal(t, []string{
-		`{"query":{"bool":{"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"search_after":[-1],"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}]}`,
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		panic(fmt.Errorf("unexpected URL path: %s", r.URL.Path))
+	})
+	mux.HandleFunc("/"+dataStream.String()+"/_bulk", m.handleBulk)
+	mux.HandleFunc("/"+dataStream.String()+"/_stats/get", m.handleStats)
+	mux.HandleFunc("/index_name/_refresh", m.handleRefresh)
+	mux.HandleFunc("/index_name/_search", m.handleSearch)
+
+	m.srv = httptest.NewServer(mux)
+	t.Cleanup(m.srv.Close)
+	return m
+}
+
+func (m *mockElasticsearchServer) handleStats(w http.ResponseWriter, r *http.Request) {
+	m.onStats(r)
+	w.WriteHeader(m.statsStatusCode)
+	if m.statsStatusCode != http.StatusOK {
+		return
+	}
+
+	m.statsGlobalCheckpointMu.RLock()
+	checkpoint := m.statsGlobalCheckpoint
+	m.statsGlobalCheckpointMu.RUnlock()
+
+	w.Write([]byte(fmt.Sprintf(`{
+		"indices": {
+			"index_name": {
+				"shards": {
+					"0": [{
+						"routing": {
+							"primary": true
+						},
+						"seq_no": {
+							"global_checkpoint": %d
+						}
+					}]
+				}
+			}
+		}
+	}`, checkpoint)))
+}
+
+func (m *mockElasticsearchServer) handleRefresh(w http.ResponseWriter, r *http.Request) {
+	// Empty 200 OK response
+}
 
-		// Repeats because of the invalid response.
-		`{"query":{"bool":{"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"search_after":[1],"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}]}`,
-		`{"query":{"bool":{"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"search_after":[1],"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}]}`,
+func (m *mockElasticsearchServer) handleSearch(w http.ResponseWriter, r *http.Request) {
+	m.onSearch(r)
+	w.WriteHeader(m.searchStatusCode)
+	if m.searchStatusCode != http.StatusOK {
+		return
+	}
+	var body struct {
+		Hits struct {
+			Hits []searchHit `json:"hits"`
+		} `json:"hits"`
+	}
+	body.Hits.Hits = m.searchResults
+	json.NewEncoder(w).Encode(body)
+}
 
-		// Repeats because of the zero hits response.
- `{"query":{"bool":{"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"search_after":[98],"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}]}`, - `{"query":{"bool":{"must_not":{"term":{"observer.id":{"value":"beat_id"}}}}},"search_after":[98],"seq_no_primary_term":true,"size":1000,"sort":[{"_seq_no":"asc"}]}`, - }, bodies) +func (m *mockElasticsearchServer) handleBulk(w http.ResponseWriter, r *http.Request) { + m.onBulk(r) } func expectValue(t testing.TB, ch <-chan string) string { + t.Helper() select { case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for trace ID to be sent") @@ -254,9 +409,39 @@ func expectValue(t testing.TB, ch <-chan string) string { } func expectNone(t testing.TB, ch <-chan string) { + t.Helper() select { - case <-time.After(500 * time.Millisecond): + case <-time.After(100 * time.Millisecond): case v := <-ch: t.Errorf("unexpected send on channel: %q", v) } } + +func readBody(r *http.Request) string { + var buf bytes.Buffer + io.Copy(&buf, r.Body) + return strings.TrimSpace(buf.String()) +} + +type searchHit struct { + SeqNo int64 `json:"_seq_no,omitempty"` + Source traceIDDocument `json:"_source"` + Sort []int64 `json:"sort"` +} + +func newSearchHit(seqNo int64, traceID string) searchHit { + var source traceIDDocument + source.Observer.ID = "another_beat_id" + source.Trace.ID = traceID + return searchHit{SeqNo: seqNo, Source: source, Sort: []int64{seqNo}} +} + +type traceIDDocument struct { + Observer struct { + ID string `json:"id"` + } `json:"observer"` + + Trace struct { + ID string `json:"id"` + } `json:"trace"` +} diff --git a/x-pack/apm-server/sampling/pubsub/pubsubtest/client.go b/x-pack/apm-server/sampling/pubsub/pubsubtest/client.go new file mode 100644 index 00000000000..22cc3d4e49c --- /dev/null +++ b/x-pack/apm-server/sampling/pubsub/pubsubtest/client.go @@ -0,0 +1,241 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package pubsubtest + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "time" + + "github.com/elastic/apm-server/elasticsearch" +) + +// Publisher is an interface to pass to Client that responds to publish +// requests, consuming a trace ID sent by the requester. +type Publisher interface { + Publish(ctx context.Context, traceID string) error +} + +// PublisherChan is a Publisher implemented as a channel. +type PublisherChan chan<- string + +// Publish waits for traceID to be sent on c, or for ctx to be done. +func (c PublisherChan) Publish(ctx context.Context, traceID string) error { + select { + case <-ctx.Done(): + return ctx.Err() + case c <- traceID: + return nil + } +} + +// PublisherFunc is a Publisher implemented as a function. +type PublisherFunc func(context.Context, string) error + +// Publish calls f(ctx, traceID). +func (f PublisherFunc) Publish(ctx context.Context, traceID string) error { + return f(ctx, traceID) +} + +// Subscriber is an interface to pass to Client that responds to subscribe +// requests, returning a trace ID to send back to the requester. +type Subscriber interface { + Subscribe(ctx context.Context) (traceID string, err error) +} + +// SubscriberChan is a Subscriber implemented as a channel. +type SubscriberChan <-chan string + +// Subscribe waits for a trace ID to be received on c, or for ctx to be done. 
+// Subscribe waits for a trace ID to be received on c, or for ctx to be done.
+func (c SubscriberChan) Subscribe(ctx context.Context) (string, error) {
+	select {
+	case <-ctx.Done():
+		return "", ctx.Err()
+	case traceID, ok := <-c:
+		if !ok {
+			return "", errors.New("channel closed")
+		}
+		return traceID, nil
+	}
+}
+
+// SubscriberFunc is a Subscriber implemented as a function.
+type SubscriberFunc func(ctx context.Context) (string, error)
+
+// Subscribe calls f(ctx).
+func (f SubscriberFunc) Subscribe(ctx context.Context) (string, error) {
+	return f(ctx)
+}
+
+// Client returns a new elasticsearch.Client, suitable for use with pubsub,
+// that responds to publish requests by calling pub (if non-nil) and subscribe
+// requests by calling sub (if non-nil). If either function is nil, then the
+// respective operation will be a no-op.
+func Client(pub Publisher, sub Subscriber) elasticsearch.Client {
+	client, err := elasticsearch.NewClientParams(elasticsearch.ClientParams{
+		Config:    elasticsearch.DefaultConfig(),
+		Transport: &channelClientRoundTripper{pub: pub, sub: sub},
+	})
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
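+
+// For example, a test that only needs to observe published trace IDs might
+// construct a client like this (sketch):
+//
+//	published := make(chan string)
+//	client := pubsubtest.Client(pubsubtest.PublisherChan(published), nil)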
+
+type channelClientRoundTripper struct {
+	pub   Publisher
+	sub   Subscriber
+	seqno int64
+}
+
+func (rt *channelClientRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+	var handler func(*http.Request, *httptest.ResponseRecorder) error
+	switch r.Method {
+	case "GET":
+		if strings.HasSuffix(r.URL.Path, "/_stats/get") {
+			handler = rt.roundTripStats
+		}
+	case "POST":
+		if strings.HasSuffix(r.URL.Path, "/_refresh") {
+			handler = rt.roundTripRefreshIndices
+		} else if strings.HasSuffix(r.URL.Path, "/_bulk") {
+			handler = rt.roundTripBulk
+		} else if strings.HasSuffix(r.URL.Path, "/_search") {
+			handler = rt.roundTripSearch
+		}
+	}
+	if handler == nil {
+		panic(fmt.Errorf("unhandled path %q %q", r.Method, r.URL.Path))
+	}
+	recorder := httptest.NewRecorder()
+	if err := handler(r, recorder); err != nil {
+		return nil, err
+	}
+	return recorder.Result(), nil
+}
+
+func (rt *channelClientRoundTripper) roundTripStats(r *http.Request, recorder *httptest.ResponseRecorder) error {
+	type shardRouting struct {
+		Primary bool `json:"primary"`
+	}
+	type shardSeqNo struct {
+		GlobalCheckpoint int64 `json:"global_checkpoint"`
+	}
+	type shardStats struct {
+		Routing shardRouting `json:"routing"`
+		SeqNo   shardSeqNo   `json:"seq_no"`
+	}
+	type indexStats struct {
+		Shards map[string][]shardStats `json:"shards"`
+	}
+	var result struct {
+		Indices map[string]indexStats `json:"indices"`
+	}
+	result.Indices = map[string]indexStats{
+		"index_name": {
+			Shards: map[string][]shardStats{
+				"0": []shardStats{{
+					Routing: shardRouting{Primary: true},
+					SeqNo:   shardSeqNo{GlobalCheckpoint: 1<<63 - 1},
+				}},
+			},
+		},
+	}
+	if err := json.NewEncoder(recorder).Encode(result); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (rt *channelClientRoundTripper) roundTripRefreshIndices(r *http.Request, recorder *httptest.ResponseRecorder) error {
+	return nil
+}
+
+func (rt *channelClientRoundTripper) roundTripSearch(r *http.Request, recorder *httptest.ResponseRecorder) error {
+	var result struct {
+		Hits struct {
+			Hits []traceIDDocumentHit
+		}
+	}
+	if rt.sub != nil {
+		ctx, cancel := context.WithTimeout(r.Context(), 50*time.Millisecond)
+		defer cancel()
+		for {
+			var traceID string
+			err := ctx.Err()
+			if err == nil {
+				traceID, err = rt.sub.Subscribe(ctx)
+			}
+			if err == context.DeadlineExceeded {
+				break
+			} else if err != nil {
+				return err
+			}
+			rt.seqno++
+			hit := traceIDDocumentHit{SeqNo: rt.seqno}
+			hit.Source.Trace.ID = traceID
+			hit.Source.Observer.ID = "👀"
+			result.Hits.Hits = append(result.Hits.Hits, hit)
+		}
+	}
+	if err := json.NewEncoder(recorder).Encode(result); err != nil {
+		return err
+	}
+	recorder.Flush()
+	return nil
+}
+
+func (rt *channelClientRoundTripper) roundTripBulk(r *http.Request, recorder *httptest.ResponseRecorder) error {
+	var results []map[string]elasticsearch.BulkIndexerResponseItem
+	dec := json.NewDecoder(r.Body)
+	for {
+		var m map[string]interface{}
+		if err := dec.Decode(&m); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		var action string
+		for action = range m {
+		}
+		var doc traceIDDocument
+		if err := dec.Decode(&doc); err != nil {
+			return err
+		}
+		if rt.pub != nil {
+			if err := rt.pub.Publish(r.Context(), doc.Trace.ID); err != nil {
+				return err
+			}
+		}
+		result := elasticsearch.BulkIndexerResponseItem{Status: 200}
+		results = append(results, map[string]elasticsearch.BulkIndexerResponseItem{action: result})
+	}
+	if err := json.NewEncoder(recorder).Encode(results); err != nil {
+		return err
+	}
+	return nil
+}
+
+type traceIDDocument struct {
+	Observer struct {
+		ID string `json:"id"`
+	} `json:"observer"`
+
+	Trace struct {
+		ID string `json:"id"`
+	} `json:"trace"`
+}
+
+type traceIDDocumentHit struct {
+	SeqNo  int64           `json:"_seq_no,omitempty"`
+	Source traceIDDocument `json:"_source"`
+}
diff --git a/x-pack/apm-server/sampling/reservoir.go b/x-pack/apm-server/sampling/reservoir.go
new file mode 100644
index 00000000000..535cb4d4808
--- /dev/null
+++ b/x-pack/apm-server/sampling/reservoir.go
@@ -0,0 +1,130 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sampling
+
+import (
+	"container/heap"
+	"math"
+	"math/rand"
+)
+
+// weightedRandomSample provides a weighted, random, reservoir sampling
+// implementation, using Algorithm A-Res (Algorithm A with Reservoir).
+//
+// See https://en.wikipedia.org/wiki/Reservoir_sampling#Algorithm_A-Res
+type weightedRandomSample struct {
+	rng *rand.Rand
+	itemheap
+}
+
+// newWeightedRandomSample constructs a new weighted random sampler,
+// with the given random number generator and reservoir size.
+func newWeightedRandomSample(rng *rand.Rand, reservoirSize int) *weightedRandomSample {
+	return &weightedRandomSample{
+		rng: rng,
+		itemheap: itemheap{
+			keys:   make([]float64, 0, reservoirSize),
+			values: make([]string, 0, reservoirSize),
+		},
+	}
+}
+
+// Sample records a trace ID with a random probability, proportional to
+// the given weight in the range [0, math.MaxFloat64].
+func (s *weightedRandomSample) Sample(weight float64, traceID string) bool {
+	k := math.Pow(s.rng.Float64(), 1/weight)
+	if len(s.values) < cap(s.values) {
+		heap.Push(&s.itemheap, item{key: k, value: traceID})
+		return true
+	}
+	if k > s.keys[0] {
+		s.keys[0] = k
+		s.values[0] = traceID
+		heap.Fix(&s.itemheap, 0)
+		return true
+	}
+	return false
+}
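+
+// In A-Res each item is assigned the key k = u^(1/w), where u is drawn
+// uniformly from (0, 1) and w is the item's weight; keeping the largest keys
+// means heavier items are more likely to remain in the reservoir.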
+
+// Reset clears the current values, retaining the underlying storage space.
+func (s *weightedRandomSample) Reset() {
+	s.keys = s.keys[:0]
+	s.values = s.values[:0]
+}
+
+// Size returns the reservoir capacity.
+func (s *weightedRandomSample) Size() int {
+	return cap(s.keys)
+}
+
+// Resize resizes the reservoir capacity to n.
+//
+// Resize is not guaranteed to retain the items with the greatest weight,
+// due to randomisation.
+func (s *weightedRandomSample) Resize(n int) {
+	if n > cap(s.keys) {
+		// Increase capacity by copying into a new slice.
+		keys := make([]float64, len(s.keys), n)
+		values := make([]string, len(s.values), n)
+		copy(keys, s.keys)
+		copy(values, s.values)
+		s.keys = keys
+		s.values = values
+	} else if cap(s.keys) > n {
+		for len(s.keys) > n {
+			heap.Pop(&s.itemheap)
+		}
+		s.keys = s.keys[0:len(s.keys):n]
+		s.values = s.values[0:len(s.values):n]
+	}
+}
+
+// Pop removes the trace ID with the lowest key.
+// Pop panics when called on an empty reservoir.
+func (s *weightedRandomSample) Pop() string {
+	item := heap.Pop(&s.itemheap).(item)
+	return item.value
+}
+
+// Values returns a copy of the currently sampled trace IDs.
+func (s *weightedRandomSample) Values() []string {
+	values := make([]string, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+type itemheap struct {
+	keys   []float64
+	values []string
+}
+
+type item struct {
+	key   float64
+	value string
+}
+
+func (h itemheap) Len() int           { return len(h.keys) }
+func (h itemheap) Less(i, j int) bool { return h.keys[i] < h.keys[j] }
+func (h itemheap) Swap(i, j int) {
+	h.keys[i], h.keys[j] = h.keys[j], h.keys[i]
+	h.values[i], h.values[j] = h.values[j], h.values[i]
+}
+
+func (h *itemheap) Push(x interface{}) {
+	item := x.(item)
+	h.keys = append(h.keys, item.key)
+	h.values = append(h.values, item.value)
+}
+
+func (h *itemheap) Pop() interface{} {
+	n := len(h.keys)
+	item := item{
+		key:   h.keys[n-1],
+		value: h.values[n-1],
	}
+	h.keys = h.keys[:n-1]
+	h.values = h.values[:n-1]
+	return item
+}
diff --git a/x-pack/apm-server/sampling/reservoir_test.go b/x-pack/apm-server/sampling/reservoir_test.go
new file mode 100644
index 00000000000..26a0bf86311
--- /dev/null
+++ b/x-pack/apm-server/sampling/reservoir_test.go
@@ -0,0 +1,32 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package sampling
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestResizeReservoir(t *testing.T) {
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	res := newWeightedRandomSample(rng, 2)
+	res.Sample(1, "a")
+	res.Sample(2, "b")
+	assert.Len(t, res.Values(), 2)
+	res.Resize(1)
+	assert.Len(t, res.Values(), 1)
+}
+
+func TestResetReservoir(t *testing.T) {
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	res := newWeightedRandomSample(rng, 2)
+	res.Sample(1, "a")
+	res.Sample(2, "b")
+	res.Reset()
+	assert.Len(t, res.Values(), 0)
+}